{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,29]],"date-time":"2025-11-29T07:56:32Z","timestamp":1764402992217},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9054591","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T16:21:13Z","timestamp":1586449273000},"page":"3267-3271","source":"Crossref","is-referenced-by-count":18,"title":["Unsupervised Style and Content Separation by Minimizing Mutual Information for Speech Synthesis"],"prefix":"10.1109","author":[{"given":"Ting-Yao","family":"Hu","sequence":"first","affiliation":[]},{"given":"Ashish","family":"Shrivastava","sequence":"additional","affiliation":[]},{"given":"Oncel","family":"Tuzel","sequence":"additional","affiliation":[]},{"given":"Chandra","family":"Dhir","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Mutual information neural estimation","author":"belghazi","year":"2018","journal-title":"Proc ICML"},{"key":"ref11","article-title":"Deep voice 3: 2000-speaker neural text-to-speech","author":"ping","year":"2018","journal-title":"Proc ICLR"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TASSP.1984.1164317"},{"article-title":"Wavenet: A generative model for raw audio","year":"2016","author":"den oord","key":"ref13"},{"article-title":"WaveGlow: A flow-based generative network for speech synthesis","year":"2018","author":"prenger","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.265"},{"key":"ref16","article-title":"Fader networks: Manipulating images by sliding attributes","author":"lample","year":"2017","journal-title":"Proc NIPS"},{"key":"ref17","article-title":"InfoGAN: Interpretable representation learning by information maximizing generative adversarial nets","author":"chen","year":"2016","journal-title":"Proc NIPS"},{"key":"ref18","article-title":"Toward controlled generation of text","author":"hu","year":"2017","journal-title":"Proc ICML"},{"key":"ref19","article-title":"Generative adversarial nets","author":"goodfellow","year":"2014","journal-title":"Proc NIPS"},{"key":"ref4","article-title":"Style tokens: Unsupervised style modeling, control and transfer in end-to-end speech synthesis","author":"wang","year":"2018","journal-title":"Proc ICML"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016706"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/K18-2025"},{"key":"ref5","article-title":"Hierarchical generative modeling for controllable speech synthesis","author":"hsu","year":"2019","journal-title":"Proc ICLR"},{"article-title":"Melnet: A generative model for audio in the frequency 
domain","year":"2019","author":"vasquez","key":"ref8"},{"key":"ref7","article-title":"A generative adversarial network for style modeling in a text-to-speech system","author":"ma","year":"2019","journal-title":"Proc ICLR"},{"key":"ref2","article-title":"Deep voice: Real-time neural text-tospeech","author":"ar?k","year":"2017","journal-title":"Proc ICML"},{"key":"ref9","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc NIPS"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729694"},{"article-title":"English multi-speaker corpus for cstr voice cloning toolkit","year":"2012","author":"yamagishi","key":"ref22"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1002\/cpa.3160360204"},{"article-title":"The lj speech dataset","year":"2017","author":"ito","key":"ref24"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2020,5,4]]},"location":"Barcelona, Spain","end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09054591.pdf?arnumber=9054591","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,27]],"date-time":"2022-06-27T20:08:59Z","timestamp":1656360539000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9054591\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9054591","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}