{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,9]],"date-time":"2026-01-09T02:19:55Z","timestamp":1767925195868,"version":"3.49.0"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9054734","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T20:21:13Z","timestamp":1586463673000},"page":"6284-6288","source":"Crossref","is-referenced-by-count":56,"title":["F0-Consistent Many-To-Many Non-Parallel Voice Conversion Via Conditional Autoencoder"],"prefix":"10.1109","author":[{"given":"Kaizhi","family":"Qian","sequence":"first","affiliation":[]},{"given":"Zeyu","family":"Jin","sequence":"additional","affiliation":[]},{"given":"Mark","family":"Hasegawa-Johnson","sequence":"additional","affiliation":[]},{"given":"Gautham J.","family":"Mysore","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1190"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1528"},{"key":"ref12","author":"kameoka","year":"2018","journal-title":"Acvae-vc Non-parallel many-to-many voice conversion with auxiliary classifier variational autoencoder"},{"key":"ref13","author":"kameoka","year":"2018","journal-title":"Stargan-vc Non-parallel many-to-many voice conversion with star generative adversarial networks"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461384"},{"key":"ref15","article-title":"One-to-many voice conversion based on tensor representation of speaker space","author":"saito","year":"2011","journal-title":"Twelfth Annual Conference of the International Speech Communication Association"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2010.5495139"},{"key":"ref17","first-page":"5210","article-title":"AutoVC: Zero-shot voice style transfer with only autoencoder loss","author":"qian","year":"2019","journal-title":"International Conference on Machine Learning"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPA.2016.7820786"},{"key":"ref19","author":"huang","year":"2018","journal-title":"Voice conversion based on cross-domain features using variational auto encoders"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2353991"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2005.1415037"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178896"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2014.7078543"},{"key":"ref8","article-title":"Wavenet: A generative model for raw audio","volume":"abs 1609 3499","author":"oord","year":"2016","journal-title":"CoRR"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPA.2017.8282025"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1998.674423"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"1138","DOI":"10.21437\/Interspeech.2017-986","article-title":"Statistical voice conversion with wavenet-based waveform generation","author":"kobayashi","year":"2017","journal-title":"InterSpeech"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/89.661472"},{"key":"ref20","author":"fang","year":"2018","journal-title":"High-quality nonparallel voice conversion based on cycle-consistent adversarial network"},{"key":"ref22","author":"hsu","year":"2017","journal-title":"Voice conversion from unaligned corpora using variational autoencoding wasserstein generative adversarial networks"},{"key":"ref21","author":"gao","year":"2018","journal-title":"Voice impersonation using generative adversarial networks"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-116"},{"key":"ref23","author":"chou","year":"2018","journal-title":"Multi-target voice conversion without parallel data by adversarially learning disentangled audio representations"},{"key":"ref26","author":"veaux","year":"2016","journal-title":"CSTR VCTK corpus English multi-speaker corpus for cstr voice cloning toolkit"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Barcelona, Spain","start":{"date-parts":[[2020,5,4]]},"end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09054734.pdf?arnumber=9054734","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:18:29Z","timestamp":1656375509000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9054734\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9054734","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}