{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T19:35:31Z","timestamp":1730230531564,"version":"3.28.0"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9054010","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T20:21:13Z","timestamp":1586463673000},"page":"7754-7758","source":"Crossref","is-referenced-by-count":0,"title":["An Improved Frame-Unit-Selection Based Voice Conversion System Without Parallel Training Data"],"prefix":"10.1109","author":[{"given":"Feng-Long","family":"Xie","sequence":"first","affiliation":[]},{"given":"Xin-Hui","family":"Li","sequence":"additional","affiliation":[]},{"given":"Bo","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Yi-Bin","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Li","family":"Meng","sequence":"additional","affiliation":[]},{"given":"Li","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Frank 
K.","family":"Soong","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1255"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1016\/S0167-6393(98)00085-5"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729694"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/FSKD.2007.347"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2018.11.007"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1705"},{"key":"ref37","doi-asserted-by":"crossref","first-page":"195","DOI":"10.21437\/Odyssey.2018-28","article-title":"The voice conversion challenge 2018: Promoting development of parallel and nonparallel methods","author":"lorenzo-trueba","year":"2018","journal-title":"Proc Speaker Odyssey"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854672"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-275"},{"key":"ref34","article-title":"The CMU ARCTIC databases for speech synthesis","author":"kominek","year":"2003","journal-title":"Tech Rep CMU-LTI-03-177 Language Technologies Institute Carnegie Mellon University"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683282"},{"key":"ref11","article-title":"A first step towards text-independent voice conversion","author":"sundermann","year":"2004","journal-title":"Proc ICSLP"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2006.1659962"},{"key":"ref13","article-title":"Voice conversion for unknown speakers","author":"ye","year":"2004","journal-title":"Proc 
ICSLP"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2009.2038669"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2010.2041688"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6855140"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-116"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICME.2016.7552917"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1190"},{"key":"ref28","first-page":"2410","article-title":"Efficient neural audio synthesis","author":"kalchbrenner","year":"2018","journal-title":"Proc ICML"},{"key":"ref4","first-page":"2283","article-title":"sequence error (SE) minimization training of neural network for voice conversion","author":"xie","year":"2014","journal-title":"Proc INTERSPEECH"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2010.2047683"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178896"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682804"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2353991"},{"key":"ref8","doi-asserted-by":"crossref","first-page":"1506","DOI":"10.1109\/TASLP.2014.2333242","article-title":"Exemplar-based sparse representation with residual compensation for voice conversion","volume":"22","author":"wu","year":"2014","journal-title":"IEEE\/ACM Transactions on Audio Speech and Language 
Processing"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1587\/transfun.E96.A.1946"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2007.907344"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/89.661472"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2892235"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462258"},{"key":"ref22","first-page":"677","article-title":"Sparse representation of phonetic features for voice conversion with and without parallel data","author":"\u00e7i\u015fman","year":"2017","journal-title":"Proc ASRU"},{"key":"ref21","doi-asserted-by":"crossref","first-page":"3364","DOI":"10.21437\/Interspeech.2017-63","article-title":"Voice conversion from unaligned corpora using variational autoencoding Wasserstein generative adversarial networks","author":"hsu","year":"2017","journal-title":"Proc INTERSPEECH"},{"article-title":"WaveNet: A generative model for raw audio","year":"2016","author":"van den oord","key":"ref24"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-247"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-986"},{"key":"ref25","doi-asserted-by":"crossref","first-page":"1118","DOI":"10.21437\/Interspeech.2017-314","article-title":"Speaker-dependent WaveNet vocoder","author":"tamamori","year":"2017","journal-title":"Proc INTERSPEECH"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2020,5,4]]},"location":"Barcelona, Spain","end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing 
(ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09054010.pdf?arnumber=9054010","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:21:05Z","timestamp":1656375665000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9054010\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9054010","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}