{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,22]],"date-time":"2024-10-22T23:45:12Z","timestamp":1729640712994,"version":"3.28.0"},"reference-count":46,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T00:00:00Z","timestamp":1572566400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T00:00:00Z","timestamp":1572566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T00:00:00Z","timestamp":1572566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,11]]},"DOI":"10.1109\/apsipaasc47483.2019.9023277","type":"proceedings-article","created":{"date-parts":[[2020,3,6]],"date-time":"2020-03-06T17:03:54Z","timestamp":1583514234000},"page":"1282-1287","source":"Crossref","is-referenced-by-count":4,"title":["Many-to-many Cross-lingual Voice Conversion with a Jointly Trained Speaker Embedding Network"],"prefix":"10.1109","author":[{"given":"Yi","family":"Zhou","sequence":"first","affiliation":[]},{"given":"Xiaohai","family":"Tian","sequence":"additional","affiliation":[]},{"given":"Rohan Kumar","family":"Das","sequence":"additional","affiliation":[]},{"given":"Haizhou","family":"Li","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"The kaldi speech recognition toolkit","author":"povey","year":"2011","journal-title":"IEEE ASRU no EPFL-CONF-192584"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.21437\/Odyssey.2018-28"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1131"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2017.8269002"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-1043"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683746"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-1066"},{"key":"ref36","article-title":"The cmu arctic speech databases","author":"kominek","year":"2004","journal-title":"SSW"},{"key":"ref35","article-title":"CSTR VCTK corpus: English multi-speaker corpus for cstr voice cloning toolkit","author":"veaux","year":"2017","journal-title":"Centre for Speech Technology Research University of Edinburgh"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683874"},{"key":"ref10","doi-asserted-by":"crossref","first-page":"1635","DOI":"10.21437\/Interspeech.2009-488","article-title":"Cross-language voice conversion based on eigenvoices","author":"charlier","year":"2009","journal-title":"InterSpeech"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1587\/transinf.2015EDP7457"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2003.1318521"},{"key":"ref12","article-title":"A framework for cross-lingual voice conversion using artificial neural networks","author":"desai","year":"2009","journal-title":"7th ICON"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2353991"},{"key":"ref14","first-page":"1","article-title":"Phonetic pos-teriorgrams for many-to-one voice conversion without parallel data training","author":"sun","year":"2016","journal-title":"IEEE ICME"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472732"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953231"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-63"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Odyssey.2018-34"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639507"},{"key":"ref28","first-page":"1836","article-title":"Combining source and system information for limited data speaker verification","author":"das","year":"2014","journal-title":"InterSpeech"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1250\/ast.11.71"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2010.2064307"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2009.2038669"},{"key":"ref6","first-page":"1969","article-title":"Frame alignment method for cross-lingual voice conversion","author":"erro","year":"2007","journal-title":"InterSpeech"},{"key":"ref29","first-page":"2962","article-title":"Deep voice 2: Multi-speaker neural text-to-speech","author":"gibiansky","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1990.115676"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178897"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2011.5947509"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/s00034-015-0118-1"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/89.661472"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2011.2177820"},{"key":"ref46","article-title":"A flexible and modular crosslingual voice conversion system","author":"machado","year":"2014","journal-title":"ICMC"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2910637"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1002\/scj.20354"},{"key":"ref22","first-page":"1956","article-title":"A training method of average voice model for HMM-based speech synthesis","volume":"86","author":"yamagishi","year":"2003","journal-title":"IEICE Transactions on Fundamentals of Electronics Communications and Computer Sciences"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1357"},{"key":"ref42","first-page":"1537","article-title":"Transformation of prosody in voice conversion","author":"sisman","year":"2017","journal-title":"IEEE APSIPA ASC"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Odyssey.2018-32"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.21437\/SSW.2016-33"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPA.2016.7820901"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2000.861820"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2018.2878949"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.21437\/Odyssey.2018-38"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.23919\/APSIPA.2018.8659543"}],"event":{"name":"2019 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)","start":{"date-parts":[[2019,11,18]]},"location":"Lanzhou, China","end":{"date-parts":[[2019,11,21]]}},"container-title":["2019 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8989870\/9023008\/09023277.pdf?arnumber=9023277","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,17]],"date-time":"2022-10-17T21:52:16Z","timestamp":1666043536000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9023277\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,11]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/apsipaasc47483.2019.9023277","relation":{},"subject":[],"published":{"date-parts":[[2019,11]]}}}