{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T20:20:00Z","timestamp":1740169200552,"version":"3.37.3"},"reference-count":23,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100010226","name":"Department of Education of Guangdong Province through Special Innovation Program","doi-asserted-by":"publisher","award":["2015KTSCX183"],"award-info":[{"award-number":["2015KTSCX183"]}],"id":[{"id":"10.13039\/501100010226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005015","name":"South China University of Technology through \u201cDevelopment Fund\u201d","doi-asserted-by":"publisher","award":["x2js-F8150310"],"award-info":[{"award-number":["x2js-F8150310"]}],"id":[{"id":"10.13039\/501100005015","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2020]]},"DOI":"10.1109\/access.2020.3034253","type":"journal-article","created":{"date-parts":[[2020,10,27]],"date-time":"2020-10-27T19:48:59Z","timestamp":1603828139000},"page":"196578-196586","source":"Crossref","is-referenced-by-count":2,"title":["One-Shot Voice Conversion Algorithm Based on Representations Separation"],"prefix":"10.1109","volume":"8","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8297-9934","authenticated-orcid":false,"given":"Chunhui","family":"Deng","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9325-2543","authenticated-orcid":false,"given":"Ying","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6394-4496","authenticated-orcid":false,"given":"Huifang","family":"Deng","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"2519","article-title":"Hidden Markov model based voice conversion using dynamic characteristics of speaker","author":"kim","year":"1997","journal-title":"Proc Conf Int Voice Commun Assoc"},{"key":"ref11","article-title":"WaveNet: A generative model for raw audio","author":"van den oord","year":"2016","journal-title":"arXiv 1609 03499"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2892235"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1190"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-63"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639535"},{"key":"ref16","article-title":"Parallel-Data-Free voice conversion using cycle-consistent adversarial networks","author":"kaneko","year":"2017","journal-title":"arXiv 1711 11293"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1830"},{"key":"ref18","article-title":"AUTOVC: Zero-shot voice style transfer with only autoencoder loss","author":"qian","year":"2019","journal-title":"arXiv 1905 05879"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462665"},{"key":"ref4","first-page":"35","article-title":"Improvement of voice conversion algorism based on codebook mapping","volume":"43","author":"fang","year":"2015","journal-title":"Microprocessors"},{"key":"ref3","first-page":"665","article-title":"Quality improvement of voice conversion systems based on trellis structured vector quantization","author":"eslami","year":"2011","journal-title":"Proc Conference Int Voice Commun Assoc"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2001.941046"},{"key":"ref5","first-page":"279","article-title":"Straight-based voice conversion algorithm based on Gaussian mixture model","author":"toda","year":"2000","journal-title":"Proc Conf Int Voice Commun Assoc"},{"key":"ref8","first-page":"1470","article-title":"A method for voice conversion based on Viterbi algorithm","volume":"37","author":"zhihua","year":"2009","journal-title":"Acta Electronica Sinica"},{"key":"ref7","first-page":"2266","article-title":"Maximum likelihood voice conversion based on GMM with STRAIGHT mixed excitation","volume":"5","author":"ohtani","year":"2006","journal-title":"Proc 9th Int Conf Spoken Lang Process INTERSPEECH (ICSLP)"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ISCAS.1991.176405"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1121\/1.402284"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP.2010.5684869"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref22","first-page":"4480","article-title":"Transfer learning from speaker verification to multispeaker text-to-speech synthesis","author":"jia","year":"2018","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-950"},{"key":"ref23","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"maaten","year":"2008","journal-title":"J Mach Learn Res"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8948470\/09240913.pdf?arnumber=9240913","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,12]],"date-time":"2022-01-12T01:08:51Z","timestamp":1641949731000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9240913\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/access.2020.3034253","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2020]]}}}