{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T13:51:04Z","timestamp":1762869064034},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,4]]},"DOI":"10.1109\/icassp49357.2023.10094850","type":"proceedings-article","created":{"date-parts":[[2023,5,5]],"date-time":"2023-05-05T17:28:30Z","timestamp":1683307710000},"source":"Crossref","is-referenced-by-count":11,"title":["ACE-VC: Adaptive and Controllable Voice Conversion Using Explicitly Disentangled Self-Supervised Speech Representations"],"prefix":"10.1109","author":[{"given":"Shehzeen","family":"Hussain","sequence":"first","affiliation":[{"name":"University of California,San Diego,CA,USA"}]},{"given":"Paarth","family":"Neekhara","sequence":"additional","affiliation":[{"name":"University of California,San Diego,CA,USA"}]},{"given":"Jocelyn","family":"Huang","sequence":"additional","affiliation":[{"name":"NVIDIA Corporation,Santa Clara,CA,USA"}]},{"given":"Jason","family":"Li","sequence":"additional","affiliation":[{"name":"NVIDIA Corporation,Santa Clara,CA,USA"}]},{"given":"Boris","family":"Ginsburg","sequence":"additional","affiliation":[{"name":"NVIDIA Corporation,Santa Clara,CA,USA"}]}],"member":"263","reference":[{"key":"ref13","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech 
representations","author":"baevski","year":"2020","journal-title":"NeurIPS"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746430"},{"key":"ref15","article-title":"Multitask voice activated framework using self-supervised learning","author":"hussain","year":"2022","journal-title":"ICASSP"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413699"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-475"},{"key":"ref2","doi-asserted-by":"crossref","DOI":"10.1016\/j.specom.2017.01.008","article-title":"An overview of voice conversion systems","author":"mohammadi","year":"2017","journal-title":"Speech Communication"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2011-268"},{"key":"ref17","article-title":"Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis","author":"kong","year":"2020","journal-title":"NeurIPS"},{"key":"ref16","article-title":"Signature verification using a \"siamese\" time delay neural network","author":"bromley","year":"1993","journal-title":"NIPS"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.713"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref24","author":"koluguri","year":"2020","journal-title":"SpeakerNet 1D Depth-wise Separable Convolutional Network for Text-Independent Speaker Recognition and Verification"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053889"},{"key":"ref25","author":"gu","year":"2021","journal-title":"Mediumvc Any-to-any voice conversion using synthetic specific-speaker speeches as intermedium 
features"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2019.101027"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1121\/1.1458024"},{"key":"ref21","article-title":"Fastpitch: Parallel text-to-speech with pitch prediction","author":"lańcucki","year":"2021","journal-title":"ICASSP"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Odyssey.2018-32"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICME.2016.7552917"},{"key":"ref9","article-title":"On generative spoken language modeling from raw audio","author":"lakhotia","year":"2021","journal-title":"Transactions of the Association for Computational Linguistics"},{"key":"ref4","article-title":"Autovc: Zero-shot voice style transfer with only autoencoder loss","author":"qian","year":"2019","journal-title":"ICML"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2663"},{"key":"ref6","article-title":"Yourtts: Towards zero-shot multi-speaker tts and zero-shot voice conversion for everyone","author":"casanova","year":"2022","journal-title":"ICML"},{"key":"ref5","article-title":"Neural analysis and synthesis: Reconstructing speech from self-supervised representations","author":"choi","year":"2021","journal-title":"NeurIPS"}],"event":{"name":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Rhodes Island, Greece","start":{"date-parts":[[2023,6,4]]},"end":{"date-parts":[[2023,6,10]]}},"container-title":["ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing 
(ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10094559\/10094560\/10094850.pdf?arnumber=10094850","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,20]],"date-time":"2023-11-20T19:09:17Z","timestamp":1700507357000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10094850\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,4]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icassp49357.2023.10094850","relation":{},"subject":[],"published":{"date-parts":[[2023,6,4]]}}}