{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:46:40Z","timestamp":1776887200972,"version":"3.51.2"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,4]]},"DOI":"10.1109\/icassp49357.2023.10097147","type":"proceedings-article","created":{"date-parts":[[2023,5,5]],"date-time":"2023-05-05T17:28:30Z","timestamp":1683307710000},"page":"1-5","source":"Crossref","is-referenced-by-count":18,"title":["Self-Supervised Representations for Singing Voice Conversion"],"prefix":"10.1109","author":[{"given":"Tejas","family":"Jayashankar","sequence":"first","affiliation":[{"name":"Massachusetts Institute of Technology"}]},{"given":"Jilong","family":"Wu","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Leda","family":"Sari","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"David","family":"Kant","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Vimal","family":"Manohar","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Qing","family":"He","sequence":"additional","affiliation":[{"name":"Meta AI"}]}],"member":"263","reference":[{"key":"ref24","article-title":"Robust speech representation learning via flow-based embedding regularization","author":"kang","year":"2021"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052942"},{"key":"ref12","article-title":"Towards high-fidelity singing voice conversion with acoustic reference and contrastive predictive coding","author":"wang","year":"2021"},{"key":"ref15","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume":"33","author":"baevski","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref14","article-title":"Speech resynthesis from discrete dis-entangled self-supervised representations","author":"polyak","year":"2021"},{"key":"ref20","article-title":"Children&#x2019;s song dataset for singing voice research","author":"choi","year":"2020","journal-title":"International Society for Music Information Retrieval Conference (ISMIR)"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref11","article-title":"Un-supervised cross-domain singing voice conversion","author":"polyak","year":"2020"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054199"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461329"},{"key":"ref2","article-title":"Recent development of the hmm-based singing voice synthesis system&#x2014;sinsy","author":"oura","year":"2010","journal-title":"Seventh ISCA Workshop on Speech Synthesis"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2006-584"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref16","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPA.2013.6694316"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2558"},{"key":"ref8","first-page":"17022","article-title":"Hifigan: Generative adversarial networks for efficient and high fidelity speech synthesis","volume":"33","author":"kong","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref7","first-page":"2410","article-title":"Efficient neural audio synthesis","author":"kalchbrenner","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1761"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2014-539"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6853599"},{"key":"ref6","article-title":"Wavenet: A generative model for raw audio","author":"van den oord","year":"2016"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-580"}],"event":{"name":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Rhodes Island, Greece","start":{"date-parts":[[2023,6,4]]},"end":{"date-parts":[[2023,6,10]]}},"container-title":["ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10094559\/10094560\/10097147.pdf?arnumber=10097147","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,20]],"date-time":"2023-11-20T18:57:37Z","timestamp":1700506657000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10097147\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,4]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icassp49357.2023.10097147","relation":{},"subject":[],"published":{"date-parts":[[2023,6,4]]}}}