{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,15]],"date-time":"2026-04-15T18:09:03Z","timestamp":1776276543078,"version":"3.50.1"},"reference-count":34,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,7,15]],"date-time":"2024-07-15T00:00:00Z","timestamp":1721001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,7,15]],"date-time":"2024-07-15T00:00:00Z","timestamp":1721001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,7,15]]},"DOI":"10.1109\/icme57554.2024.10688322","type":"proceedings-article","created":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T17:24:16Z","timestamp":1727717056000},"page":"1-6","source":"Crossref","is-referenced-by-count":4,"title":["Boosting Multi-Speaker Expressive Speech Synthesis with Semi-Supervised Contrastive Learning"],"prefix":"10.1109","author":[{"given":"Xinfa","family":"Zhu","sequence":"first","affiliation":[{"name":"School of Computer Science, Northwestern Polytechnical University,Audio, Speech and Language Processing Group (ASLP@NPU),Xi&#x2019;an,China"}]},{"given":"Yuke","family":"Li","sequence":"additional","affiliation":[{"name":"School of Computer Science, Northwestern Polytechnical University,Audio, Speech and Language Processing Group (ASLP@NPU),Xi&#x2019;an,China"}]},{"given":"Yi","family":"Lei","sequence":"additional","affiliation":[{"name":"School of Computer Science, Northwestern Polytechnical University,Audio, Speech and Language Processing Group (ASLP@NPU),Xi&#x2019;an,China"}]},{"given":"Ning","family":"Jiang","sequence":"additional","affiliation":[{"name":"Mashang Consumer Finance Co., Ltd"}]},{"given":"Guoqing","family":"Zhao","sequence":"additional","affiliation":[{"name":"Mashang Consumer Finance Co., Ltd"}]},{"given":"Lei","family":"Xie","sequence":"additional","affiliation":[{"name":"School of Computer Science, Northwestern Polytechnical University,Audio, Speech and Language Processing Group (ASLP@NPU),Xi&#x2019;an,China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"A survey on neural speech synthesis","volume":"abs\/2106.15561","author":"Tan","year":"2021"},{"key":"ref2","article-title":"Fastspeech 2: Fast and high-quality end-to-end text to speech","volume-title":"Proc. ICLR","author":"Ren"},{"key":"ref3","first-page":"5530","article-title":"Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech","volume-title":"Proc. ICML","author":"Kim"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1854"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3145293"},{"key":"ref6","first-page":"7748","article-title":"Meta-stylespeech : Multi-speaker adaptive text-to-speech generation","volume-title":"Proc. ICML","author":"Min"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP49672.2021.9362069"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3164181"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-58347-1_10"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-283"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2022.3203888"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095776"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11275"},{"key":"ref14","first-page":"1597","article-title":"A simple framework for contrastive learning of visual representations","volume-title":"Proc. ICML. 2020, vol. 119 of Proceedings of Machine Learning Research","author":"Chen"},{"key":"ref15","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. ICML","volume":"139","author":"Radford"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.2307\/j.ctt4cgngj.10"},{"key":"ref17","first-page":"559","article-title":"Mulan: A joint embedding of music audio and natural language","volume-title":"Proc. ISMIR","author":"Huang"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.518"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/HSI55341.2022.9869453"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413910"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.3390\/technologies9010002"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612053"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3197315"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/SMC52423.2021.9658830"},{"key":"ref26","first-page":"1779","article-title":"CLUB: A contrastive log-ratio upper bound of mutual information","volume-title":"Proc. ICML","author":"Cheng"},{"key":"ref27","article-title":"Controllable and lossless non-autoregressive end-to-end text-to-speech","author":"Liu","year":"2022","journal-title":"CoRR"},{"key":"ref28","article-title":"An hmm-based system for automatic segmentation and alignment of speech","author":"Sj\u00f6lander","year":"2003"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2650"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1983"},{"key":"ref31","first-page":"2579","article-title":"Visualizing data using t-sne","volume":"9","author":"Van der Maaten","year":"2008"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/1520340.1520483"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2005-379"},{"key":"ref34","article-title":"Speak foreign languages with your own voice: Cross-lingual neural codec language modeling","author":"Zhang","year":"2023","journal-title":"CoRR"}],"event":{"name":"2024 IEEE International Conference on Multimedia and Expo (ICME)","location":"Niagara Falls, ON, Canada","start":{"date-parts":[[2024,7,15]]},"end":{"date-parts":[[2024,7,19]]}},"container-title":["2024 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10685847\/10687354\/10688322.pdf?arnumber=10688322","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T05:57:29Z","timestamp":1727762249000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10688322\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,15]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/icme57554.2024.10688322","relation":{},"subject":[],"published":{"date-parts":[[2024,7,15]]}}}