{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T21:54:55Z","timestamp":1775253295666,"version":"3.50.1"},"reference-count":404,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"China NSFC projects","award":["62401377"],"award-info":[{"award-number":["62401377"]}]},{"name":"China NSFC projects","award":["62271432"],"award-info":[{"award-number":["62271432"]}]},{"name":"Internal Project of Shenzhen Research Institute of Big Data","award":["T00120220002"],"award-info":[{"award-number":["T00120220002"]}]},{"name":"Internal Project of Shenzhen Research Institute of Big Data","award":["J00220230014"],"award-info":[{"award-number":["J00220230014"]}]},{"name":"Shenzhen Science and Technology Program","award":["ZDSYS20230626091302006"],"award-info":[{"award-number":["ZDSYS20230626091302006"]}]},{"name":"Shenzhen Science and Technology Research Fund","award":["JCYJ20220818103001002"],"award-info":[{"award-number":["JCYJ20220818103001002"]}]},{"name":"China NSFC projects","award":["62122050"],"award-info":[{"award-number":["62122050"]}]},{"name":"China NSFC projects","award":["62071288"],"award-info":[{"award-number":["62071288"]}]},{"name":"Shanghai Municipal Science and Technology Commission 
Project","award":["2021SHZDZX0102"],"award-info":[{"award-number":["2021SHZDZX0102"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/taslp.2024.3492793","type":"journal-article","created":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T19:20:28Z","timestamp":1732216828000},"page":"4971-4998","source":"Crossref","is-referenced-by-count":19,"title":["Overview of Speaker Modeling and Its Applications: From the Lens of Deep Speaker Representation Learning"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1523-9631","authenticated-orcid":false,"given":"Shuai","family":"Wang","sequence":"first","affiliation":[{"name":"Shenzhen Research Institute of Big Data, School of Data Science, The Chinese University of Hong Kong, Shenzhen, China"}]},{"given":"Zhengyang","family":"Chen","sequence":"additional","affiliation":[{"name":"Auditory Cognition and Computational Acoustics Lab, Department of Computer Science and Engineering and MoE Key Laboratory of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9133-3000","authenticated-orcid":false,"given":"Kong Aik","family":"Lee","sequence":"additional","affiliation":[{"name":"Department of Electrical and Electronic Engineering, The Hong Kong Polytechnic University, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0314-3790","authenticated-orcid":false,"given":"Yanmin","family":"Qian","sequence":"additional","affiliation":[{"name":"Auditory Cognition and Computational Acoustics Lab, Department of Computer Science and Engineering and MoE Key Laboratory of Artificial Intelligence, AI Institute, Shanghai Jiao Tong University, Shanghai, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9158-9401","authenticated-orcid":false,"given":"Haizhou","family":"Li","sequence":"additional","affiliation":[{"name":"Shenzhen Research Institute of Big Data, School of Data Science, The Chinese University of Hong Kong, Shenzhen, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1166\/asem.2018.2219"},{"issue":"2","key":"ref2","article-title":"Speaker recognition for surveillance application","volume":"8","author":"Kiktova","year":"2015","journal-title":"J. Elect. Electron. Eng."},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472820"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2008.931100"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2021.03.004"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2015.2462851"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-17641-8_18"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2009.08.009"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-01793-3_106"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.sigpro.2007.11.017"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2021.101317"},{"key":"ref12","first-page":"10040","article-title":"Neural voice cloning with a few samples","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Arik","year":"2018"},{"key":"ref13","first-page":"4485","article-title":"Transfer learning from speaker verification to multispeaker text-to-speech synthesis","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"31","author":"Jia","year":"2018"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-557"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/SSW.2019-28"},{"key":"ref16","article-title":"The voiceprivacy 2024 challenge evaluation plan","author":"Tomashenko","year":"2024"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1602"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054311"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2023.3240008"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1121\/1.1916342"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1983.1172258"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1002\/j.1538-7305.1987.tb00198.x"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/78.80876"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1990.115629"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1991.150360"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1991.150357"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/89.260362"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICNN.1997.614225"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1006\/dspr.1999.0361"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/PROC.1985.13340"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.1983.1056716"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21236\/ada164453"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/89.279278"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.21437\/Eurospeech.2003-759"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2006.870086"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2006.1659966"},{"key":"ref37","article-title":"Joint factor analysis of speaker and 
session variability: Theory and algorithms","author":"Kenny","year":"2005"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TSA.2004.840940"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2006.881693"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2010.2064307"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/11744085_41"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461375"},{"key":"ref43","article-title":"But system description to voxceleb speaker recognition challenge","author":"Zeinali","year":"2019"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2021.3091932"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6639344"},{"key":"ref46","first-page":"1106","article-title":"Imagenet classification with deep convolutional neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"25","author":"Krizhevsky","year":"2012"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2010-343"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2013.50"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854363"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-81"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2016.7846260"},{"key":"ref52","article-title":"Deep speaker: An end-to-end neural speaker embedding system","author":"Li","year":"2017"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472652"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-620"},{"key":"ref55","article-title":"The Kaldi speech recognition toolkit","volume-title":"Proc. IEEE 2011 Workshop Autom. Speech Recognit. Understanding, IEEE Signal Process. 
Soc.","author":"Povey","year":"2011"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2650"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1513"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.21437\/Odyssey.2018-11"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPAASC47483.2019.9023039"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1064"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2928128"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1158"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP49672.2021.9362097"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683120"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1016\/j.compeleceng.2021.107005"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3084299"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-3015"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-563"},{"key":"ref69","article-title":"Neural machine translation by jointly learning to align and 
translate","author":"Bahdanau","year":"2014"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP.2018.8706589"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639586"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-02698-1_20"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/29.21701"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1769"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383531"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/Confluence52989.2022.9734175"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447141"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-484"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3385277"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1294"},{"key":"ref83","article-title":"Unisound system for voxceleb speaker recognition challenge 2023","author":"Zheng","year":"2023"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref85","first-page":"4171","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2019","journal-title":"NAACL-HLT (1)"},{"key":"ref86","article-title":"An image is worth 16x16 words: Transformers for image recognition at 
scale","author":"Dosovitskiy","year":"2020"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3134566"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1446"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746050"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746639"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-88"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2023.3342714"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096333"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-402"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2023-697"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448107"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053871"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-3036"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10022924"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-1982"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1011"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-126"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639585"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389750"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-1570"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747021"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1275"},{"key":"ref108","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-1707"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10023305"},{"key":"ref110","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447037"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2023.101600"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688119"},{"key":"ref113","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682712"},{"key":"ref114","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1025"},{"key":"ref115","doi-asserted-by":"publisher","DOI":"10.3390\/app121910154"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747172"},{"key":"ref117","doi-asserted-by":"publisher","DOI":"10.1007\/s00034-024-02666-6"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3402072"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2007.10.005"},{"key":"ref120","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746084"},{"key":"ref121","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3313431"},{"key":"ref122","doi-asserted-by":"publisher","DOI":"10.21437\/odyssey.2020-65"},{"key":"ref123","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053110"},{"key":"ref124","article-title":"Knowing what to listen to: Early attention for deep speech representation 
learning","author":"Hajavi","year":"2020"},{"key":"ref125","doi-asserted-by":"publisher","DOI":"10.5555\/3524938.3525087"},{"key":"ref126","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"ref127","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref128","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2023-1202"},{"key":"ref129","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3197315"},{"key":"ref130","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3198315"},{"key":"ref131","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414713"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-742"},{"key":"ref133","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10022470"},{"key":"ref134","article-title":"vq-wav2vec: Self-supervised learning of discrete speech representations","author":"Baevski","year":"2019"},{"key":"ref135","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"33","author":"Baevski","year":"2020"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref137","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3188113"},{"key":"ref138","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747814"},{"key":"ref139","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP.2018.8706570"},{"key":"ref140","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1545"},{"key":"ref141","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2018.2822810"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683649"},{"key":"ref143","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00482"},{"key":"ref144","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"ref145","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1608"},{"key":"ref146","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2018.2831456"},{"key":"ref147","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462508"},{"key":"ref148","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.145"},{"key":"ref149","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682255"},{"key":"ref150","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46478-7_31"},{"key":"ref151","doi-asserted-by":"publisher","DOI":"10.1126\/science.1127647"},{"key":"ref152","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.278"},{"key":"ref153","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2842"},{"key":"ref154","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2921890"},{"key":"ref155","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2380"},{"key":"ref156","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413351"},{"key":"ref157","article-title":"Augmentation adversarial training for self-supervised speaker 
recognition","author":"Huh","year":"2020"},{"key":"ref158","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414973"},{"key":"ref159","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3331949"},{"key":"ref160","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2019.101027"},{"key":"ref161","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref162","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-126"},{"key":"ref163","article-title":"The IDLAB VoxCeleb speaker recognition challenge 2020 system description","author":"Thienpondt","year":"2020"},{"key":"ref164","article-title":"The jhu submission to voxsrc-21: Track 3","author":"Cho","year":"2021"},{"key":"ref165","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10094610"},{"key":"ref166","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3162078"},{"key":"ref167","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747162"},{"key":"ref168","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1440"},{"key":"ref169","doi-asserted-by":"publisher","DOI":"10.21437\/FFSVC.2022-3"},{"key":"ref170","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447138"},{"key":"ref171","first-page":"1641","article-title":"Semi-supervised contrastive learning with generalized contrastive loss and its application to speaker recognition","volume-title":"Proc. 2020 IEEE Asia-Pacific Signal Inf. Process. Assoc. Annu. Summit Conf.","author":"Inoue","year":"2020"},{"key":"ref172","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389802"},{"key":"ref173","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1209"},{"key":"ref174","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-1873"},{"key":"ref175","first-page":"1298","article-title":"Data2vec: A general framework for self-supervised learning in speech, vision and language","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Baevski","year":"2022"},{"key":"ref176","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-1775"},{"key":"ref177","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1280"},{"key":"ref178","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746952"},{"key":"ref179","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10094795"},{"key":"ref180","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096659"},{"key":"ref181","first-page":"176","article-title":"Multimodal person recognition using unconstrained audio and video","volume-title":"Proc. Int. Conf. Audio Video-Based Person Authentication","author":"Choudhury","year":"1999"},{"key":"ref182","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-69568-4_23"},{"key":"ref183","doi-asserted-by":"publisher","DOI":"10.1109\/MMUL.2006.37"},{"key":"ref184","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2007-157"},{"key":"ref185","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2009.4959999"},{"key":"ref186","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683477"},{"key":"ref187","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2229"},{"key":"ref188","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3057230"},{"key":"ref189","doi-asserted-by":"publisher","DOI":"10.1109\/FG47880.2020.00074"},{"key":"ref190","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-394"},{"key":"ref191","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095883"},{"key":"ref192","doi-asserted-by":"publisher","DOI":"10.1109\/TBIOM.2023.3346938"},{"key":"ref193","doi-asserted-by":"publisher","DOI":"10.1109\/DICTA47822.2019.8945863"},{"key":"ref194","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240601"},{"key":"ref195","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-20873-8_18"},{"key":"ref196","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00879"},{"key":"ref197","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01261-8_5"},{"key":"ref198","article-title":"Disjoint mapping network for cross-modal matching of voices and faces","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wen","year":"2019"},{"key":"ref199","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1996"},{"key":"ref200","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1814"},{"key":"ref201","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-2119"},{"key":"ref202","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i11.26525"},{"key":"ref203","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096814"},{"key":"ref204","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-885"},{"key":"ref205","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3268568"},{"key":"ref206","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054333"},{"key":"ref207","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682259"},{"key":"ref208","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682611"},{"key":"ref209","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683616"},{"key":"ref210","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2662"},{"key":"ref211","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2007.366909"},{"key":"ref212","doi-asserted-by":"publisher","DOI":"10.21437\/FFSVC.2022-3"},{"key":"ref213","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053905"},{"key":"ref214","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1125"},{"key":"ref215","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3267833"},{"key":"ref216","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003826"},{"key":"ref217","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3065202"},{"key":"ref218","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-617"},{"key":"ref219","first-page":"50221","article-title":"Disentangling voice and content with self-supervision for speaker recognition","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Liu"},{"key":"ref220","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1216"},{"key":"ref221","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2226"},{"key":"ref222","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3030499"},{"key":"ref223","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414261"},{"key":"ref224","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-967"},{"key":"ref225","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2023.3280851"},{"key":"ref226","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-591"},{"key":"ref227","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683828"},{"key":"ref228","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682488"},{"key":"ref229","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054601"},{"key":"ref230","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053323"},{"key":"ref231","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-19530-3_11"},{"key":"ref232","doi-asserted-by":"publisher","DOI":"10.1109\/ICB.2012.6199796"},{"key":"ref233","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2016.2602542"},{"key":"ref234","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-648"},{"key":"ref235","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953216"},{"key":"ref236","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054317"},{"key":"ref237","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1700"},{"key":"ref238","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1498"},{"key":"ref239","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10445819"},{"key":"ref240","doi-asserted-by":"publisher","DOI":"10.1121\/1.415166"},{"key":"ref241","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i18.17994"},{"key":"ref242","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.clinicalnlp-1.27"},{"key":"ref243","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-2240"},{"key":"ref244","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1489"},{"key":"ref245","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1283"},{"key":"ref246","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054036"},{"key":"ref247","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9004029"},{"key":"ref248","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-2868"},{"key":"ref249","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-430"},{"key":"ref250","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2018-1680"},{"key":"ref251","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2022.01.002"},{"key":"ref252","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746294"},{"key":"ref253","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-2021"},{"key":"ref254","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053585"},{"key":"ref255","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3169977"},{"key":"ref256","article-title":"On the impact of the quality of pseudo-labels on the self-supervised speaker verification task","volume-title":"Proc. 
NeurIPS ENLSP Workshop","author":"Fathan","year":"2022"},{"key":"ref257","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-742"},{"key":"ref258","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-600"},{"key":"ref259","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSPW59220.2023.10193337"},{"key":"ref260","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683443"},{"key":"ref261","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP57327.2022.10038276"},{"key":"ref262","doi-asserted-by":"publisher","DOI":"10.1109\/itaic58329.2023.10409026"},{"key":"ref263","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854828"},{"key":"ref264","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054471"},{"key":"ref265","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9207519"},{"key":"ref266","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3273417"},{"key":"ref267","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746247"},{"key":"ref268","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-927"},{"key":"ref269","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1524"},{"key":"ref270","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-600"},{"key":"ref271","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-800"},{"key":"ref272","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746529"},{"key":"ref273","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-913"},{"key":"ref274","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447160"},{"key":"ref275","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2022.09.014"},{"key":"ref276","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPAASC58517.2023.10317337"},{"key":"ref277","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3338533"},{"key":"ref278","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3182856"},{"key":"ref279","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447161"},{"key":"ref280","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003979"},{"key":"ref281","doi-asserted-by":"publisher","DOI":"10.1109\/ICDMW58026.2022.00120"},{"key":"ref282","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-1775"},{"key":"ref283","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-1212"},{"key":"ref284","doi-asserted-by":"publisher","DOI":"10.21437\/odyssey.2020-66"},{"key":"ref285","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683332"},{"key":"ref286","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414600"},{"key":"ref287","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10094744"},{"key":"ref288","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446422"},{"key":"ref289","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096149"},{"key":"ref290","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3089943"},{"key":"ref291","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2023-1298"},{"key":"ref292","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1286"},{"key":"ref293","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446329"},{"key":"ref294","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-92627-8_17"},{"key":"ref295","first-page":"5338","article-title":"Concept bottleneck models","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Koh","year":"2020"},{"key":"ref296","article-title":"Explainable attribute-based speaker verification","author":"Wu","year":"2024"},{"key":"ref297","doi-asserted-by":"publisher","DOI":"10.21437\/chime.2020\u20139"},{"key":"ref298","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054535"},{"key":"ref299","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10094752"},{"key":"ref300","first-page":"5530","article-title":"Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kim","year":"2021"},{"key":"ref301","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11671"},{"key":"ref302","first-page":"3942","article-title":"Adaspeech: Adaptive text to speech for custom voice","volume-title":"Proc. 9th Int. Conf. Learn. Representations, ICLR 2021, Virtual Event","author":"Chen","year":"2021"},{"key":"ref303","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.167"},{"key":"ref304","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096449"},{"key":"ref305","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2013.2264673"},{"key":"ref306","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2014.7078610"},{"key":"ref307","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2899"},{"key":"ref308","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003959"},{"key":"ref309","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3162080"},{"key":"ref310","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1022"},{"key":"ref311","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383555"},{"key":"ref312","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1228"},{"key":"ref313","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3366756"},{"key":"ref314","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2023.3279781"},{"key":"ref315","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095185"},{"key":"ref316","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747772"},{"key":"ref317","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446072"},{"key":"ref318","article-title":"Target speech diarization with multimodal prompts","author":"Jiang","year":"2024"},{"key":"ref319","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854823"},{"key":"ref320","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3195113"},{"key":"ref321","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461932"},{"key":"ref322","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-1856"},{"key":"ref323","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3243690"},{"key":"ref324","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1632"},{"key":"ref325","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-10054"},{"key":"ref326","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1032"},{"key":"ref327","article-title":"Neural codec language models are zero-shot text to speech synthesizers","author":"Wang","year":"2023"},{"key":"ref328","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i16.29747"},{"key":"ref329","article-title":"Vall-t: Decoder-only generative transducer for robust and decoding-controllable 
text-to-speech","author":"Du","year":"2024"},{"key":"ref330","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3038524"},{"key":"ref331","doi-asserted-by":"publisher","DOI":"10.21437\/odyssey.2020-62"},{"key":"ref332","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-856"},{"key":"ref333","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2019.2922820"},{"key":"ref334","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.2987429"},{"key":"ref335","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1397"},{"key":"ref336","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102550"},{"key":"ref337","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-1101"},{"key":"ref338","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10097210"},{"key":"ref339","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095609"},{"key":"ref340","article-title":"3d-speaker: A large-scale multi-device, multi-distance, and multi-dialect corpus for speech representation disentanglement","author":"Zheng","year":"2023"},{"key":"ref341","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2018-1456"},{"key":"ref342","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1983"},{"key":"ref343","article-title":"Speechbrain: A general-purpose speech toolkit","author":"Ravanelli","year":"2021"},{"key":"ref344","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414676"},{"key":"ref345","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096626"},{"key":"ref346","first-page":"93","article-title":"The darpa speech recognition research database: Specifications and status","volume-title":"Proc. 
DARPA Workshop speech Recognit.","author":"Fisher","year":"1986"},{"key":"ref347","doi-asserted-by":"publisher","DOI":"10.1016\/B978-044481607-8\/50088-8"},{"key":"ref348","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1992.225858"},{"key":"ref349","doi-asserted-by":"publisher","DOI":"10.3989\/loquens.2014.007"},{"key":"ref350","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2019.101032"},{"key":"ref351","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2014.03.001"},{"key":"ref352","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-95"},{"key":"ref353","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref354","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-1129"},{"key":"ref355","doi-asserted-by":"publisher","DOI":"10.1109\/ICSDA.2017.8384449"},{"key":"ref356","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2017-950"},{"key":"ref357","article-title":"Aishell-2: Transforming mandarin ASR research into industrial scale","author":"Du","year":"2018"},{"key":"ref358","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1454"},{"key":"ref359","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-1837"},{"key":"ref360","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052942"},{"key":"ref361","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054423"},{"key":"ref362","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053258"},{"key":"ref363","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054017"},{"key":"ref364","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2826"},{"key":"ref365","article-title":"The FFSVC 2020 evaluation plan","author":"Qin","year":"2020"},{"key":"ref366","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746833"},{"key":"ref367","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413815"},{"key":"ref368","article-title":"NIST SRE CTS superset: A 
large-scale dataset for telephony speaker recognition","author":"Sadjadi","year":"2021"},{"key":"ref369","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1965"},{"key":"ref370","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746682"},{"key":"ref371","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1083"},{"key":"ref372","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446780"},{"key":"ref373","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448292"},{"key":"ref374","article-title":"Scaling laws for neural language models","author":"Kaplan","year":"2020"},{"key":"ref375","article-title":"The speakin system for voxceleb speaker recognition challange 2021","author":"Zhao","year":"2021"},{"key":"ref376","article-title":"ID R&D system description to VoxCeleb speaker recognition challenge 2022","author":"Makarov","year":"2022","journal-title":"ID R&D Inc.: New York, NY, USA"},{"key":"ref377","article-title":"The xx205 system for the VoxCeleb speaker recognition challenge 2020","author":"Xiang","year":"2020"},{"key":"ref378","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-1345"},{"key":"ref379","article-title":"3D-speaker-toolkit: An open source toolkit for multi-modal speaker verification and diarization","author":"Chen","year":"2024"},{"key":"ref380","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2022.101362"},{"key":"ref381","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3190741"},{"key":"ref382","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3313429"},{"key":"ref383","doi-asserted-by":"publisher","DOI":"10.56553\/popets-2023-0007"},{"key":"ref384","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10022601"},{"key":"ref385","first-page":"11421","article-title":"Synvox2: Towards a privacy-friendly voxceleb2 dataset","volume-title":"Proc. IEEE Int. Conf. Acoust., Speech Signal Process. 
(ICASSP)","author":"Miao","year":"2024"},{"key":"ref386","first-page":"28492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2023"},{"key":"ref387","article-title":"Base tts: Lessons from building a billion-parameter text-to-speech model on 100 k hours of data","author":"\u0141ajszczak","year":"2024"},{"key":"ref388","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023"},{"key":"ref389","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref390","article-title":"Qwen technical report","author":"Bai","year":"2023"},{"key":"ref391","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.1055"},{"key":"ref392","article-title":"GPT-4O: The cutting-edge advancement in multimodal LLM","author":"Islam","year":"2024","journal-title":"Authorea Preprints"},{"key":"ref393","article-title":"SALMONN: Towards generic hearing abilities for large language models","volume-title":"Proc. 12th Int. Conf. Learn. Representations","author":"Tang","year":"2024"},{"key":"ref394","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096285"},{"key":"ref395","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3402088"},{"key":"ref396","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389772"},{"key":"ref397","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-1465"},{"key":"ref398","doi-asserted-by":"publisher","DOI":"10.1017\/ATSIP.2019.21"},{"key":"ref399","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1052"},{"key":"ref400","first-page":"5180","article-title":"Style tokens: Unsupervised style modeling, control and transfer in end-to-end speech synthesis","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Wang","year":"2018"},{"key":"ref401","article-title":"Flowtron: An autoregressive flow-based generative network for text-to-speech synthesis","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Valle","year":"2021"},{"key":"ref402","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-208"},{"key":"ref403","article-title":"Naturalspeech 3: Zero-shot speech synthesis with factorized codec and diffusion models","volume-title":"Proc. 41st Int. Conf. Mach. Learn.","author":"Ju","year":"2024"},{"key":"ref404","article-title":"Speechtokenizer: Unified speech tokenizer for speech large language models","volume-title":"Proc. 12th Int. Conf. Learn. Representations","author":"Zhang","year":"2024"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6570655\/10304349\/10760244.pdf?arnumber=10760244","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T07:49:51Z","timestamp":1732693791000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10760244\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":404,"URL":"https:\/\/doi.org\/10.1109\/taslp.2024.3492793","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}