{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:29:17Z","timestamp":1775230157296,"version":"3.50.1"},"reference-count":75,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Institute of Information &amp; Communications Technology Planning &amp; Evaluation","award":["2021-0-00456"],"award-info":[{"award-number":["2021-0-00456"]}]},{"name":"Development of Ultra-high Speech Quality Technology for Remote Multi-speaker Conference System"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/taslp.2023.3343615","type":"journal-article","created":{"date-parts":[[2023,12,15]],"date-time":"2023-12-15T19:49:34Z","timestamp":1702669774000},"page":"891-905","source":"Crossref","is-referenced-by-count":2,"title":["Partitioning Attention Weight: Mitigating Adverse Effect of Incorrect Pseudo-Labels for Self-Supervised ASR"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-3717-2988","authenticated-orcid":false,"given":"Jae-Hong","family":"Lee","sequence":"first","affiliation":[{"name":"School of Electronics, Hanyang University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2610-2323","authenticated-orcid":false,"given":"Joon-Hyuk","family":"Chang","sequence":"additional","affiliation":[{"name":"School of Electronics, Hanyang University, Seoul, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2012.2205597"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638947"},{"key":"ref3","volume-title":"Statistical Methods for Speech Recognition","author":"Jelinek","year":"1997"},{"key":"ref4","article-title":"Attention-based models for speech recognition","volume":"28","author":"Chorowski","year":"2015","journal-title":"Proc. Adv. Neural Inf. Process. Syst."},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472621"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462506"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref9","first-page":"173","article-title":"Deep speech 2 : End-to-end speech recognition in english and mandarin","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Amodei","year":"2015"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053889"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052942"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.2009.2015974"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.1965.1053799"},{"key":"ref14","article-title":"Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks","volume-title":"Proc. Workshop Challenges Representation Learn.","volume":"3","author":"Lee","year":"2013"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683690"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054295"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383552"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1800"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11034"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-740"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746832"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-571"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746249"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-2258"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3306709"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.5555\/3045390.3045502"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1470"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054295"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3301230"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003981"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414414"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/icassp39728.2021.9414058"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462331"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10330"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462497"},{"key":"ref36","first-page":"6649","article-title":"T-GSA: Transformer with Gaussian-weighted self-attention for speech enhancement","volume-title":"Proc. IEEE Int. Conf. Acoust. Speech Signal Process.","author":"Jaeyoung","year":"2020"},{"key":"ref37","first-page":"5869","article-title":"Capturing multi-resolution context by dilated self-attention","volume-title":"Proc. IEEE Int. Conf. Acoust. Speech Signal Process.","author":"Niko","year":"2021"},{"key":"ref38","article-title":"Representation learning with contrastive predictive coding","author":"Oord","year":"2018"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1873"},{"key":"ref40","article-title":"vq-wav2vec: Self-supervised learning of discrete speech representations","author":"Baevski","year":"2020","journal-title":"Proc. Int. Conf. Learn. Representations"},{"key":"ref41","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Baevski","year":"2020"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688253"},{"key":"ref43","first-page":"1298","article-title":"data2vec: A general framework for self-supervised learning in speech, vision and language","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Baevski","year":"2022"},{"key":"ref44","article-title":"Efficient self-supervised learning with contextualized target representations for vision, speech and language","author":"Baevski","year":"2022","journal-title":"Proc. Int. Conf. Mach. Learn."},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3188113"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414641"},{"key":"ref48","article-title":"Pushing the limits of semi-supervised learning for automatic speech recognition","author":"Zhang","year":"2020","journal-title":"Proc. Int. Conf. Neural Inf. Process. Syst."},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-60276-5_27"},{"key":"ref50","article-title":"The HTK hidden Markov model toolkit: Design and philosophy","author":"Young","year":"1993"},{"key":"ref51","article-title":"The Kaldi speech recognition toolkit","volume-title":"Proc. IEEE Autom. Speech Recognit. Understanding Workshop","author":"Povey","year":"2011"},{"key":"ref52","first-page":"192","article-title":"Prosodylab-Aligner: A tool for forced alignment of laboratory speech","volume":"39","author":"Gorman","year":"2011","journal-title":"Can. Acoust."},{"key":"ref53","article-title":"Signal processing via web services: The use case WebMAUS","volume-title":"Proc. Dig. Humanities Conf.","author":"Kisler","year":"2012"},{"key":"ref54","article-title":"FAVE (forced alignment and vowel extraction) suite version 1.1.3","author":"Rosenfelder","year":"2014"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1386"},{"key":"ref56","first-page":"5998","article-title":"Attention is all you need","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Vaswani","year":"2017"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003750"},{"key":"ref58","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proc. North Amer. Chapter Assoc. Comput. Linguistics - Hum. Lang. Technol.","author":"Devlin","year":"2019"},{"key":"ref59","article-title":"Deep Speech: Scaling up end-to-end speech recognition","author":"Hannun","year":"2014"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-711"},{"key":"ref61","article-title":"Vocal tract length perturbation (VTLP) improves speech recognition","volume-title":"Proc. ICML Workshop Deep Learn. Audio, Speech, Lang. Process.","author":"Jaitly","year":"2013"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1510"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2680"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1470"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-1679"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-99579-3_21"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-4009"},{"key":"ref69","article-title":"Adam: A method for stochastic optimization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kingma","year":"2015"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683535"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414299"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10565"},{"key":"ref73","article-title":"Pseudo-labeling curriculum for unsupervised domain adaptation","volume-title":"Proc. Brit. Mach. Vis. Conf.","author":"Choi","year":"2019"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-411"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746310"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6570655\/10304349\/10361275.pdf?arnumber=10361275","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,12]],"date-time":"2024-01-12T22:35:20Z","timestamp":1705098920000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10361275\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":75,"URL":"https:\/\/doi.org\/10.1109\/taslp.2023.3343615","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}