{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T20:33:27Z","timestamp":1776890007914,"version":"3.51.2"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T00:00:00Z","timestamp":1760227200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T00:00:00Z","timestamp":1760227200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,12]]},"DOI":"10.1109\/waspaa66052.2025.11230944","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:47Z","timestamp":1763146007000},"page":"1-5","source":"Crossref","is-referenced-by-count":3,"title":["Unveiling the Best Practices for Applying Speech Foundation Models to Speech Intelligibility Prediction for Hearing-Impaired People"],"prefix":"10.1109","author":[{"given":"Haoshuai","family":"Zhou","sequence":"first","affiliation":[{"name":"Orka Labs Inc.,China"}]},{"given":"Boxuan","family":"Cao","sequence":"additional","affiliation":[{"name":"Orka Labs Inc.,China"}]},{"given":"Changgeng","family":"Mo","sequence":"additional","affiliation":[{"name":"Orka Labs Inc.,China"}]},{"given":"Linkai","family":"Li","sequence":"additional","affiliation":[{"name":"Orka Labs Inc.,China"}]},{"given":"Shan Xiang","family":"Wang","sequence":"additional","affiliation":[{"name":"Stanford University,Electrical Engineering,United States"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10821"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446441"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2014.06.002"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1044\/2021_JSLHR-21-00111"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1121\/1.4977197"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.3389\/fnins.2022.789565"},{"key":"ref7","article-title":"On the opportunities and risks of foundation models","volume":"abs\/2108.07258","author":"Bommasani","year":"2021"},{"key":"ref8","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume":"abs\/2006.11477","author":"Baevski","year":"2020"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref10","article-title":"Language models are few-shot learners","volume":"abs\/2005.14165","author":"Brown","year":"2020"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-1775"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447907"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447597"},{"key":"ref14","article-title":"What do speech foundation models not learn about speech?","volume":"abs\/2410.12948","author":"Waheed","year":"2024"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/WASPAA58266.2023.10248049"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-2294"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389701"},{"key":"ref18","article-title":"Attention is all you need","volume-title":"Neural Information Processing Systems","author":"Vaswani"},{"key":"ref19","article-title":"Efficient sequence transduction by jointly predicting tokens and durations","volume":"abs\/2304.06795","author":"Xu","year":"2023"},{"key":"ref20","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"International Conference on Machine Learning","author":"Radford"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389676"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-1194"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10022656"},{"key":"ref24","article-title":"Phi-4-mini technical report: Compact yet powerful multimodal language models via mixture-of-loras","volume":"abs\/2503.01743","author":"Abouelenin","year":"2025"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-2193"},{"key":"ref26","article-title":"Open automatic speech recognition leaderboard","author":"Srivastav","year":"2023"}],"event":{"name":"2025 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)","location":"Tahoe City, CA, USA","start":{"date-parts":[[2025,10,12]]},"end":{"date-parts":[[2025,10,15]]}},"container-title":["2025 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11230875\/11230917\/11230944.pdf?arnumber=11230944","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:19:15Z","timestamp":1763191155000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11230944\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,12]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/waspaa66052.2025.11230944","relation":{},"subject":[],"published":{"date-parts":[[2025,10,12]]}}}