{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,13]],"date-time":"2025-10-13T00:22:23Z","timestamp":1760314943741,"version":"build-2065373602"},"publisher-location":"Cham","reference-count":37,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032079589","type":"print"},{"value":"9783032079596","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,10,13]],"date-time":"2025-10-13T00:00:00Z","timestamp":1760313600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,10,13]],"date-time":"2025-10-13T00:00:00Z","timestamp":1760313600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-07959-6_3","type":"book-chapter","created":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T09:22:21Z","timestamp":1760260941000},"page":"29-44","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Ground Truth-Free WER Prediction for\u00a0ASR via\u00a0Audio Quality and\u00a0Model Confidence Features"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-2216-0468","authenticated-orcid":false,"given":"Anton","family":"Polevoi","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0009-5152-887X","authenticated-orcid":false,"given":"Alexander","family":"Kragin","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1883-4121","authenticated-orcid":false,"given":"Natalia","family":"Loukachevitch","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,10,13]]},"reference":[{"issue":"4","key":"3_CR1","doi-asserted-by":"publisher","first-page":"745","DOI":"10.1109\/TASLP.2014.2304637","volume":"22","author":"J Li","year":"2014","unstructured":"Li, J., Deng, L., Gong, Y., Haeb-Umbach, R.: An overview of noise-robust automatic speech recognition. IEEE\/ACM Trans. Audio Speech Lang. Process. 22(4), 745\u2013777 (2014)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"3_CR2","unstructured":"Shah, M.A., Noguero, D.S., Heikkila, M.A., Raj, B., Kourtellis, N.: Speech robust bench: a robustness benchmark for speech recognition. arXiv preprint arXiv:2403.07937 (2024)"},{"key":"3_CR3","unstructured":"Radford, A., Kim, J.W., Xu, T., Brockman, G., McLeavey, C., Sutskever, I.: Robust speech recognition via large-scale weak supervision. In: International Conference on Machine Learning, pp. 28492\u201328518. PMLR (2023)"},{"key":"3_CR4","unstructured":"Baevski, A., Zhou, Y., Mohamed, A., Auli, M.: wav2vec 2.0: a framework for self-supervised learning of speech representations. Adv. Neural Inf. Process. Syst. 33, 12449\u201312460 (2020)"},{"key":"3_CR5","doi-asserted-by":"publisher","first-page":"3451","DOI":"10.1109\/TASLP.2021.3122291","volume":"29","author":"WN Hsu","year":"2021","unstructured":"Hsu, W.N., Bolte, B., Tsai, Y.H., Lakhotia, K., Salakhutdinov, R., Mohamed, A.: Hubert: self-supervised speech representation learning by masked prediction of hidden units. IEEE\/ACM Trans. Audio Speech Lang. Process. 29, 3451\u20133460 (2021)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. 
Process."},{"issue":"1","key":"3_CR6","doi-asserted-by":"publisher","first-page":"380","DOI":"10.1016\/j.csl.2012.07.004","volume":"27","author":"A Tsilfidis","year":"2013","unstructured":"Tsilfidis, A., Mporas, I., Mourjopoulos, J., Fakotakis, N.: Automatic speech recognition performance in different room acoustic environments with and without dereverberation preprocessing. Comput. Speech Lang. 27(1), 380\u2013395 (2013)","journal-title":"Comput. Speech Lang."},{"key":"3_CR7","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"160","DOI":"10.1007\/978-3-642-38847-7_21","volume-title":"Advances in Nonlinear Speech Processing","author":"E Loweimi","year":"2013","unstructured":"Loweimi, E., Ahadi, S.M., Drugman, T., Loveymi, S.: On the importance of pre-emphasis and window shape in phase-based speech recognition. In: Drugman, T., Dutoit, T. (eds.) NOLISP 2013. LNCS (LNAI), vol. 7911, pp. 160\u2013167. Springer, Heidelberg (2013). https:\/\/doi.org\/10.1007\/978-3-642-38847-7_21"},{"issue":"1","key":"3_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s13636-014-0047-0","volume":"2015","author":"S Yin","year":"2015","unstructured":"Yin, S., et al.: Noisy training for deep neural networks in speech recognition. EURASIP J. Audio Speech Music Process. 2015(1), 1\u201314 (2015). https:\/\/doi.org\/10.1186\/s13636-014-0047-0","journal-title":"EURASIP J. Audio Speech Music Process."},{"issue":"7","key":"3_CR9","doi-asserted-by":"publisher","first-page":"1315","DOI":"10.1109\/TASLP.2016.2545928","volume":"24","author":"C Kim","year":"2016","unstructured":"Kim, C., Stern, R.M.: Power-normalized cepstral coefficients (PNCC) for robust speech recognition. IEEE\/ACM Trans. Audio Speech Lang. Process. 24(7), 1315\u20131329 (2016)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"issue":"6","key":"3_CR10","doi-asserted-by":"publisher","first-page":"34","DOI":"10.1109\/MSP.2012.2207989","volume":"29","author":"RM Stern","year":"2012","unstructured":"Stern, R.M., Morgan, N.: Hearing is believing: biologically inspired methods for robust automatic speech recognition. IEEE Signal Process. Mag. 29(6), 34\u201343 (2012)","journal-title":"IEEE Signal Process. Mag."},{"key":"3_CR11","doi-asserted-by":"crossref","unstructured":"Ali, A., Renals, S.: Word error rate estimation for speech recognition: e-WER. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, vol. 2: Short Papers, pp. 20\u201324. ACL (2018)","DOI":"10.18653\/v1\/P18-2004"},{"key":"3_CR12","doi-asserted-by":"crossref","unstructured":"Ali, A., Renals, S.: Word error rate estimation without asr output: E-wer2. arXiv preprint arXiv:2008.03403 (2020)","DOI":"10.21437\/Interspeech.2020-2357"},{"key":"3_CR13","doi-asserted-by":"crossref","unstructured":"Chowdhury, S.A., Ali, A.: Multilingual word error rate estimation: e-WER3. In: ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 1\u20135. IEEE (2023)","DOI":"10.1109\/ICASSP49357.2023.10095888"},{"key":"3_CR14","unstructured":"Litman, D., Hirschberg, J., Swerts, M.: Predicting automatic speech recognition performance using prosodic cues. In: 1st Meeting of the North American Chapter of the Association for Computational Linguistics, pp. 1\u20138. 
ACL (2000)"},{"key":"3_CR15","unstructured":"Fish, R., Hu, Q., Boykin, S.: Using audio quality to predict word error rate in an automatic speech recognition system. Unpublished technical report (2006)"},{"key":"3_CR16","doi-asserted-by":"crossref","unstructured":"Gallardo, L.F., M\u00f6ller, S., Beerends, J.: Predicting automatic speech recognition performance over communication channels from instrumental speech quality and intelligibility scores. In: INTERSPEECH 2017, pp. 2939\u20132943. ISCA (2017)","DOI":"10.21437\/Interspeech.2017-36"},{"key":"3_CR17","unstructured":"Conneau, A., et al.: FLEURS: Few-shot Learning Evaluation of Universal Representations of Speech. arXiv preprint arXiv:2205.12446 (2022)"},{"key":"3_CR18","doi-asserted-by":"crossref","unstructured":"Panayotov, V., Chen, G., Povey, D., Khudanpur, S.: Librispeech: an ASR corpus based on public domain audio books. In: ICASSP 2015, pp. 5206\u20135210. IEEE (2015)","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"3_CR19","unstructured":"Beerends, J., et al.: Perceptual objective listening quality assessment (POLQA), the third generation ITU-T standard for end-to-end speech quality measurement part I-temporal alignment. AES: J. Audio Eng. Soc. 61(6), 366\u2013384 (2013)"},{"key":"3_CR20","doi-asserted-by":"crossref","unstructured":"Park, C., Lu, C., Chen, M., Hain, T.: Fast Word Error Rate Estimation Using Self-Supervised Representations for Speech and Text. arXiv preprint arXiv:2310.08225 (2025)","DOI":"10.1109\/ICASSP49660.2025.10890056"},{"key":"3_CR21","unstructured":"Park, C., Chen, M., Hain, T.: Automatic Speech Recognition System-Independent Word Error Rate Estimation. arXiv preprint arXiv:2404.16743 (2024)"},{"key":"3_CR22","doi-asserted-by":"crossref","unstructured":"Subakan, C., Ravanelli, M., Cornell, S., Grondin, F.: REAL-M: Towards Speech Separation on Real Mixtures. arXiv preprint arXiv:2110.10812 (2021)","DOI":"10.1109\/ICASSP43922.2022.9746662"},{"key":"3_CR23","doi-asserted-by":"publisher","unstructured":"Kim, C., Stern, R.: Robust signal-to-noise ratio estimation based on waveform amplitude distribution analysis. In: INTERSPEECH 2008, pp. 2598\u20132601. ISCA (2008). https:\/\/doi.org\/10.21437\/Interspeech.2008-644","DOI":"10.21437\/Interspeech.2008-644"},{"key":"3_CR24","doi-asserted-by":"publisher","unstructured":"Mittag, G., Naderi, B., Chehadi, A., M\u00f6ller, S.: NISQA: a deep CNN-self-attention model for multidimensional speech quality prediction with crowdsourced datasets. In: INTERSPEECH 2021, pp. 1\u20135. ISCA (2021). https:\/\/doi.org\/10.21437\/Interspeech.2021-299","DOI":"10.21437\/Interspeech.2021-299"},{"key":"3_CR25","doi-asserted-by":"publisher","unstructured":"Graves, A., Fern\u00e1ndez, S., Gomez, F., Schmidhuber, J.: Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks. In: ICML 2006, pp. 369\u2013376. ACM (2006). https:\/\/doi.org\/10.1145\/1143844.1143891","DOI":"10.1145\/1143844.1143891"},{"key":"3_CR26","unstructured":"Harper, E., Majumdar, S., Kuchaiev, O., et al.: NeMo: a toolkit for Conversational AI and Large Language Models. NVIDIA (2019). https:\/\/nvidia.github.io\/NeMo\/"},{"key":"3_CR27","doi-asserted-by":"crossref","unstructured":"Rekesh, D., Koluguri, N.R., Kriman, S., et al.: Fast Conformer with Linearly Scalable Attention for Efficient Speech Recognition. 
arXiv preprint arXiv:2305.05084 (2023)","DOI":"10.1109\/ASRU57964.2023.10389701"},{"issue":"48","key":"3_CR28","doi-asserted-by":"publisher","first-page":"E7856","DOI":"10.1073\/pnas.1612524113","volume":"113","author":"J Traer","year":"2016","unstructured":"Traer, J., McDermott, J.H.: Statistics of natural reverberation enable perceptual separation of sound and space. Proc. Natl. Acad. Sci. 113(48), E7856\u2013E7865 (2016). https:\/\/doi.org\/10.1073\/pnas.1612524113","journal-title":"Proc. Natl. Acad. Sci."},{"issue":"1","key":"3_CR29","doi-asserted-by":"publisher","first-page":"269","DOI":"10.1007\/s10772-021-09950-9","volume":"25","author":"L Bouchakour","year":"2021","unstructured":"Bouchakour, L., Debyeche, M.: Noise-robust speech recognition in mobile network based on convolution neural networks. Int. J. Speech Technol. 25(1), 269\u2013277 (2021). https:\/\/doi.org\/10.1007\/s10772-021-09950-9","journal-title":"Int. J. Speech Technol."},{"issue":"1","key":"3_CR30","doi-asserted-by":"publisher","first-page":"670","DOI":"10.5753\/jis.2024.4267","volume":"15","author":"J Duarte","year":"2024","unstructured":"Duarte, J., Colcher, S.: Noise-robust automatic speech recognition: a case study for communication interference. J. Interact. Syst. 15(1), 670\u2013681 (2024). https:\/\/doi.org\/10.5753\/jis.2024.4267","journal-title":"J. Interact. Syst."},{"key":"3_CR31","doi-asserted-by":"publisher","unstructured":"Chen, G., O\u2019Shaughnessy, D., Tolba, H.: A performance investigation of noisy voice recognition over IP telephony networks. In: INTERSPEECH 2005, pp. 2681\u20132684. ISCA (2005). https:\/\/doi.org\/10.21437\/Interspeech.2005-259","DOI":"10.21437\/Interspeech.2005-259"},{"key":"3_CR32","doi-asserted-by":"publisher","unstructured":"Jordal, I., Tamazian, A., Dhyani, T., et al.: iver56\/audiomentations: v0.39.0. Zenodo (2025). https:\/\/doi.org\/10.5281\/zenodo.14856562","DOI":"10.5281\/zenodo.14856562"},{"issue":"7993","key":"3_CR33","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1038\/s41586-024-08328-6","volume":"625","author":"N Hollmann","year":"2025","unstructured":"Hollmann, N., M\u00fcller, S., Purucker, L., et al.: Accurate predictions on small data with a tabular foundation model. Nature 625(7993), 1\u20139 (2025). https:\/\/doi.org\/10.1038\/s41586-024-08328-6","journal-title":"Nature"},{"key":"3_CR34","unstructured":"Hollmann, N., M\u00fcller, S., Eggensperger, K., Hutter, F.: TabPFN: a transformer that solves small tabular classification problems in a second. In: ICLR 2023 (2023)"},{"key":"3_CR35","unstructured":"Prokhorenkova, L., Gusev, G., Vorobev, A., Dorogush, A.V., Gulin, A.: CatBoost: unbiased boosting with categorical features. arXiv preprint arXiv:1706.09516 (2019)"},{"key":"3_CR36","unstructured":"Vakhrushev, A., Ryzhkov, A., Savchenko, M., Simakov, D., Damdinov, R., Tuzhilin, A.: LightAutoML: AutoML Solution for a Large Financial Services Ecosystem. arXiv preprint arXiv:2109.01528 (2022)"},{"issue":"14","key":"3_CR37","doi-asserted-by":"publisher","first-page":"7684","DOI":"10.1073\/pnas.1915768117","volume":"117","author":"A Koenecke","year":"2020","unstructured":"Koenecke, A., et al.: Racial disparities in automated speech recognition. PNAS 117(14), 7684\u20137689 (2020). 
https:\/\/doi.org\/10.1073\/pnas.1915768117","journal-title":"PNAS"}],"container-title":["Lecture Notes in Computer Science","Speech and Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-07959-6_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T09:22:28Z","timestamp":1760260948000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-07959-6_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,13]]},"ISBN":["9783032079589","9783032079596"],"references-count":37,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-07959-6_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,10,13]]},"assertion":[{"value":"13 October 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"SPECOM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Speech and Computer","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Szeged","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Hungary","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13 October 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 October 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"specom2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/specom.inf.u-szeged.hu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
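The record above is a Crossref "work" object as returned by the public Crossref REST API. As a quick illustration of how this structure can be consumed, the following is a minimal Python sketch (standard library only, assuming network access to api.crossref.org) that fetches the same record for this chapter's DOI and reads a few of the fields shown above; the User-Agent mailto address is a placeholder, and error handling is omitted.

# Minimal sketch, not part of the record: fetch this chapter's Crossref metadata
# and read a few fields. Field names follow the JSON shown above.
import json
import urllib.request

DOI = "10.1007/978-3-032-07959-6_3"
url = "https://api.crossref.org/works/" + DOI
# Crossref asks polite API users to identify themselves; the mailto below is a placeholder.
req = urllib.request.Request(url, headers={"User-Agent": "example-script/0.1 (mailto:you@example.org)"})

with urllib.request.urlopen(req) as resp:
    record = json.load(resp)          # same shape as above: {"status": "ok", ..., "message": {...}}

work = record["message"]              # the work metadata lives under "message"
title = work["title"][0]              # Crossref stores titles as lists
authors = [f'{a.get("given", "")} {a.get("family", "")}'.strip() for a in work.get("author", [])]
container = " / ".join(work.get("container-title", []))
references = work.get("reference", [])

print(title)
print("; ".join(authors))
print(container)                      # e.g. "Lecture Notes in Computer Science / Speech and Computer"
print(f'{work.get("references-count")} references cited, {len(references)} deposited with the record')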