{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,29]],"date-time":"2025-11-29T07:21:26Z","timestamp":1764400886358,"version":"3.46.0"},"reference-count":35,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,22]],"date-time":"2025-10-22T00:00:00Z","timestamp":1761091200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,22]],"date-time":"2025-10-22T00:00:00Z","timestamp":1761091200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001695","name":"JST","doi-asserted-by":"publisher","award":["JPMJSP2140"],"award-info":[{"award-number":["JPMJSP2140"]}],"id":[{"id":"10.13039\/501100001695","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100000646","name":"JSPS","doi-asserted-by":"publisher","award":["JP21H05054,JP23K21681,JP25H01139"],"award-info":[{"award-number":["JP21H05054,JP23K21681,JP25H01139"]}],"id":[{"id":"10.13039\/501100000646","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,22]]},"DOI":"10.1109\/apsipaasc65261.2025.11249368","type":"proceedings-article","created":{"date-parts":[[2025,11,28]],"date-time":"2025-11-28T18:40:26Z","timestamp":1764355226000},"page":"2541-2546","source":"Crossref","is-referenced-by-count":0,"title":["Rethinking Robust ASR Strategies: Can Textual in-Context Learning Improve Acoustic Robustness?"],"prefix":"10.1109","author":[{"given":"Benita Angela","family":"Titalim","sequence":"first","affiliation":[{"name":"Nara Institute of Science and Technology,Japan"}]},{"given":"Faisal","family":"Mehmood","sequence":"additional","affiliation":[{"name":"Nara Institute of Science and Technology,Japan"}]},{"given":"Sakriani","family":"Sakti","sequence":"additional","affiliation":[{"name":"Nara Institute of Science and Technology,Japan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.3726\/978-3-653-06377-6\/5"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2025.101821"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9687942"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2014.11.008"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP57327.2022.10037796"},{"journal-title":"Frustratingly easy noise-aware training of acoustic models","year":"2020","author":"Raj","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2014-488"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-225"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414639"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2015.7404837"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1186\/s13634-016-0306-6"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/ICSLP.2000-743"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/jproc.2020.3018668"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2907015"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-22482-4_11"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7177943"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1186\/s13636-024-00349-3"},{"journal-title":"Scaling speech technology to 1,000+ languages","year":"2023","author":"Pratap","key":"ref18"},{"journal-title":"Large language models are efficient learners of noise-robust speech recognition","year":"2024","author":"Hu","key":"ref19"},{"journal-title":"Seed-ASR: Understanding diverse speech and contexts with llm-based speech recognition","year":"2024","author":"Bai","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.618"},{"journal-title":"Language models are few-shot learners","year":"2020","author":"Brown","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.385"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.222"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.customnlp4u-1.15"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.968"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01785"},{"journal-title":"Incontext learning state vector with inner and momentum optimization","year":"2024","author":"Li","key":"ref28"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.395"},{"journal-title":"Salmonn: Towards generic hearing abilities for large language models","year":"2024","author":"Tang","key":"ref30"},{"key":"ref31","first-page":"1","author":"Radford","year":"2023","journal-title":"Robust speech recognition via large-scale weak supervision"},{"journal-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","year":"2023","author":"Li","key":"ref32"},{"key":"ref33","first-page":"6","volume":"2","author":"Chiang","year":"2023","journal-title":"Vicuna: An opensource chatbot impressing gpt- 4 with 90 % chatgpt quality"},{"journal-title":"Llama: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref34"},{"journal-title":"Lora: Low-rank adaptation of large language models","year":"2022","author":"Hu","key":"ref35"}],"event":{"name":"2025 Asia Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)","start":{"date-parts":[[2025,10,22]]},"location":"Singapore, Singapore","end":{"date-parts":[[2025,10,24]]}},"container-title":["2025 Asia Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11248853\/11248968\/11249368.pdf?arnumber=11249368","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,29]],"date-time":"2025-11-29T07:18:01Z","timestamp":1764400681000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11249368\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,22]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/apsipaasc65261.2025.11249368","relation":{},"subject":[],"published":{"date-parts":[[2025,10,22]]}}}