{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T07:03:16Z","timestamp":1775199796323,"version":"3.50.1"},"reference-count":47,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434657","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-7","source":"Crossref","is-referenced-by-count":0,"title":["Group Relative Policy Optimization for Speech Recognition"],"prefix":"10.1109","author":[{"given":"Prashanth Gurunath","family":"Shivakumar","sequence":"first","affiliation":[{"name":"Amazon Science,Seattle,U.S.A"}]},{"given":"Yile","family":"Gu","sequence":"additional","affiliation":[{"name":"Amazon Science,Seattle,U.S.A"}]},{"given":"Ankur","family":"Gandhe","sequence":"additional","affiliation":[{"name":"Amazon Science,Seattle,U.S.A"}]},{"given":"Ivan","family":"Bulyko","sequence":"additional","affiliation":[{"name":"Amazon Science,Seattle,U.S.A"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00430"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/icassp48485.2024.10447112"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.1055"},{"key":"ref4","article-title":"Lauragpt: Listen, attend, understand, and regenerate audio with gpt","author":"Du","year":"2023","journal-title":"arXiv 
preprint arXiv:2310.04673"},{"key":"ref5","article-title":"An embarrassingly simple approach for llm with strong asr capacity","author":"Ma","year":"2024","journal-title":"arXiv preprint arXiv:2402.08846"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389705"},{"key":"ref7","article-title":"Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models","author":"Chu","year":"2023","journal-title":"arXiv preprint arXiv:2311.07919"},{"key":"ref8","article-title":"Salmonn: Towards generic hearing abilities for large language models","author":"Tang","year":"2023","journal-title":"arXiv preprint arXiv:2310.13289"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10445874"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447605"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.459"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-acl.1190"},{"key":"ref13","article-title":"Hallucinations in neural automatic speech recognition: Identifying errors and hallucinatory models","author":"Frieske","year":"2024","journal-title":"arXiv preprint arXiv:2401.01572"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/0885-2308(90)90006-R"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2009.2032618"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2922617"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462656"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i11.26484"},{"key":"ref19","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv preprint arXiv:1707.06347"},{"key":"ref20","article-title":"Deepseekmath: Pushing the limits of mathematical reasoning in open language models","author":"Shao","year":"2024","journal-title":"arXiv 
preprint arXiv:2402.03300"},{"key":"ref21","article-title":"Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning","author":"Guo","year":"2025","journal-title":"arXiv preprint arXiv:2501.12948"},{"key":"ref22","article-title":"Dapo: An open-source llm reinforcement learning system at scale","author":"Yu","year":"2025","journal-title":"arXiv preprint arXiv:2503.14476"},{"key":"ref23","article-title":"Understanding r1-zero-like training: A critical perspective","author":"Liu","year":"2025","journal-title":"arXiv preprint arXiv:2503.20783"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.997"},{"key":"ref25","article-title":"Qwen2-audio technical report","volume-title":"arXiv preprint arXiv:2407.10759","author":"Chu","year":"2024"},{"key":"ref26","article-title":"Qwen2.5-omni technical report","volume-title":"arXiv preprint arXiv:2503.20215","author":"Xu","year":"2025"},{"key":"ref27","article-title":"Omni-r1: Reinforcement learning for omnimodal reasoning via two-system collaboration","author":"Zhong","year":"2025","journal-title":"arXiv preprint arXiv:2505.20256"},{"key":"ref28","article-title":"R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning","author":"Zhao","year":"2025","journal-title":"arXiv preprint arXiv:2503.05379"},{"key":"ref29","article-title":"Omni-r1: Do you really need audio to fine-tune your audio llm?","author":"Rouditchenko","year":"2025","journal-title":"arXiv preprint arXiv:2505.09439"},{"key":"ref30","article-title":"Sari: Structured audio reasoning via curriculum-guided reinforcement learning","author":"Wen","year":"2025","journal-title":"arXiv preprint arXiv:2504.15900"},{"key":"ref31","article-title":"Reinforcement learning outperforms supervised fine-tuning: A case study on audio question answering","author":"Li","year":"2025","journal-title":"arXiv preprint arXiv:2503.11197"},{"key":"ref32","article-title":"F5r-tts: Improving flow-matching 
based text-to-speech with group relative policy optimization","author":"Sun","year":"2025","journal-title":"arXiv preprint arXiv:2504.02407"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/656"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10023141"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2826"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.80"},{"key":"ref37","article-title":"The people\u2019s speech: A large-scale diverse english speech recognition dataset for commercial usage","author":"Galvez","year":"2021","journal-title":"arXiv preprint arXiv:2111.09344"},{"key":"ref38","article-title":"Common voice: A massively-multilingual speech corpus","author":"Ardila","year":"2019","journal-title":"arXiv preprint arXiv:1912.06670"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-2027"},{"key":"ref40","article-title":"The llama 3 herd of models","author":"Grattafiori","year":"2024","journal-title":"arXiv preprint arXiv:2407.21783"},{"key":"ref41","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2025-1242","article-title":"Durep: Dual-mode speech representation learning via asr-aware distillation","author":"Male","year":"2025"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2771"},{"key":"ref43","first-page":"30","article-title":"Spirit-lm: Interleaved spoken and written language model","volume":"13","author":"Nguyen","year":"2025","journal-title":"Transactions of the Association for Computational Linguistics"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447112"},{"key":"ref45","article-title":"RedPajama: an open dataset for training large language 
models","author":"Computer","year":"2023"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1007\/11677482_3"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-99579-3_21"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434657.pdf?arnumber=11434657","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:57:51Z","timestamp":1775192271000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434657\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434657","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}