{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T06:20:31Z","timestamp":1774419631032,"version":"3.50.1"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,4,6]]},"DOI":"10.1109\/icassp49660.2025.10888198","type":"proceedings-article","created":{"date-parts":[[2025,3,12]],"date-time":"2025-03-12T17:15:02Z","timestamp":1741799702000},"page":"1-5","source":"Crossref","is-referenced-by-count":8,"title":["AER-LLM: Ambiguity-aware Emotion Recognition Leveraging Large Language Models"],"prefix":"10.1109","author":[{"given":"Xin","family":"Hong","sequence":"first","affiliation":[{"name":"Systems University of Melbourne,School of Computing and Information,Melbourne,Australia"}]},{"given":"Yuan","family":"Gong","sequence":"additional","affiliation":[{"name":"Institute of Technology,CSAIL SLS Massachusetts,Boston,USA"}]},{"given":"Vidhyasaharan","family":"Sethu","sequence":"additional","affiliation":[{"name":"University of New South Wales,School of Electrical Engineering and Telecommunications,Sydney,Australia"}]},{"given":"Ting","family":"Dang","sequence":"additional","affiliation":[{"name":"Systems University of Melbourne,School of Computing and Information,Melbourne,Australia"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448316"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/S19-2006"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/S19-2045"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1155\/2022\/2645381"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448130"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-2282"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ACII59096.2023.10388119"},{"key":"ref8","article-title":"Instructerc: Reforming emotion recognition in conversation with a retrieval multi-task llms framework","author":"Lei","year":"2023"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.4135\/9781483349619.n4"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.11591\/eei.v10i5.3157"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s10115-020-01449-0"},{"key":"ref12","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref13","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023"},{"key":"ref14","article-title":"Ckerc: Joint large language models with commonsense knowledge for emotion recognition in conversation","author":"Fu","year":"2024"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3675094.3678494"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3145287"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.114"},{"key":"ref18","article-title":"Gemini: a family of highly capable multimodal models","author":"Anil","year":"2023"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2015.2457417"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2444"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.372"},{"key":"ref23","article-title":"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context","author":"Reid","year":"2024"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2021.3086050"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10022800"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1037\/emo0000558"}],"event":{"name":"ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Hyderabad, India","start":{"date-parts":[[2025,4,6]]},"end":{"date-parts":[[2025,4,11]]}},"container-title":["ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10887540\/10887541\/10888198.pdf?arnumber=10888198","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T05:23:50Z","timestamp":1774416230000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10888198\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,6]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icassp49660.2025.10888198","relation":{},"subject":[],"published":{"date-parts":[[2025,4,6]]}}}