{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T21:53:57Z","timestamp":1772574837626,"version":"3.50.1"},"reference-count":57,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2023YFC3604704"],"award-info":[{"award-number":["2023YFC3604704"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62176084"],"award-info":[{"award-number":["62176084"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62176083"],"award-info":[{"award-number":["62176083"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
Affective Comput."],"published-print":{"date-parts":[[2026,1]]},"DOI":"10.1109\/taffc.2025.3612991","type":"journal-article","created":{"date-parts":[[2025,9,22]],"date-time":"2025-09-22T17:45:05Z","timestamp":1758563105000},"page":"262-276","source":"Crossref","is-referenced-by-count":0,"title":["EaNet: Enhanced Multimodal Awareness Alignment Network for Multimodal Aspect-Based Sentiment Analysis"],"prefix":"10.1109","volume":"17","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2452-6862","authenticated-orcid":false,"given":"Aoqiang","family":"Zhu","sequence":"first","affiliation":[{"name":"Anhui Province Key Laboratory of Affective Computing and Advanced Intelligent Machine, School of Computer and Information, Hefei University of Technology, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2122-0240","authenticated-orcid":false,"given":"Min","family":"Hu","sequence":"additional","affiliation":[{"name":"Anhui Province Key Laboratory of Affective Computing and Advanced Intelligent Machine, School of Computer and Information, Hefei University of Technology, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1751-2291","authenticated-orcid":false,"given":"Xiaohua","family":"Wang","sequence":"additional","affiliation":[{"name":"Anhui Province Key Laboratory of Affective Computing and Advanced Intelligent Machine, School of Computer and Information, Hefei University of Technology, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8076-0805","authenticated-orcid":false,"given":"Yan","family":"Xing","sequence":"additional","affiliation":[{"name":"School of Mathematics, Hefei University of Technology, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0917-2277","authenticated-orcid":false,"given":"Yiming","family":"Tang","sequence":"additional","affiliation":[{"name":"Anhui Province Key Laboratory of Affective Computing and Advanced Intelligent Machine, School of Computer and Information, Hefei University of Technology, Hefei, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0233-590X","authenticated-orcid":false,"given":"Jiaoyun","family":"Yang","sequence":"additional","affiliation":[{"name":"National Smart Eldercare International Science and Technology Cooperation Base, School of Computer Science and Information Engineering, Hefei University of Technology, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3317-5299","authenticated-orcid":false,"given":"Ning","family":"An","sequence":"additional","affiliation":[{"name":"National Smart Eldercare International Science and Technology Cooperation Base, School of Computer Science and Information Engineering, Hefei University of Technology, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4860-9184","authenticated-orcid":false,"given":"Fuji","family":"Ren","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.188"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.360"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681163"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.1075"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2022.3230975"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-023-10555-8"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.473"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i16.17687"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.3301371"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475692"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102552"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.
152"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.735"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2022.3171091"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-acl.761"},{"key":"ref16","article-title":"An image is worth 16 x 16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Dosovitskiy","year":"2021"},{"key":"ref17","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","volume":"139","author":"Radford","year":"2021"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29852"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.63317\/58q6gxohm4v4"},{"key":"ref20","first-page":"6678","article-title":"Joint multimodal aspect sentiment analysis with aspect enhancement and syntactic adaptive learning","volume-title":"Proc. 33rd Int. Joint Conf. Artif. Intell.","author":"Zhu","year":"2024"},{"key":"ref21","first-page":"32897","article-title":"Vlmo: Unified vision-language pre-training with mixture-of-modality-experts","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"35","author":"Bao","year":"2022"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.519"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2022.09.025"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-60450-9_12"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413650"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i15.17633"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2957872"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/751"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.275"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.306"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.109"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i2.32161"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102304"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2025.111369"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/taffc.2025.3565506"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1525\/9780520940420-020"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i7.16796"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1609\/icwsm.v8i1.14550"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-27674-8_2"},{"key":"ref40","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"33","author":"Brown","year":"2020"},{"key":"ref41","article-title":"DeepSentiBank: Visual sentiment concept classification with deep convolutional neural networks","author":"Chen","year":"2014"},{"key":"ref42","first-page":"313","article-title":"Multimodal aspect-based sentiment analysis under conditional relation","volume-title":"Proc. 31st Int. Conf. Comput. Linguistics","author":"Liu","year":"2025"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.561"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.112331"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/icme57554.2024.10687372"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.671"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i7.25971"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3611899"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.403"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.63317\/2f3apkqkqukh"},{"key":"ref51","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref52","doi-asserted-by":"crossref","first-page":"320","DOI":"10.18653\/v1\/2022.acl-long.26","article-title":"GLM: General language model pretraining with autoregressive blank infilling","volume-title":"Proc. 60th Annu. Meeting Assoc. Comput. Linguistics (Volume 1: Long Papers)","author":"Du","year":"2022"},{"key":"ref53","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"ref55","article-title":"Mmicl: Empowering vision-language model with multi-modal in-context learning","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Zhao","year":"2024"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01239"},{"key":"ref57","article-title":"Gpt-4 technical report","author":"Achiam","year":"2024"}],"container-title":["IEEE Transactions on Affective Computing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/5165369\/11418735\/11175556.pdf?arnumber=11175556","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T20:53:09Z","timestamp":1772571189000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11175556\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1]]},"references-count":57,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/taffc.2025.3612991","relation":{},"ISSN":["1949-3045","2371-9850"],"issn-type":[{"value":"1949-3045","type":"electronic"},{"value":"2371-9850","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,1]]}}}