{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T18:01:21Z","timestamp":1770832881601,"version":"3.50.1"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11228963","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["EmoAssist: Emotional Assistant for Visual Impairment Community"],"prefix":"10.1109","author":[{"given":"Xingyu","family":"Qi","sequence":"first","affiliation":[{"name":"Beijing University of Posts and Telecommunications"}]},{"given":"He","family":"Li","sequence":"additional","affiliation":[{"name":"Beijing University of Posts and Telecommunications"}]},{"given":"Linjie","family":"Li","sequence":"additional","affiliation":[{"name":"Beijing University of Posts and Telecommunications"}]},{"given":"Zhenyu","family":"Wu","sequence":"additional","affiliation":[{"name":"Beijing University of Posts and Telecommunications"}]}],"member":"263","reference":[{"key":"ref1","volume-title":"Blindness and vision impairment","year":"2024"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1136\/bjophthalmol-2017-311266"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.118720"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.3001500"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00312"},{"key":"ref6","article-title":"A multi-world approach to question answering about real-world scenes based on uncertain input","volume":"27","author":"Malinowski","year":"2014","journal-title":"Advances in neural information processing systems"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref8","volume-title":"Be My Eyes - See the world together \u2014 bemyeyes.com"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3656650.3656677"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW60793.2023.00200"},{"key":"ref11","article-title":"Vialm: A survey and benchmark of visually impaired assistance with large models","author":"Zhao","year":"2024"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/FMSys62467.2024.00010"},{"key":"ref13","article-title":"Long-Form Answers to Visual Questions from Blind and Low Vision People","author":"Huh","year":"2024"},{"key":"ref14","first-page":"74","article-title":"ROUGE: A Package for Automatic Evaluation of Summaries","volume-title":"Text Summarization Branches Out","author":"Lin","year":"2004"},{"key":"ref15","first-page":"228","article-title":"METEOR: An Automatic Metric for MT Evaluation with High Levels of Correlation with Human Judgments","volume-title":"Proceedings of the Second Workshop on Statistical Machine Translation","author":"Lavie"},{"key":"ref16","article-title":"Bertscore: Evaluating text generation with bert","author":"Zhang","year":"2019"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i5.28212"},{"key":"ref18","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1076\/vimr.4.1.47.15634"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/j.ics.2005.04.017"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.279"},{"key":"ref22","article-title":"Gpt-4o system card","author":"Hurst","year":"2024"},{"key":"ref23","article-title":"LoRA: Low-Rank Adaptation of Large Language Models","volume":"abs\/2106.09685","author":"Edward Hu","year":"2021"},{"key":"ref24","article-title":"Direct preference optimization: Your language model is secretly a reward model","volume":"36","author":"Rafailov","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref25","article-title":"Visual instruction tuning","volume":"36","author":"Liu","year":"2024","journal-title":"Advances in neural information processing systems"},{"key":"ref26","article-title":"Are you talking to a machine? dataset and methods for multilingual image question","volume":"28","author":"Gao","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.215"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1145\/1866029.1866080"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1177\/18344909231213958"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.326"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.128"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref33","volume-title":"mousi \u2014 mousi.org"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.78"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.195"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1177\/0264619612465168"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.72"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.883"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/p19-1534"},{"key":"ref40","article-title":"Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling","author":"Chen","year":"2024"},{"key":"ref41","article-title":"Qwen2-vl: Enhancing vision-language model\u2019s perception of the world at any resolution","author":"Wang","year":"2024"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","location":"Rome, Italy","start":{"date-parts":[[2025,6,30]]},"end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11228963.pdf?arnumber=11228963","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:14:20Z","timestamp":1763190860000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11228963\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11228963","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}