{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T21:35:09Z","timestamp":1770845709348,"version":"3.50.1"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,15]],"date-time":"2025-12-15T00:00:00Z","timestamp":1765756800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,15]],"date-time":"2025-12-15T00:00:00Z","timestamp":1765756800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,15]]},"DOI":"10.1109\/bibm66473.2025.11356577","type":"proceedings-article","created":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T21:19:40Z","timestamp":1769721580000},"page":"2044-2050","source":"Crossref","is-referenced-by-count":0,"title":["Reflect Then Reason: Iterative Reflection with Soft Reasoning Feature Enhancement for Medical Visual Question Answering"],"prefix":"10.1109","author":[{"given":"Haoyang","family":"Chen","sequence":"first","affiliation":[{"name":"School of Computer Science, Nanjing University,Nanjing,China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"A Survey of Medical Vision-and-Language Applications and Their Techniques","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/BIBM62325.2024.10822837"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1093\/bioinformatics\/btae238"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.00859"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-032-04978-0_44"},{"key":"ref6","article-title":"GEMeX-ThinkVG: Towards Thinking with Visual Grounding in Medical VQA via Reinforcement 
Learning","author":"Liu","year":"2025","journal-title":"arXiv preprint"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.962"},{"key":"ref8","article-title":"InternVL3: Exploring Advanced Training and Test-Time Recipes for Open-Source Multimodal Models","author":"Zhu","year":"2025","journal-title":"arXiv preprint"},{"key":"ref9","first-page":"352","article-title":"Med-Flamingo: a Multimodal Medical Few-shot Learner","volume-title":"Proceedings of Machine Learning Research","author":"Moor"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.3389\/frai.2024.1430984"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.naacl-long.635"},{"key":"ref12","article-title":"MMed-RAG: Versatile Multimodal RAG System for Medical Vision Language Models","volume-title":"Proceedings of the International Conference on Learning Representations","author":"Xia"},{"key":"ref13","article-title":"Qilin-Med-VL: Towards Chinese Large Vision-Language Model for General Healthcare","author":"Liu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref14","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proceedings of the International Conference on Learning Representations","author":"Hu"},{"key":"ref15","article-title":"Parameter-Efficient Fine-Tuning for Medical Image Analysis: The Missed Opportunity","volume-title":"Proceedings of the International Conference on Medical Imaging with Deep Learning","author":"Dutt"},{"key":"ref16","article-title":"Can Common VLMs Rival Medical VLMs? 
Evaluation and Strategic Insights","author":"Zhong","year":"2025","journal-title":"arXiv preprint"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.44"},{"key":"ref18","article-title":"Parameter Efficient Fine Tuning: A Comprehensive Analysis Across Applications","author":"Balne","year":"2024","journal-title":"arXiv preprint"},{"key":"ref19","article-title":"LLaVA-Med: Training a Large Language-and-Vision Assistant for Biomedicine in One Day","volume-title":"Proceedings of the International Conference on Neural Information Processing Systems","author":"Li"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1093\/jamia\/ocv080"},{"key":"ref21","article-title":"VQA-Med: Overview of the Medical Visual Question Answering Task at ImageCLEF 2019","volume-title":"Proceedings of the Conference and Labs of the Evaluation Forum","author":"Abacha"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s41019-025-00297-8"},{"key":"ref23","article-title":"GMAI-MMBench: A Comprehensive Multimodal Evaluation Benchmark Towards General Medical AI","volume-title":"Proceedings of the Conference on Neural Information Processing Systems","author":"Chen"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-naacl.402"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3689096.3689458"},{"key":"ref26","article-title":"PMC-VQA: Visual Instruction Tuning for Medical Visual Question Answering","author":"Zhang","year":"2024","journal-title":"Communications Medicine"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02093"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ISBI48211.2021.9434010"},{"key":"ref29","article-title":"PathVQA: 30000+ Questions for Medical Visual Question Answering","author":"He","year":"2020","journal-title":"arXiv preprint"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1038\/sdata.2018.251"},{"key":"ref31","article-title":"More 
Thinking, Less Seeing? Assessing Amplified Hallucination in Multimodal Reasoning Models","author":"Liu","year":"2025","journal-title":"arXiv preprint"},{"key":"ref32","article-title":"Do LLMs Know about Hallucination? An Empirical Investigation of LLM\u2019s Hidden States","author":"Duan","year":"2024","journal-title":"arXiv preprint"},{"key":"ref33","article-title":"Layer by Layer: Uncovering Hidden Representations in Language Models","volume-title":"Proceedings of the International Conference on Machine Learning","author":"Skean"},{"key":"ref34","first-page":"13050","article-title":"Probing Multimodal Large Language Models for Global and Local Semantic Representations","volume-title":"Proceedings of the International Conference on Language Resources and Evaluation","author":"Tao"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.138"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.1137"},{"key":"ref37","article-title":"Perceiver IO: A General Architecture for Structured Inputs & Outputs","volume-title":"Proceedings of the International Conference on Machine Learning","author":"Jaegle"},{"key":"ref38","article-title":"Qwen2.5-VL Technical Report","author":"Bai","year":"2025","journal-title":"arXiv preprint"},{"key":"ref39","article-title":"Gemma 3 Technical Report","author":"Kamath","year":"2025","journal-title":"arXiv preprint"}],"event":{"name":"2025 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)","location":"Wuhan, China","start":{"date-parts":[[2025,12,15]]},"end":{"date-parts":[[2025,12,18]]}},"container-title":["2025 IEEE International Conference on Bioinformatics and Biomedicine 
(BIBM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11355913\/11355975\/11356577.pdf?arnumber=11356577","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T20:51:28Z","timestamp":1770843088000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11356577\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,15]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/bibm66473.2025.11356577","relation":{},"subject":[],"published":{"date-parts":[[2025,12,15]]}}}