{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T08:38:14Z","timestamp":1771922294448,"version":"3.50.1"},"reference-count":45,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iccvw69036.2025.00124","type":"proceedings-article","created":{"date-parts":[[2026,2,23]],"date-time":"2026-02-23T20:44:02Z","timestamp":1771879442000},"page":"1154-1163","source":"Crossref","is-referenced-by-count":0,"title":["A Dynamic Agent Framework for Large Language Model Reasoning for Medical and Visual Question Answering"],"prefix":"10.1109","author":[{"given":"Ziyan","family":"Xiao","sequence":"first","affiliation":[{"name":"School of Computing and Data Science, University of Hong Kong,Hong Kong"}]},{"given":"Ruiyang","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Computing and Data Science, University of Hong Kong,Hong Kong"}]},{"given":"Yushi","family":"Feng","sequence":"additional","affiliation":[{"name":"School of Computing and Data Science, University of Hong Kong,Hong Kong"}]},{"given":"Lingting","family":"Zhu","sequence":"additional","affiliation":[{"name":"School of Computing and Data Science, University of Hong Kong,Hong Kong"}]},{"given":"Liang","family":"Peng","sequence":"additional","affiliation":[{"name":"School of Computing and Data Science, University of Hong Kong,Hong 
Kong"}]},{"given":"Lequan","family":"Yu","sequence":"additional","affiliation":[{"name":"School of Computing and Data Science, University of Hong Kong,Hong Kong"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Huatuogpt-vision, towards injecting medical visual knowledge into multimodal llms at scale","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1189"},{"key":"ref3","article-title":"Toward adaptive reasoning in large language models with thought rollback","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref4","article-title":"Premise order matters in reasoning with large language models","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref5","article-title":"Vision-language models can self-improve reasoning via reflection","author":"Cheng","year":"2024","journal-title":"arXiv preprint"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.3390\/app15062983"},{"key":"ref7","article-title":"Improving factuality and reasoning in language models through multiagent debate","volume-title":"Forty-first International Conference on Machine Learning","author":"Du","year":"2023"},{"key":"ref8","volume-title":"Gemini 2.5 flash: Best for fast performance on everyday tasks","year":"2025"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-acl.1319"},{"key":"ref10","article-title":"Measuring massive multitask language understanding","author":"Hendrycks","year":"2020","journal-title":"arXiv preprint"},{"key":"ref11","article-title":"Large language models cannot self-correct reasoning yet","author":"Huang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.3390\/app11146421"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1259"},{"key":"ref14","first-page":"22199","article-title":"Large language models are zero-shot 
reasoners","volume":"35","author":"Kojima","year":"2022","journal-title":"Advances in neural information processing systems"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1038\/s41597-023-02068-4"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1038\/sdata.2018.251"},{"key":"ref17","first-page":"28541","article-title":"Llava-med: Training a large language-and-vision assistant for biomedicine in one day","volume":"36","author":"Li","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ISBI48211.2021.9434010"},{"key":"ref19","first-page":"34892","article-title":"Visual instruction tuning","volume":"36","author":"Liu","year":"2023","journal-title":"Advances in neural information processing systems"},{"key":"ref20","article-title":"Dynamic llm-agent network: An llm-agent collaboration framework with agent team optimization","author":"Liu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref21","first-page":"43447","article-title":"Chameleon: Plug-and-play compositional reasoning with large language models","volume":"36","author":"Lu","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref22","article-title":"Routellm: Learning to route llms from preference data","volume-title":"The Thirteenth International Conference on Learning Representations","author":"Ong","year":"2024"},{"key":"ref23","volume-title":"Gpt-4o mini: advancing cost-efficient intelligence","year":"2024"},{"key":"ref24","first-page":"248","article-title":"Medmcqa: A large-scale multi-subject multi-choice dataset for medical domain question answering","volume-title":"Conference on health, inference, and learning","author":"Pal","year":"2022"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73414-4_22"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1410"},{"key":"ref27","article-title":"Medagents: Large 
language models as collaborators for zero-shot medical reasoning","author":"Tang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref28","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv preprint"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/j.ipm.2025.104297"},{"key":"ref30","article-title":"Mixture-of-agents enhances large language model capabilities","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref31","article-title":"Qwen2-vl: Enhancing vision-language model\u2019s perception of the world at any resolution","volume":"abs\/2409.12191","author":"Wang","year":"2024","journal-title":"CoRR"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.1170"},{"key":"ref33","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume":"35","author":"Wei","year":"2022","journal-title":"Advances in neural information processing systems"},{"key":"ref34","article-title":"Mc-cot: A modular collaborative cot framework for zero-shot medical-vqa with llm and mllm integration","author":"Wei","year":"2024","journal-title":"arXiv preprint"},{"key":"ref35","article-title":"Medical graph rag: Towards safe medical large language model via graph retrieval-augmented generation","author":"Wu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref36","article-title":"Mmedagent-rl: Optimizing multi-agent collaboration for multimodal medical reasoning","author":"Xia","year":"2025","journal-title":"arXiv preprint"},{"key":"ref37","article-title":"Qwen2.5 technical report","volume":"abs\/2412.15115","author":"Yang","year":"2024","journal-title":"CoRR"},{"key":"ref38","article-title":"Agentnet: Decentralized evolutionary coordination for llm-based multiagent systems","author":"Yang","year":"2025","journal-title":"arXiv preprint"},{"key":"ref39","article-title":"Cut the crap: An 
economical communication pipeline for llm-based multi-agent systems","author":"Zhang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref40","article-title":"Multimodal chain-of-thought reasoning in language models","author":"Zhang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3696410.3714782"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.emnlp-main.808"},{"key":"ref43","article-title":"Multi-agent design: Optimizing agents with better prompts and topologies","author":"Zhou","year":"2025","journal-title":"arXiv preprint"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-acl.1298"},{"key":"ref45","article-title":"Uni-med: a unified medical generalist foundation model for multi-task learning via connector-moe","author":"Zhu","year":"2024","journal-title":"arXiv preprint"}],"event":{"name":"2025 IEEE\/CVF International Conference on Computer Vision Workshops (ICCVW)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,20]]}},"container-title":["2025 IEEE\/CVF International Conference on Computer Vision Workshops (ICCVW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11373940\/11374285\/11374379.pdf?arnumber=11374379","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T07:33:01Z","timestamp":1771918381000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11374379\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":45,"URL":"https:\/\/doi.org\/10.1109\/iccvw69036.2025.00124","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}