{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T21:47:48Z","timestamp":1770846468747,"version":"3.50.1"},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,15]],"date-time":"2025-12-15T00:00:00Z","timestamp":1765756800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,15]],"date-time":"2025-12-15T00:00:00Z","timestamp":1765756800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001774","name":"University of Sydney","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001774","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,15]]},"DOI":"10.1109\/bibm66473.2025.11356041","type":"proceedings-article","created":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T21:19:40Z","timestamp":1769721580000},"page":"3921-3925","source":"Crossref","is-referenced-by-count":0,"title":["Multi-Scale Visual Prompting for Robust Visual Question Answering in Medical Imaging"],"prefix":"10.1109","author":[{"given":"Yang","family":"Ma","sequence":"first","affiliation":[{"name":"School of Computer Science, The University of Sydney,Sydney,Australia"}]},{"given":"Dongang","family":"Wang","sequence":"additional","affiliation":[{"name":"Brain and Mind Centre, The University of Sydney,Sydney,Australia"}]},{"given":"Peilin","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Mathematics, The University of Sydney,Sydney,Australia"}]},{"given":"Michael","family":"Barnett","sequence":"additional","affiliation":[{"name":"Brain and Mind Centre, The University of 
Sydney,Sydney,Australia"}]},{"given":"Dingxuan","family":"Zhou","sequence":"additional","affiliation":[{"name":"School of Mathematics, The University of Sydney,Sydney,Australia"}]},{"given":"Weidong","family":"Cai","sequence":"additional","affiliation":[{"name":"School of Computer Science, The University of Sydney,Sydney,Australia"}]},{"given":"Chenyu","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Computer Science, The University of Sydney,Sydney,Australia"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-16443-9_65"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548122"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-87240-3_7"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ISBI53787.2023.10230743"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/tetci.2023.3311333"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-43904-9_70"},{"key":"ref7","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Llama2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023","journal-title":"arXiv preprint"},{"key":"ref9","first-page":"27 730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang","year":"2022","journal-title":"Advances in neural information processing systems"},{"key":"ref10","first-page":"2305","article-title":"Biomedgpt: A unified and generalist biomedical generative pre-trained transformer for vision, language, and multimodal tasks","author":"Zhang","year":"2023","journal-title":"arXiv e-prints"},{"key":"ref11","first-page":"353","article-title":"Med-flamingo: a multimodal medical few-shot learner","volume-title":"Machine Learning for Health 
(ML4H)","author":"Moor"},{"key":"ref12","article-title":"Llava-med: Training a large language-and-vision assistant for biomedicine in one day","volume":"36","author":"Li","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2023.106555"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2022.3204551"},{"key":"ref15","article-title":"Biomedclip: a multimodal biomedical foundation model pretrained from fifteen million scientific image-text pairs","author":"Zhang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref16","article-title":"Medmoe: Mixture of domain-specific experts for lightweight medical vision-language models","author":"Jiang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-032-04971-1_62"},{"key":"ref18","article-title":"Msvit: Multi-scale vision transformer for medical visual question answering","volume-title":"Medical Image Computing and Computer-Assisted Intervention (MICCAI)","author":"Chen","year":"2022"},{"key":"ref19","article-title":"Hierarchical cross-modal fusion for multi-scale medical visual question answering","volume-title":"IEEE International Conference on Bioinformatics and Biomedicine (BIBM)","author":"Wang","year":"2023"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-025-95361-8"},{"key":"ref21","first-page":"12888","article-title":"Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"International conference on machine learning","author":"Li"},{"issue":"2","key":"ref22","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proceedings of 
NAACL-HLT","volume":"1","author":"Kenton"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1038\/sdata.2018.251"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ISBI48211.2021.9434010"},{"key":"ref25","article-title":"Pathvqa: 30000+ questions for medical visual question answering","author":"He","year":"2020","journal-title":"arXiv preprint"},{"issue":"3","key":"ref26","first-page":"6","volume":"2","author":"Chiang","year":"2023","journal-title":"Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref28","article-title":"Decoupled weight decay regularization","author":"Loshchilov","year":"2017","journal-title":"arXiv preprint"}],"event":{"name":"2025 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)","location":"Wuhan, China","start":{"date-parts":[[2025,12,15]]},"end":{"date-parts":[[2025,12,18]]}},"container-title":["2025 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11355913\/11355975\/11356041.pdf?arnumber=11356041","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T20:53:31Z","timestamp":1770843211000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11356041\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,15]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/bibm66473.2025.11356041","relation":{},"subject":[],"published":{"date-parts":[[2025,12,15]]}}}