{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,10]],"date-time":"2026-02-10T19:03:21Z","timestamp":1770750201281,"version":"3.50.0"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031820069","type":"print"},{"value":"9783031820076","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-82007-6_7","type":"book-chapter","created":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T05:46:24Z","timestamp":1738907184000},"page":"64-73","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Targeted Visual Prompting for\u00a0Medical Visual Question Answering"],"prefix":"10.1007","author":[{"given":"Sergio","family":"Tascon-Morales","sequence":"first","affiliation":[]},{"given":"Pablo","family":"M\u00e1rquez-Neila","sequence":"additional","affiliation":[]},{"given":"Raphael","family":"Sznitman","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,8]]},"reference":[{"key":"7_CR1","doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Bottom-up and top-down attention for image captioning and visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6077\u20136086 (2018)","DOI":"10.1109\/CVPR.2018.00636"},{"key":"7_CR2","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2425\u20132433 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"7_CR3","unstructured":"Chen, C., et al.: Position-enhanced visual instruction tuning for multimodal large language models. arXiv preprint arXiv:2308.13437 (2023)"},{"key":"7_CR4","doi-asserted-by":"crossref","unstructured":"Goyal, Y., Khot, T., Summers-Stay, D., Batra, D., Parikh, D.: Making the V in VQA matter: elevating the role of image understanding in visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6904\u20136913 (2017)","DOI":"10.1109\/CVPR.2017.670"},{"key":"7_CR5","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2020.113993","volume":"164","author":"D Gupta","year":"2021","unstructured":"Gupta, D., Suman, S., Ekbal, A.: Hierarchical deep multi-modal network for medical visual question answering. Expert Syst. Appl. 164, 113993 (2021)","journal-title":"Expert Syst. Appl."},{"key":"7_CR6","unstructured":"Hudson, D.A., Manning, C.D.: GQA: a new dataset for compositional question answering over real-world images. arXiv preprint arXiv:1902.09506 (2019). 
3(8)"},{"key":"7_CR7","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"238","DOI":"10.1007\/978-3-030-28577-7_20","volume-title":"Experimental IR Meets Multilinguality, Multimodality, and Interaction","author":"F Liu","year":"2019","unstructured":"Liu, F., Peng, Y., Rosen, M.P.: An effective deep transfer learning and information fusion framework for medical visual question answering. In: Crestani, F., et al. (eds.) CLEF 2019. LNCS, vol. 11696, pp. 238\u2013247. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-28577-7_20"},{"key":"7_CR8","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. arXiv preprint arXiv:2304.08485 (2023)"},{"key":"7_CR9","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"7_CR10","unstructured":"Mani, A., Yoo, N., Hinthorn, W., Russakovsky, O.: Point and ask: incorporating pointing into visual question answering. arXiv preprint arXiv:2011.13681 (2020)"},{"key":"7_CR11","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"522","DOI":"10.1007\/978-3-030-32251-9_57","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2019","author":"BD Nguyen","year":"2019","unstructured":"Nguyen, B.D., Do, T.-T., Nguyen, B.X., Do, T., Tjiputra, E., Tran, Q.D.: Overcoming data limitation in medical visual question answering. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11767, pp. 522\u2013530. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32251-9_57"},{"key":"7_CR12","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"7_CR13","doi-asserted-by":"crossref","unstructured":"Ribeiro, M.T., Guestrin, C., Singh, S.: Are red roses red? Evaluating consistency of question-answering models. In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 6174\u20136184 (2019)","DOI":"10.18653\/v1\/P19-1621"},{"key":"7_CR14","doi-asserted-by":"crossref","unstructured":"Seenivasan, L., Islam, M., Kannan, G., Ren, H.: SurgicalGPT: end-to-end language-vision GPT for visual question answering in surgery. arXiv preprint arXiv:2304.09974 (2023)","DOI":"10.1007\/978-3-031-43996-4_27"},{"key":"7_CR15","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., et al.: Squinting at VQA models: introspecting VQA models with sub-questions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10003\u201310011 (2020)","DOI":"10.1109\/CVPR42600.2020.01002"},{"key":"7_CR16","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"386","DOI":"10.1007\/978-3-031-16452-1_37","volume-title":"MICCAI 2022, Part VIII","author":"S Tascon-Morales","year":"2022","unstructured":"Tascon-Morales, S., M\u00e1rquez-Neila, P., Sznitman, R.: Consistency-preserving visual question answering in medical imaging. In: Wang, L., Dou, Q., Fletcher, P.T., Speidel, S., Li, S. (eds.) MICCAI 2022, Part VIII. LNCS, vol. 13438, pp. 386\u2013395. Springer, Cham (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-16452-1_37"},{"key":"7_CR17","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"361","DOI":"10.1007\/978-3-031-43895-0_34","volume-title":"MICCAI 2023","author":"S Tascon-Morales","year":"2023","unstructured":"Tascon-Morales, S., M\u00e1rquez-Neila, P., Sznitman, R.: Localized questions in medical visual question answering. In: Greenspan, H., et al. (eds.) MICCAI 2023. LNCS, vol. 14221, pp. 361\u2013370. Springer, Cham (2023). https:\/\/doi.org\/10.1007\/978-3-031-43895-0_34"},{"key":"7_CR18","doi-asserted-by":"crossref","unstructured":"Tong, S., Liu, Z., Zhai, Y., Ma, Y., LeCun, Y., Xie, S.: Eyes wide shut? Exploring the visual shortcomings of multimodal LLMs. arXiv preprint arXiv:2401.06209 (2024)","DOI":"10.1109\/CVPR52733.2024.00914"},{"key":"7_CR19","unstructured":"Touvron, H., et\u00a0al.: Llama 2: open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)"},{"key":"7_CR20","first-page":"200","volume":"34","author":"M Tsimpoukelli","year":"2021","unstructured":"Tsimpoukelli, M., Menick, J.L., Cabi, S., Eslami, S., Vinyals, O., Hill, F.: Multimodal few-shot learning with frozen language models. Adv. Neural. Inf. Process. Syst. 34, 200\u2013212 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"7_CR21","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"issue":"9","key":"7_CR22","doi-asserted-by":"publisher","first-page":"2856","DOI":"10.1109\/TMI.2020.2978284","volume":"39","author":"MH Vu","year":"2020","unstructured":"Vu, M.H., L\u00f6fstedt, T., Nyholm, T., Sznitman, R.: A question-centric model for visual question answering in medical imaging. IEEE Trans. Med. Imaging 39(9), 2856\u20132868 (2020)","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"3","key":"7_CR23","doi-asserted-by":"publisher","DOI":"10.1016\/j.metrad.2023.100033","volume":"1","author":"Z Wang","year":"2023","unstructured":"Wang, Z., Liu, L., Wang, L., Zhou, L.: R2GenGPT: radiology report generation with frozen LLMs. Meta-Radiology 1(3), 100033 (2023)","journal-title":"Meta-Radiology"},{"key":"7_CR24","unstructured":"Yin, S., et al.: A survey on multimodal large language models. arXiv preprint arXiv:2306.13549 (2023)"},{"key":"7_CR25","doi-asserted-by":"crossref","unstructured":"Zhan, L.M., Liu, B., Fan, L., Chen, J., Wu, X.M.: Medical visual question answering via conditional reasoning. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 2345\u20132354 (2020)","DOI":"10.1145\/3394171.3413761"},{"key":"7_CR26","doi-asserted-by":"crossref","unstructured":"Zhang, D., et al.: MM-LLMs: recent advances in multimodal large language models. arXiv preprint arXiv:2401.13601 (2024)","DOI":"10.18653\/v1\/2024.findings-acl.738"},{"key":"7_CR27","unstructured":"Zhang, S., et al.: GPT4RoI: instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601 (2023)"},{"key":"7_CR28","unstructured":"Zhang, X., et al.: PMC-VQA: visual instruction tuning for medical visual question answering. 
arXiv preprint arXiv:2305.10415 (2023)"}],"container-title":["Lecture Notes in Computer Science","Applications of Medical Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-82007-6_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T05:46:36Z","timestamp":1738907196000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-82007-6_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031820069","9783031820076"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-82007-6_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"8 February 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"AMAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Applications of Medical AI","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Marrakesh","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Morocco","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6 October 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"3","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"amai2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/sites.google.com\/view\/amai2024\/home","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
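
The record above is a Crossref REST API response of message-type "work" for the chapter "Targeted Visual Prompting for Medical Visual Question Answering" (DOI 10.1007/978-3-031-82007-6_7). As a minimal sketch only, and assuming the public Crossref endpoint https://api.crossref.org/works/{doi} and the third-party `requests` package, the snippet below shows how such a record could be retrieved and a few of the fields that appear above (title, author, container-title, issued, DOI, reference) read out. All function and variable names here are illustrative, not part of the record or of any library beyond `requests`.

    # Sketch: fetch and summarize a Crossref work record like the one above.
    # Assumes the public Crossref REST API and the `requests` package; the field
    # names used (title, author, container-title, issued, reference, ...) are the
    # ones visible in the record shown in this document.
    import requests

    DOI = "10.1007/978-3-031-82007-6_7"  # DOI taken from the record above

    def fetch_crossref_work(doi: str) -> dict:
        """Return the 'message' payload of a Crossref work record."""
        resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
        resp.raise_for_status()
        payload = resp.json()
        # The envelope mirrors the record above: status, message-type, message.
        assert payload.get("status") == "ok" and payload.get("message-type") == "work"
        return payload["message"]

    def summarize(work: dict) -> str:
        """Build a one-line summary: authors, title, container, year, DOI, reference count."""
        title = "; ".join(work.get("title", []))
        authors = ", ".join(
            f"{a.get('given', '')} {a.get('family', '')}".strip()
            for a in work.get("author", [])
        )
        container = "; ".join(work.get("container-title", []))
        year = work.get("issued", {}).get("date-parts", [[None]])[0][0]
        refs = work.get("references-count", len(work.get("reference", [])))
        return (f"{authors}: {title}. {container} ({year}). "
                f"https://doi.org/{work['DOI']} [{refs} references]")

    if __name__ == "__main__":
        work = fetch_crossref_work(DOI)
        print(summarize(work))
        # The 'reference' array corresponds to the 28 entries (7_CR1..7_CR28) above.
        for ref in work.get("reference", [])[:3]:
            print("-", ref.get("unstructured") or ref.get("DOI", ""))

For the record shown here, the summary line would name the three authors (Tascon-Morales, Márquez-Neila, Sznitman), the chapter title, the "Lecture Notes in Computer Science / Applications of Medical Artificial Intelligence" container, the 2025 issue year, and the 28-entry reference count.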