{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T20:26:24Z","timestamp":1775766384631,"version":"3.50.1"},"publisher-location":"Cham","reference-count":35,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031439032","type":"print"},{"value":"9783031439049","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-43904-9_70","type":"book-chapter","created":{"date-parts":[[2023,9,30]],"date-time":"2023-09-30T23:08:57Z","timestamp":1696115337000},"page":"726-736","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":51,"title":["Open-Ended Medical Visual Question Answering Through Prefix Tuning of\u00a0Language Models"],"prefix":"10.1007","author":[{"given":"Tom","family":"van Sonsbeek","sequence":"first","affiliation":[]},{"given":"Mohammad Mahdi","family":"Derakhshani","sequence":"additional","affiliation":[]},{"given":"Ivona","family":"Najdenkoska","sequence":"additional","affiliation":[]},{"given":"Cees G. M.","family":"Snoek","sequence":"additional","affiliation":[]},{"given":"Marcel","family":"Worring","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,1]]},"reference":[{"key":"70_CR1","doi-asserted-by":"crossref","unstructured":"Barraco, M., Cornia, M., Cascianelli, S., Baraldi, L., Cucchiara, R.: The unreasonable effectiveness of CLIP features for image captioning: an experimental analysis. In: CVPR, pp. 4662\u20134670 (2022)","DOI":"10.1109\/CVPRW56347.2022.00512"},{"key":"70_CR2","unstructured":"Brown, T., et al.: Language models are few-shot learners. NeurIPS 33, 1877\u20131901 (2020)"},{"key":"70_CR3","doi-asserted-by":"crossref","unstructured":"Cong, F., Xu, S., Guo, L., Tian, Y.: Caption-aware medical VQA via semantic focusing and progressive cross-modality comprehension. In: ACM Multimedia, pp. 3569\u20133577 (2022)","DOI":"10.1145\/3503161.3548122"},{"key":"70_CR4","unstructured":"Derakhshani, M.M., et al.: Variational prompt tuning improves generalization of vision-language models. arXiv:2210.02390 (2022)"},{"key":"70_CR5","doi-asserted-by":"publisher","unstructured":"Do, T., Nguyen, B.X., Tjiputra, E., Tran, M., Tran, Q.D., Nguyen, A.: Multiple meta-model quantifying for medical visual question answering. In: de Bruijne, M., et al. (eds.) MICCAI 2021. LNCS, vol. 12905, pp. 64\u201374. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87240-3_7","DOI":"10.1007\/978-3-030-87240-3_7"},{"key":"70_CR6","unstructured":"Eslami, S., de Melo, G., Meinel, C.: Does CLIP benefit visual question answering in the medical domain as much as it does in the general domain? arXiv:2112.13906 (2021)"},{"key":"70_CR7","unstructured":"Finn, C., Abbeel, P., Levine, S.: Model-agnostic meta-learning for fast adaptation of deep networks. In: ICLR, pp. 
1126\u20131135 (2017)"},{"key":"70_CR8","unstructured":"Gao, L., et al.: The pile: an 800\u00a0GB dataset of diverse text for language modeling. arXiv:2101.00027 (2020)"},{"key":"70_CR9","doi-asserted-by":"crossref","unstructured":"Gong, H., Chen, G., Liu, S., Yu, Y., Li, G.: Cross-modal self-attention with multi-task pre-training for medical visual question answering. In: ICMR, pp. 456\u2013460 (2021)","DOI":"10.1145\/3460426.3463584"},{"key":"70_CR10","doi-asserted-by":"crossref","unstructured":"Gong, H., Chen, G., Mao, M., Li, Z., Li, G.: Vqamix: conditional triplet mixup for medical visual question answering. IEEE Trans. Med. Imaging (2022)","DOI":"10.1109\/TMI.2022.3185008"},{"key":"70_CR11","doi-asserted-by":"crossref","unstructured":"He, X., Zhang, Y., Mou, L., Xing, E., Xie, P.: Pathvqa: 30000+ questions for medical visual question answering. arXiv:2003.10286 (2020)","DOI":"10.36227\/techrxiv.13127537"},{"key":"70_CR12","unstructured":"Hu, E.J., et al.: Lora: low-rank adaptation of large language models. arXiv:2106.09685 (2021)"},{"key":"70_CR13","doi-asserted-by":"crossref","unstructured":"Huang, Y., Wang, X., Liu, F., Huang, G.: OVQA: A clinically generated visual question answering dataset. In: ACM SIGIR, pp. 2924\u20132938 (2022)","DOI":"10.1145\/3477495.3531724"},{"key":"70_CR14","doi-asserted-by":"crossref","unstructured":"Khare, Y., Bagal, V., Mathew, M., Devi, A., Priyakumar, U.D., Jawahar, C.: MMBERT: multimodal BERT pretraining for improved medical VQA. In: ISBI, pp. 1033\u20131036. IEEE (2021)","DOI":"10.1109\/ISBI48211.2021.9434063"},{"key":"70_CR15","doi-asserted-by":"crossref","unstructured":"Lester, B., Al-Rfou, R., Constant, N.: The power of scale for parameter-efficient prompt tuning. In: EMNLP, pp. 3045\u20133059 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"70_CR16","doi-asserted-by":"crossref","unstructured":"Li, X.L., Liang, P.: Prefix-tuning: optimizing continuous prompts for generation. In: ACL, pp. 4582\u20134597 (2021)","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"70_CR17","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: A bi-level representation learning model for medical visual question answering. J. Biomed. Inf. 134, 104183 (2022)","DOI":"10.1016\/j.jbi.2022.104183"},{"key":"70_CR18","unstructured":"Lin, Z., et al.: Medical visual question answering: a survey. arXiv:2111.10056 (2021)"},{"key":"70_CR19","doi-asserted-by":"publisher","unstructured":"Liu, B., Zhan, L.-M., Wu, X.-M.: Contrastive pre-training and\u00a0representation distillation for\u00a0medical visual question answering based on\u00a0radiology images. In: de Bruijne, M., et al. (eds.) MICCAI 2021. LNCS, vol. 12902, pp. 210\u2013220. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87196-3_20","DOI":"10.1007\/978-3-030-87196-3_20"},{"key":"70_CR20","doi-asserted-by":"crossref","unstructured":"Liu, B., Zhan, L.M., Xu, L., Ma, L., Yang, Y., Wu, X.M.: Slake: a semantically-labeled knowledge-enhanced dataset for medical visual question answering. In: ISBI, pp. 1650\u20131654. IEEE (2021)","DOI":"10.1109\/ISBI48211.2021.9434010"},{"key":"70_CR21","doi-asserted-by":"crossref","unstructured":"Luo, R., et al.: BioGPT: generative pre-trained transformer for biomedical text generation and mining. Briefings Bioinformat. 23(6) (2022)","DOI":"10.1093\/bib\/bbac409"},{"key":"70_CR22","unstructured":"Mokady, R., Hertz, A., Bermano, A.H.: Clipcap: clip prefix for image captioning. 
arXiv:2111.09734 (2021)"},{"key":"70_CR23","unstructured":"Najdenkoska, I., Zhen, X., Worring, M.: Meta learning to bridge vision and language models for multimodal few-shot learning. In: ICLR (2023)"},{"key":"70_CR24","doi-asserted-by":"publisher","unstructured":"Nguyen, B.D., Do, T.-T., Nguyen, B.X., Do, T., Tjiputra, E., Tran, Q.D.: Overcoming data limitation in medical visual question aswering. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11767, pp. 522\u2013530. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32251-9_57","DOI":"10.1007\/978-3-030-32251-9_57"},{"key":"70_CR25","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763. PMLR (2021)"},{"key":"70_CR26","unstructured":"Radford, A., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019)"},{"key":"70_CR27","doi-asserted-by":"publisher","first-page":"50626","DOI":"10.1109\/ACCESS.2020.2980024","volume":"8","author":"F Ren","year":"2020","unstructured":"Ren, F., Zhou, Y.: Cgmvqa: a new classification and generative model for medical visual question answering. IEEE Access 8, 50626\u201350636 (2020)","journal-title":"IEEE Access"},{"issue":"1","key":"70_CR28","doi-asserted-by":"publisher","first-page":"19826","DOI":"10.1038\/s41598-021-98390-1","volume":"11","author":"D Sharma","year":"2021","unstructured":"Sharma, D., Purushotham, S., Reddy, C.K.: MedFuseNet: an attention-based multimodal deep learning model for visual question answering in the medical domain. Sci. Rep. 11(1), 19826 (2021)","journal-title":"Sci. Rep."},{"key":"70_CR29","unstructured":"Taylor, N., Zhang, Y., Joyce, D., Nevado-Holgado, A., Kormilitzin, A.: Clinical prompt learning with frozen language models. arXiv:2205.05535 (2022)"},{"key":"70_CR30","first-page":"200","volume":"34","author":"M Tsimpoukelli","year":"2021","unstructured":"Tsimpoukelli, M., Menick, J.L., Cabi, S., Eslami, S., Vinyals, O., Hill, F.: Multimodal few-shot learning with frozen language models. NeurIPS 34, 200\u2013212 (2021)","journal-title":"NeurIPS"},{"key":"70_CR31","unstructured":"Venigalla, A., Frankle, J., Carbin, M.: BioMedLM: a domain-specific large language model for biomedicine. www.mosaicml.com\/blog\/introducing-pubmed-gpt (2022). Accessed 06 Mar 2022"},{"key":"70_CR32","doi-asserted-by":"crossref","unstructured":"Wang, J., Huang, S., Du, H., Qin, Y., Wang, H., Zhang, W.: MHKD-MVQA: multimodal hierarchical knowledge distillation for medical visual question answering. In: 2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM), pp. 567\u2013574. IEEE (2022)","DOI":"10.1109\/BIBM55620.2022.9995473"},{"key":"70_CR33","doi-asserted-by":"publisher","unstructured":"Wu, Q., Wang, P., Wang, X., He, X., Zhu, W.: Medical VQA. In: Visual Question Answering: From Theory to Application, pp. 165\u2013176. Springer, Singapore (2022). https:\/\/doi.org\/10.1007\/978-981-19-0964-1_11","DOI":"10.1007\/978-981-19-0964-1_11"},{"key":"70_CR34","doi-asserted-by":"crossref","unstructured":"Zhan, L.M., Liu, B., Fan, L., Chen, J., Wu, X.M.: Medical visual question answering via conditional reasoning. In: ACM Multimedia, pp. 2345\u20132354 (2020)","DOI":"10.1145\/3394171.3413761"},{"key":"70_CR35","unstructured":"Zhang, T., Kishore, V., Wu, F., Weinberger, K.Q., Artzi, Y.: Bertscore: evaluating text generation with bert. 
In: ICLR (2020)"}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-43904-9_70","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,11]],"date-time":"2024-03-11T14:42:56Z","timestamp":1710168176000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-43904-9_70"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031439032","9783031439049"],"references-count":35,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-43904-9_70","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"1 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vancouver, BC","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Canada","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2023\/en\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2250","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"730","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"32% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}