{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T22:06:57Z","timestamp":1742940417391,"version":"3.40.3"},"publisher-location":"Cham","reference-count":27,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031456725"},{"type":"electronic","value":"9783031456732"}],"license":[{"start":{"date-parts":[[2023,10,15]],"date-time":"2023-10-15T00:00:00Z","timestamp":1697328000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,10,15]],"date-time":"2023-10-15T00:00:00Z","timestamp":1697328000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-45673-2_39","type":"book-chapter","created":{"date-parts":[[2023,10,14]],"date-time":"2023-10-14T08:02:16Z","timestamp":1697270536000},"page":"393-402","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Multi-modal Adapter for\u00a0Medical Vision-and-Language Learning"],"prefix":"10.1007","author":[{"given":"Zheng","family":"Yu","sequence":"first","affiliation":[]},{"given":"Yanyuan","family":"Qiao","sequence":"additional","affiliation":[]},{"given":"Yutong","family":"Xie","sequence":"additional","affiliation":[]},{"given":"Qi","family":"Wu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,15]]},"reference":[{"key":"39_CR1","unstructured":"Abacha, A.B., Gayen, S., Lau, J., Rajaraman, S., Demner-Fushman, D.: Nlm at imageclef 2018 visual question answering in the medical domain. In: CLEF (Working Notes) (2018)"},{"key":"39_CR2","unstructured":"Abacha, A.B., Hasan, S.A., Datla, V., Liu, J., Demner-Fushman, D., M\u00fcller, H.: Vqa-med: overview of the medical visual question answering task at imageclef 2019. In: CLEF (2019)"},{"key":"39_CR3","doi-asserted-by":"publisher","unstructured":"Chen, Z., et al.: Multi-modal masked autoencoders for medical vision-and-language pre-training. In: MICCAI, vol. 13435, pp. 679\u2013689 (2022). https:\/\/doi.org\/10.1007\/978-3-031-16443-9_65","DOI":"10.1007\/978-3-031-16443-9_65"},{"key":"39_CR4","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. ArXiv abs\/ arXiv: 1810.04805 (2019)"},{"key":"39_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"64","DOI":"10.1007\/978-3-030-87240-3_7","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2021","author":"T Do","year":"2021","unstructured":"Do, T., Nguyen, B.X., Tjiputra, E., Tran, M., Tran, Q.D., Nguyen, A.: Multiple meta-model quantifying for medical visual question answering. In: de Bruijne, M., Cattin, P.C., Cotin, S., Padoy, N., Speidel, S., Zheng, Y., Essert, C. (eds.) MICCAI 2021. LNCS, vol. 12905, pp. 64\u201374. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87240-3_7"},{"key":"39_CR6","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. 
ArXiv abs\/ arXiv: 2010.11929 (2020)"},{"key":"39_CR7","unstructured":"Eslami, S., de Melo, G., Meinel, C.: Does CLIP benefit visual question answering in the medical domain as much as it does in the general domain? CoRR abs\/ arXiv: 2112.13906 (2021)"},{"key":"39_CR8","doi-asserted-by":"crossref","unstructured":"Gong, H., Chen, G., Liu, S., Yu, Y., Li, G.: Cross-modal self-attention with multi-task pre-training for medical visual question answering. In: ICMR, pp. 456\u2013460. ACM (2021)","DOI":"10.1145\/3460426.3463584"},{"key":"39_CR9","unstructured":"He, R., et al.: On the effectiveness of adapter-based tuning for pretrained language model adaptation. In: Zong, C., Xia, F., Li, W., Navigli, R. (eds.) ACL\/IJCNLP, pp. 2208\u20132222 (2021)"},{"key":"39_CR10","unstructured":"Houlsby, N., et al.: Parameter-efficient transfer learning for nlp. In: ICML, pp. 2790\u20132799 (2019)"},{"key":"39_CR11","unstructured":"Hu, E., et al.: Lora: low-rank adaptation of large language models. In: ICLR (2022)"},{"key":"39_CR12","doi-asserted-by":"publisher","first-page":"339","DOI":"10.1162\/tacl_a_00065","volume":"5","author":"M Johnson","year":"2017","unstructured":"Johnson, M., et al.: Google\u2019s multilingual neural machine translation system: Enabling zero-shot translation. Trans. Assoc. Comput. Linguistics 5, 339\u2013351 (2017)","journal-title":"Trans. Assoc. Comput. Linguistics"},{"key":"39_CR13","doi-asserted-by":"crossref","unstructured":"Khare, Y., Bagal, V., Mathew, M., Devi, A., Priyakumar, U.D., Jawahar, C.V.: MMBERT: multimodal BERT pretraining for improved medical VQA. In: ISBI, pp. 1033\u20131036. IEEE (2021)","DOI":"10.1109\/ISBI48211.2021.9434063"},{"key":"39_CR14","doi-asserted-by":"crossref","unstructured":"Lau, J., Gayen, S., Abacha, A.B., Demner-Fushman, D.: A dataset of clinically generated visual questions and answers about radiology images. Sci. Data 5 (2018)","DOI":"10.1038\/sdata.2018.251"},{"key":"39_CR15","doi-asserted-by":"crossref","unstructured":"Li, Y., Wang, H., Luo, Y.: A comparison of pre-trained vision-and-language models for multimodal representation learning across medical images and reports. In: Park, T., et al (eds.) IEEE International Conference on Bioinformatics and Biomedicine, BIBM 2020, Virtual Event, South Korea, 16\u201319 December 2020, pp. 1999\u20132004 (2020)","DOI":"10.1109\/BIBM49941.2020.9313289"},{"key":"39_CR16","doi-asserted-by":"crossref","unstructured":"Liu, B., Zhan, L.M., Xu, L., Ma, L., Yang, Y., Wu, X.M.: Slake: a semantically-labeled knowledge-enhanced dataset for medical visual question answering. 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), pp. 1650\u20131654 (2021)","DOI":"10.1109\/ISBI48211.2021.9434010"},{"key":"39_CR17","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"210","DOI":"10.1007\/978-3-030-87196-3_20","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2021","author":"B Liu","year":"2021","unstructured":"Liu, B., Zhan, L.-M., Wu, X.-M.: Contrastive pre-training and\u00a0representation distillation for\u00a0medical visual question answering based on\u00a0radiology images. In: de Bruijne, M., Cattin, P.C., Cotin, S., Padoy, N., Speidel, S., Zheng, Y., Essert, C. (eds.) MICCAI 2021. LNCS, vol. 12902, pp. 210\u2013220. Springer, Cham (2021). 
https:\/\/doi.org\/10.1007\/978-3-030-87196-3_20"},{"key":"39_CR18","unstructured":"Mahabadi, R.K., Henderson, J., Ruder, S.: Compacter: efficient low-rank hypercomplex adapter layers. In: NeurIPS, pp. 1022\u20131035 (2021)"},{"key":"39_CR19","doi-asserted-by":"crossref","unstructured":"Moon, J.H., Lee, H., Shin, W., Choi, E.: Multi-modal understanding and generation for medical images and text via vision-language pre-training. CoRR (2021)","DOI":"10.1109\/JBHI.2022.3207502"},{"issue":"12","key":"39_CR20","doi-asserted-by":"publisher","first-page":"6070","DOI":"10.1109\/JBHI.2022.3207502","volume":"26","author":"JH Moon","year":"2022","unstructured":"Moon, J.H., Lee, H., Shin, W., Kim, Y., Choi, E.: Multi-modal understanding and generation for medical images and text via vision-language pre-training. IEEE J. Biomed. Health Informatics 26(12), 6070\u20136080 (2022)","journal-title":"IEEE J. Biomed. Health Informatics"},{"key":"39_CR21","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"522","DOI":"10.1007\/978-3-030-32251-9_57","volume-title":"Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2019","author":"BD Nguyen","year":"2019","unstructured":"Nguyen, B.D., Do, T.-T., Nguyen, B.X., Do, T., Tjiputra, E., Tran, Q.D.: Overcoming data limitation in medical visual question answering. In: Shen, D., et al. (eds.) MICCAI 2019. LNCS, vol. 11767, pp. 522\u2013530. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32251-9_57"},{"key":"39_CR22","doi-asserted-by":"crossref","unstructured":"Pfeiffer, J., et al.: Adapterhub: a framework for adapting transformers. In: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, EMNLP 2020 - Demos, Online, 16\u201320 November 2020, pp. 46\u201354. Association for Computational Linguistics (2020)","DOI":"10.18653\/v1\/2020.emnlp-demos.7"},{"key":"39_CR23","doi-asserted-by":"publisher","first-page":"50626","DOI":"10.1109\/ACCESS.2020.2980024","volume":"8","author":"F Ren","year":"2020","unstructured":"Ren, F., Zhou, Y.: CGMVQA: a new classification and generative model for medical visual question answering. IEEE Access 8, 50626\u201350636 (2020)","journal-title":"IEEE Access"},{"key":"39_CR24","doi-asserted-by":"crossref","unstructured":"Subramanian, S., et al.: Medicat: a dataset of medical images, captions, and textual references. In: Cohn, T., He, Y., Liu, Y. (eds.) Findings of the Association for Computational Linguistics: EMNLP 2020, Online Event, 16\u201320 November 2020, pp. 2112\u20132120. Findings of ACL, Association for Computational Linguistics (2020)","DOI":"10.18653\/v1\/2020.findings-emnlp.191"},{"key":"39_CR25","doi-asserted-by":"crossref","unstructured":"Wu, T., Singh, S., Paul, S., Burns, G.A., Peng, N.: MELINDA: a multimodal dataset for biomedical experiment method classification. In: AAAI, pp. 14076\u201314084 (2021)","DOI":"10.1609\/aaai.v35i16.17657"},{"key":"39_CR26","unstructured":"Yan, X., Li, L., Xie, C., Xiao, J., Gu, L.: Zhejiang university at imageclef 2019 visual question answering in the medical domain. In: CLEF (2019)"},{"key":"39_CR27","doi-asserted-by":"crossref","unstructured":"Zhan, L., Liu, B., Fan, L., Chen, J., Wu, X.: Medical visual question answering via conditional reasoning. In: ACM MM, pp. 2345\u20132354. 
ACM (2020)","DOI":"10.1145\/3394171.3413761"}],"container-title":["Lecture Notes in Computer Science","Machine Learning in Medical Imaging"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-45673-2_39","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,13]],"date-time":"2024-03-13T16:58:53Z","timestamp":1710349133000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-45673-2_39"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,15]]},"ISBN":["9783031456725","9783031456732"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-45673-2_39","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023,10,15]]},"assertion":[{"value":"15 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MLMI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Machine Learning in Medical Imaging","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vancouver, BC","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Canada","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mlmi-med2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/sites.google.com\/view\/mlmi2023?pli=1","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"139","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"93","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers 
Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"67% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}