{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,23]],"date-time":"2025-09-23T00:37:14Z","timestamp":1758587834511,"version":"3.44.0"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032060037","type":"print"},{"value":"9783032060044","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,9,22]],"date-time":"2025-09-22T00:00:00Z","timestamp":1758499200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,22]],"date-time":"2025-09-22T00:00:00Z","timestamp":1758499200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-06004-4_23","type":"book-chapter","created":{"date-parts":[[2025,9,22]],"date-time":"2025-09-22T17:21:42Z","timestamp":1758561702000},"page":"229-238","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["RadFig-VQA: A Multi-imaging-Modality Radiology Benchmark for\u00a0Evaluating Vision-Language Models in\u00a0Clinical Practice"],"prefix":"10.1007","author":[{"given":"Yosuke","family":"Yamagishi","sequence":"first","affiliation":[]},{"given":"Shouhei","family":"Hanaoka","sequence":"additional","affiliation":[]},{"given":"Yuta","family":"Nakamura","sequence":"additional","affiliation":[]},{"given":"Tomohiro","family":"Kikuchi","sequence":"additional","affiliation":[]},{"given":"Akinobu","family":"Shimizu","sequence":"additional","affiliation":[]},{"given":"Takeharu","family":"Yoshikawa","sequence":"additional","affiliation":[]},{"given":"Osamu","family":"Abe","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,22]]},"reference":[{"key":"23_CR1","unstructured":"Bai, S., et\u00a0al.: Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923 (2025)"},{"issue":"1","key":"23_CR2","doi-asserted-by":"publisher","first-page":"141","DOI":"10.1038\/s43856-023-00370-1","volume":"3","author":"J Clusmann","year":"2023","unstructured":"Clusmann, J., et al.: The future landscape of large language models in medicine. Commun. Med. 3(1), 141 (2023)","journal-title":"Commun. Med."},{"key":"23_CR3","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"23_CR4","unstructured":"Google: Medgemma hugging face"},{"key":"23_CR5","unstructured":"Grattafiori, A., et\u00a0al.: The llama 3 herd of models. arXiv preprint arXiv:2407.21783 (2024)"},{"key":"23_CR6","unstructured":"Hendrycks, D., et al.: Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020)"},{"key":"23_CR7","doi-asserted-by":"crossref","unstructured":"Hu, Y., et al.: Omnimedvqa: a new large-scale comprehensive evaluation benchmark for medical lvlm. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
22170\u201322183 (2024)","DOI":"10.1109\/CVPR52733.2024.02093"},{"key":"23_CR8","doi-asserted-by":"crossref","unstructured":"Kikuchi, T., Nakao, T., Nakamura, Y., Hanaoka, S., Mori, H., Yoshikawa, T.: Toward improved radiologic diagnostics: investigating the utility and limitations of gpt-3.5 turbo and gpt-4 with quiz cases. Am. J. Neuroradiol. 45(10), 1506\u20131511 (2024)","DOI":"10.3174\/ajnr.A8332"},{"issue":"1","key":"23_CR9","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1038\/sdata.2018.251","volume":"5","author":"JJ Lau","year":"2018","unstructured":"Lau, J.J., Gayen, S., Ben Abacha, A., Demner-Fushman, D.: A dataset of clinically generated visual questions and answers about radiology images. Sci. Data 5(1), 1\u201310 (2018)","journal-title":"Sci. Data"},{"key":"23_CR10","first-page":"28541","volume":"36","author":"C Li","year":"2023","unstructured":"Li, C., et al.: Llava-med: training a large language-and-vision assistant for biomedicine in one day. Adv. Neural. Inf. Process. Syst. 36, 28541\u201328564 (2023)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"23_CR11","doi-asserted-by":"crossref","unstructured":"Li, D., Gupta, K., Bhaduri, M., Sathiadoss, P., Bhatnagar, S., Chong, J.: Comparing gpt-3.5 and gpt-4 accuracy and drift in radiology diagnosis please cases. Radiology 310(1), e232411 (2024)","DOI":"10.1148\/radiol.232411"},{"key":"23_CR12","doi-asserted-by":"crossref","unstructured":"Liu, B., Zhan, L.M., Xu, L., Ma, L., Yang, Y., Wu, X.M.: Slake: a semantically-labeled knowledge-enhanced dataset for medical visual question answering. In: 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), pp. 1650\u20131654. IEEE (2021)","DOI":"10.1109\/ISBI48211.2021.9434010"},{"key":"23_CR13","unstructured":"Pedregosa, F., et\u00a0al.: Scikit-learn: machine learning in python. J. Mach. Learn. Res. 12, 2825\u20132830 (2011)"},{"issue":"3","key":"23_CR14","doi-asserted-by":"publisher","first-page":"289","DOI":"10.1007\/s00799-022-00329-y","volume":"23","author":"T Saikh","year":"2022","unstructured":"Saikh, T., Ghosal, T., Mittal, A., Ekbal, A., Bhattacharyya, P.: Scienceqa: a novel resource for question answering on scholarly articles. Int. J. Digit. Libr. 23(3), 289\u2013301 (2022)","journal-title":"Int. J. Digit. Libr."},{"key":"23_CR15","doi-asserted-by":"crossref","unstructured":"Sonoda, Y., et al.: Diagnostic performances of gpt-4o, claude 3 opus, and gemini 1.5 pro in \u201cdiagnosis please\u201d cases. Jpn. J. Radiol. 42(11), 1231\u20131235 (2024)","DOI":"10.1007\/s11604-024-01619-y"},{"issue":"1","key":"23_CR16","doi-asserted-by":"publisher","DOI":"10.1148\/radiol.240273","volume":"312","author":"PS Suh","year":"2024","unstructured":"Suh, P.S., et al.: Comparing diagnostic accuracy of radiologists versus gpt-4v and gemini pro vision using image inputs from diagnosis please cases. Radiology 312(1), e240273 (2024)","journal-title":"Radiology"},{"key":"23_CR17","doi-asserted-by":"crossref","unstructured":"Suthar, P.P., Kounsal, A., Chhetri, L., Saini, D., Dua, S.G.: Artificial intelligence (ai) in radiology: a deep dive into chatgpt 4.0\u2019s accuracy with the american journal of neuroradiology\u2019s (ajnr)\" case of the month\". Cureus 15(8) (2023)","DOI":"10.7759\/cureus.43958"},{"key":"23_CR18","unstructured":"Tan, M., Le, Q.: Efficientnet: rethinking model scaling for convolutional neural networks. In: International Conference on Machine Learning, pp. 6105\u20136114. 
PMLR (2019)"},{"key":"23_CR19","unstructured":"Tan, M., Le, Q.: Efficientnetv2: smaller models and faster training. In: International Conference on Machine Learning, pp. 10096\u201310106. PMLR (2021)"},{"key":"23_CR20","unstructured":"Team, G., et\u00a0al.: Gemma 3 technical report. arXiv preprint arXiv:2503.19786 (2025)"},{"issue":"8","key":"23_CR21","doi-asserted-by":"publisher","first-page":"1930","DOI":"10.1038\/s41591-023-02448-8","volume":"29","author":"AJ Thirunavukarasu","year":"2023","unstructured":"Thirunavukarasu, A.J., Ting, D.S.J., Elangovan, K., Gutierrez, L., Tan, T.F., Ting, D.S.W.: Large language models in medicine. Nat. Med. 29(8), 1930\u20131940 (2023)","journal-title":"Nat. Med."},{"key":"23_CR22","doi-asserted-by":"crossref","unstructured":"Tu, T., et\u00a0al.: Towards generalist biomedical ai. Nejm Ai 1(3), AIoa2300138 (2024)","DOI":"10.1056\/AIoa2300138"},{"issue":"1","key":"23_CR23","doi-asserted-by":"publisher","DOI":"10.1148\/radiol.231040","volume":"308","author":"D Ueda","year":"2023","unstructured":"Ueda, D., et al.: Diagnostic performance of chatgpt from patient history and imaging findings on the diagnosis please quizzes. Radiology 308(1), e231040 (2023)","journal-title":"Radiology"},{"key":"23_CR24","unstructured":"Wang, Y., et\u00a0al.: Mmlu-pro: a more robust and challenging multi-task language understanding benchmark. In: The Thirty-Eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track (2024)"},{"key":"23_CR25","doi-asserted-by":"crossref","unstructured":"Yue, X., et\u00a0al.: Mmmu: a massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9556\u20139567 (2024)","DOI":"10.1109\/CVPR52733.2024.00913"},{"key":"23_CR26","unstructured":"Yue, X., et\u00a0al.: Mmmu-pro: a more robust multi-discipline multimodal understanding benchmark. arXiv preprint arXiv:2409.02813 (2024)"},{"issue":"1","key":"23_CR27","doi-asserted-by":"publisher","first-page":"277","DOI":"10.1038\/s43856-024-00709-2","volume":"4","author":"X Zhang","year":"2024","unstructured":"Zhang, X., et al.: Development of a large-scale medical visual question-answering dataset. Commun. Med. 4(1), 277 (2024)","journal-title":"Commun. Med."},{"key":"23_CR28","unstructured":"Zhu, J., et\u00a0al.: Internvl3: exploring advanced training and test-time recipes for open-source multimodal models. 
arXiv preprint arXiv:2504.10479 (2025)"}],"container-title":["Lecture Notes in Computer Science","AI for Clinical Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-06004-4_23","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,22]],"date-time":"2025-09-22T17:21:51Z","timestamp":1758561711000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-06004-4_23"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,22]]},"ISBN":["9783032060037","9783032060044"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-06004-4_23","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,22]]},"assertion":[{"value":"22 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"CMLLMs","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Multimodal Large Language Models in Clinical Practice","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Daejeon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Korea (Republic of)","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"cmllms2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/clinicalmllms.github.io\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
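
The record above is a standard Crossref REST API "work" response, so it can be re-fetched and parsed directly. Below is a minimal sketch in Python, assuming network access: the works/{DOI} endpoint and the status/message-type/message envelope are the public Crossref API, while the mailto contact address is an illustrative placeholder.

import json
import urllib.request

DOI = "10.1007/978-3-032-06004-4_23"
# The mailto parameter is optional; it routes the request to Crossref's
# "polite pool". Replace the placeholder with a real contact address.
url = f"https://api.crossref.org/works/{DOI}?mailto=editor@example.org"

with urllib.request.urlopen(url) as resp:
    record = json.load(resp)

# A single-DOI lookup wraps the metadata in a status/message-type/message envelope.
assert record["status"] == "ok" and record["message-type"] == "work"
work = record["message"]

# Headline bibliographic fields. Note that "title" and "container-title"
# are arrays even when they hold only one or two entries.
title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))
series, volume_title = work["container-title"]

print(title)
print(authors)
print(f'{series}: {volume_title}, pp. {work["page"]}')
print(f'DOI {work["DOI"]}, {work["references-count"]} references')

The same envelope check applies to any works/{DOI} lookup; only the fields extracted here (title, author, container-title, page, DOI, references-count) are guaranteed by this particular record, so other records may omit some of them.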