{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T02:40:33Z","timestamp":1775011233920,"version":"3.50.1"},"reference-count":29,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"Natural Science Foundation of China","doi-asserted-by":"crossref","award":["12505412"],"award-info":[{"award-number":["12505412"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Natural Science Basic Research Program of Shaanxi","award":["2025JC-YBQN-1093"],"award-info":[{"award-number":["2025JC-YBQN-1093"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Med Syst"],"DOI":"10.1007\/s10916-026-02372-7","type":"journal-article","created":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T01:31:21Z","timestamp":1775007081000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Performance of Large Language Models on Exam-style Questions and Case Challenges Across Varying Levels of 
Complexity"],"prefix":"10.1007","volume":"50","author":[{"given":"Lei","family":"Xu","sequence":"first","affiliation":[]},{"given":"Wenzhe","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Yuxin","family":"Qin","sequence":"additional","affiliation":[]},{"given":"Jingyi","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,4,1]]},"reference":[{"issue":"3","key":"2372_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3641289","volume":"15","author":"Y Chang","year":"2024","unstructured":"Chang Y, Wang X, Wang J, Wu Y, Yang L, Zhu K, et al. A survey on evaluation of large language models. ACM transactions on intelligent systems and technology. 2024;15(3):1\u201345.","journal-title":"ACM transactions on intelligent systems and technology"},{"issue":"2","key":"2372_CR2","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3605943","volume":"56","author":"B Min","year":"2023","unstructured":"Min B, Ross H, Sulem E, Veyseh APB, Nguyen TH, Sainz O, et al. Recent advances in natural language processing via large pre-trained language models: A survey. ACM Computing Surveys. 2023;56(2):1\u201340.","journal-title":"ACM Computing Surveys"},{"key":"2372_CR3","unstructured":"Achiam J, Adler S, Agarwal S, Ahmad L, Akkaya I, Aleman FL, et al. Gpt-4 technical report. arXiv preprint arXiv:230308774. 2023."},{"key":"2372_CR4","unstructured":"Comanici G, Bieber E, Schaekermann M, Pasupat I, Sachdeva N, Dhillon I, et al. Gemini 2.5: Pushing the frontier with advanced reasoning, multimodality, long context, and next generation agentic capabilities. arXiv preprint arXiv:250706261. 2025."},{"issue":"8081","key":"2372_CR5","doi-asserted-by":"publisher","first-page":"633","DOI":"10.1038\/s41586-025-09422-z","volume":"645","author":"D Guo","year":"2025","unstructured":"Guo D, Yang D, Zhang H, Song J, Wang P, Zhu Q, et al. 
Deepseek-r1 incentivizes reasoning in llms through reinforcement learning. Nature. 2025;645(8081):633\u20138.","journal-title":"Nature"},{"issue":"3","key":"2372_CR6","doi-asserted-by":"publisher","first-page":"e14830","DOI":"10.2196\/14830","volume":"7","author":"F Li","year":"2019","unstructured":"Li F, Jin Y, Liu W, Rawat BPS, Cai P, Yu H. Fine-tuning bidirectional encoder representations from transformers (BERT)\u2013based models on large-scale electronic health record notes: an empirical study. JMIR medical informatics. 2019;7(3):e14830.","journal-title":"JMIR medical informatics"},{"issue":"1","key":"2372_CR7","doi-asserted-by":"publisher","first-page":"e60164","DOI":"10.2196\/60164","volume":"12","author":"M Nunes","year":"2024","unstructured":"Nunes M, Bone J, Ferreira JC, Elvas LB. Health care language models and their fine-tuning for information extraction: Scoping review. JMIR medical informatics. 2024;12(1):e60164.","journal-title":"JMIR medical informatics"},{"key":"2372_CR8","doi-asserted-by":"crossref","unstructured":"Zhang X, Talukdar N, Vemulapalli S, Ahn S, Wang J, Meng H, et al. Comparison of prompt engineering and fine-tuning strategies in large language models in the classification of clinical notes. AMIA Summits on Translational Science Proceedings. 2024;2024:478.","DOI":"10.1101\/2024.02.07.24302444"},{"issue":"1","key":"2372_CR9","doi-asserted-by":"publisher","first-page":"450","DOI":"10.1038\/s41746-025-01824-7","volume":"8","author":"Y Hao","year":"2025","unstructured":"Hao Y, Qiu Z, Holmes J, L\u00f6ckenhoff CE, Liu W, Ghassemi M, et al. Large language model integrations in cancer decision-making: a systematic review and meta-analysis. npj Digital Medicine. 
2025;8(1):450.","journal-title":"npj Digital Medicine"},{"issue":"7972","key":"2372_CR10","doi-asserted-by":"publisher","first-page":"172","DOI":"10.1038\/s41586-023-06291-2","volume":"620","author":"K Singhal","year":"2023","unstructured":"Singhal K, Azizi S, Tu T, Mahdavi SS, Wei J, Chung HW, et al. Large language models encode clinical knowledge. Nature. 2023;620(7972):172\u201380.","journal-title":"Nature"},{"issue":"3","key":"2372_CR11","doi-asserted-by":"publisher","first-page":"943","DOI":"10.1038\/s41591-024-03423-7","volume":"31","author":"K Singhal","year":"2025","unstructured":"Singhal K, Tu T, Gottweis J, Sayres R, Wulczyn E, Amin M, et al. Toward expert-level medical question answering with large language models. Nature Medicine. 2025;31(3):943\u201350.","journal-title":"Nature Medicine"},{"issue":"2","key":"2372_CR12","doi-asserted-by":"publisher","first-page":"e0000198","DOI":"10.1371\/journal.pdig.0000198","volume":"2","author":"TH Kung","year":"2023","unstructured":"Kung TH, Cheatham M, Medenilla A, Sillos C, De Leon L, Elepa\u00f1o C, et al. Performance of ChatGPT on USMLE: potential for AI-assisted medical education using large language models. PLoS digital health. 2023;2(2):e0000198.","journal-title":"PLoS digital health"},{"issue":"8","key":"2372_CR13","doi-asserted-by":"publisher","first-page":"e555-e61","DOI":"10.1016\/S2589-7500(24)00097-9","volume":"6","author":"DM Levine","year":"2024","unstructured":"Levine DM, Tuwani R, Kompa B, Varma A, Finlayson SG, Mehrotra A, et al. The diagnostic and triage accuracy of the GPT-3 artificial intelligence model: an observational study. The Lancet Digital Health. 2024;6(8):e555-e61.","journal-title":"The Lancet Digital Health"},{"issue":"1","key":"2372_CR14","doi-asserted-by":"publisher","first-page":"AIp2300031","DOI":"10.1056\/AIp2300031","volume":"1","author":"AV Eriksen","year":"2024","unstructured":"Eriksen AV, M\u00f6ller S, Ryg J. Use of GPT-4 to diagnose complex clinical cases. NEJM AI. 
2024;1(1):AIp2300031.","journal-title":"NEJM AI"},{"key":"2372_CR15","doi-asserted-by":"publisher","first-page":"e56110","DOI":"10.2196\/56110","volume":"26","author":"JM Hoppe","year":"2024","unstructured":"Hoppe JM, Auer MK, Str\u00fcven A, Massberg S, Stremmel C. ChatGPT with GPT-4 outperforms emergency department physicians in diagnostic accuracy: retrospective analysis. Journal of medical Internet research. 2024;26:e56110.","journal-title":"Journal of medical Internet research"},{"issue":"1","key":"2372_CR16","doi-asserted-by":"publisher","first-page":"4","DOI":"10.1186\/s44247-023-00058-5","volume":"2","author":"D Ueda","year":"2024","unstructured":"Ueda D, Walston SL, Matsumoto T, Deguchi R, Tatekawa H, Miki Y. Evaluating GPT-4-based ChatGPT\u2019s clinical potential on the NEJM quiz. BMC Digital Health. 2024;2(1):4.","journal-title":"BMC Digital Health"},{"key":"2372_CR17","doi-asserted-by":"publisher","first-page":"e53297","DOI":"10.2196\/53297","volume":"26","author":"L Masanneck","year":"2024","unstructured":"Masanneck L, Schmidt L, Seifert A, K\u00f6lsche T, Huntemann N, Jansen R, et al. Triage performance across large language models, ChatGPT, and untrained doctors in emergency medicine: comparative study. Journal of medical Internet research. 2024;26:e53297.","journal-title":"Journal of medical Internet research"},{"issue":"1","key":"2372_CR18","doi-asserted-by":"publisher","first-page":"72","DOI":"10.1186\/s12911-024-02459-6","volume":"24","author":"Y-J Park","year":"2024","unstructured":"Park Y-J, Pillai A, Deng J, Guo E, Gupta M, Paget M, et al. Assessing the research landscape and clinical utility of large language models: a scoping review. BMC Medical Informatics and Decision Making. 
2024;24(1):72.","journal-title":"BMC Medical Informatics and Decision Making"},{"issue":"1","key":"2372_CR19","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1038\/s43856-024-00717-2","volume":"5","author":"F Busch","year":"2025","unstructured":"Busch F, Hoffmann L, Rueger C, van Dijk EH, Kader R, Ortiz-Prado E, et al. Current applications and challenges in large language models for patient care: a systematic review. Communications Medicine. 2025;5(1):26.","journal-title":"Communications Medicine"},{"issue":"1","key":"2372_CR20","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s10916-025-02284-y","volume":"49","author":"L Xu","year":"2025","unstructured":"Xu L, Zhao W, Huang X. Diagnosis and Triage Performance of Contemporary Large Language Models on Short Clinical Vignettes. Journal of Medical Systems. 2025;49(1):1\u201312.","journal-title":"Journal of Medical Systems"},{"issue":"14","key":"2372_CR21","doi-asserted-by":"publisher","first-page":"6421","DOI":"10.3390\/app11146421","volume":"11","author":"D Jin","year":"2021","unstructured":"Jin D, Pan E, Oufattole N, Weng W-H, Fang H, Szolovits P. What disease does this patient have? a large-scale open domain question answering dataset from medical exams. Applied Sciences. 2021;11(14):6421.","journal-title":"Applied Sciences"},{"key":"2372_CR22","unstructured":"The New England Journal of Medicine Case challenges. (https:\/\/www.nejm.org\/case-challenges)."},{"key":"2372_CR23","doi-asserted-by":"crossref","unstructured":"Li\u00e9vin V, Hother CE, Motzfeldt AG, Winther O. Can large language models reason about medical questions? Patterns. 2024;5(3).","DOI":"10.1016\/j.patter.2024.100943"},{"key":"2372_CR24","unstructured":"Crocker L, Algina J. 
Introduction to classical and modern test theory: ERIC; 1986."},{"issue":"1","key":"2372_CR25","doi-asserted-by":"publisher","first-page":"43","DOI":"10.1186\/s44247-024-00096-7","volume":"2","author":"M Kopka","year":"2024","unstructured":"Kopka M, Feufel MA. Software symptomcheckR: an R package for analyzing and visualizing symptom checker triage performance. BMC Digital Health. 2024;2(1):43.","journal-title":"BMC Digital Health"},{"issue":"1","key":"2372_CR26","doi-asserted-by":"publisher","first-page":"e63430","DOI":"10.2196\/63430","volume":"10","author":"BT Bicknell","year":"2024","unstructured":"Bicknell BT, Butler D, Whalen S, Ricks J, Dixon CJ, Clark AB, et al. ChatGPT-4 Omni performance in USMLE disciplines and clinical skills: comparative analysis. JMIR Medical Education. 2024;10(1):e63430.","journal-title":"JMIR Medical Education"},{"issue":"3","key":"2372_CR27","doi-asserted-by":"publisher","first-page":"e10438","DOI":"10.1002\/lrh2.10438","volume":"8","author":"GW Rutledge","year":"2024","unstructured":"Rutledge GW. Diagnostic accuracy of GPT-4 on common clinical scenarios and challenging cases. Learning Health Systems. 2024;8(3):e10438.","journal-title":"Learning Health Systems"},{"key":"2372_CR28","doi-asserted-by":"crossref","unstructured":"Fox MP, MacLehose RF, Lash TL. Applying quantitative bias analysis to epidemiologic data: Springer; 2021.","DOI":"10.1007\/978-3-030-82673-4"},{"key":"2372_CR29","doi-asserted-by":"crossref","unstructured":"Li Y, Guo Y, Guerin F, Lin C, editors. An open-source data contamination report for large language models. 
Findings of the Association for Computational Linguistics: EMNLP 2024; 2024.","DOI":"10.18653\/v1\/2024.findings-emnlp.30"}],"container-title":["Journal of Medical Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10916-026-02372-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10916-026-02372-7","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10916-026-02372-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T01:31:25Z","timestamp":1775007085000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10916-026-02372-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4,1]]},"references-count":29,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2026,12]]}},"alternative-id":["2372"],"URL":"https:\/\/doi.org\/10.1007\/s10916-026-02372-7","relation":{},"ISSN":["1573-689X"],"issn-type":[{"value":"1573-689X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,4,1]]},"assertion":[{"value":"28 October 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 March 2026","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 April 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"This study was exempt from institutional review board (IRB) approval, and the requirement for informed consent was waived. 
The research was conducted exclusively using publicly available, anonymized datasets. Specifically, the MedQA-USMLE dataset is an open-source benchmark derived from publicly available examination preparation materials, and the NEJM Case Challenges dataset was curated from a public, educational case series. No human participants were involved in this study, and no private patient data or protected health information was accessed or used.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics Approval and Consent to Participate"}},{"value":"The authors declare no competing interests.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}},{"value":"Not applicable.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Clinical Trial Number"}}],"article-number":"42"}}