{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T17:42:12Z","timestamp":1772905332427,"version":"3.50.1"},"reference-count":185,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,12,27]],"date-time":"2024-12-27T00:00:00Z","timestamp":1735257600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,27]],"date-time":"2024-12-27T00:00:00Z","timestamp":1735257600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Med Syst"],"DOI":"10.1007\/s10916-024-02132-5","type":"journal-article","created":{"date-parts":[[2024,12,27]],"date-time":"2024-12-27T02:37:15Z","timestamp":1735267035000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":13,"title":["Applications and Future Prospects of Medical LLMs: A Survey Based on the M-KAT Conceptual Framework"],"prefix":"10.1007","volume":"48","author":[{"given":"Ying","family":"Chang","sequence":"first","affiliation":[]},{"given":"Jian-ming","family":"Yin","sequence":"additional","affiliation":[]},{"given":"Jian-min","family":"Li","sequence":"additional","affiliation":[]},{"given":"Chang","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Ling-yong","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Shu-yuan","family":"Lin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,27]]},"reference":[{"key":"2132_CR1","doi-asserted-by":"crossref","unstructured":"Biswas SS (2023) Role of Chat GPT in Public Health. 
Ann Biomed Eng, 51:868-869.10.1007\/s10439-023-03172-7","DOI":"10.1007\/s10439-023-03172-7"},{"key":"2132_CR2","unstructured":"Zhao WX, Zhou K, Li J, Tang T, Wang X, Hou Y, Min Y, Zhang B, Zhang J, Dong Z, Du Y, Yang C, Chen Y, Chen Z, Jiang J, Ren R, Li Y, Tang X, Liu Z, Liu P, Nie J, Wen J (2023) A Survey of Large Language Models. arXiv e-prints:2303-18223.10.48550\/arXiv.2303.18223"},{"key":"2132_CR3","unstructured":"OpenAI (2024) Learning to Reason with LLMs. OpenAI. https:\/\/openai.com\/index\/learning-to-reason-with-llms\/. Accessed 24 Nov, 2024"},{"key":"2132_CR4","doi-asserted-by":"crossref","unstructured":"He K, Mao R, Lin Q, Ruan Y, Lan X, Feng M, Cambria E (2023) A survey of large language models for healthcare: from data, technology, and applications to accountability and ethics. arXiv preprint arXiv:2310.05694","DOI":"10.2139\/ssrn.4809363"},{"key":"2132_CR5","unstructured":"Singh C, Inala JP, Galley M, Caruana R, Gao J (2024) Rethinking Interpretability in the Era of Large Language Models. arXiv preprint arXiv:2402.01761"},{"key":"2132_CR6","unstructured":"Devlin J, Chang M, Lee K, Toutanova K (2018) BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. arXiv e-prints:1810-4805.10.48550\/arXiv.1810.04805"},{"key":"2132_CR7","unstructured":"Liu Y, Ott M, Goyal N, Du J, Joshi M, Chen D, Levy O, Lewis M, Zettlemoyer L, Stoyanov V (2019) RoBERTa: A Robustly Optimized BERT Pretraining Approach. arXiv e-prints:1907-11692.10.48550\/arXiv.1907.11692"},{"key":"2132_CR8","first-page":"140","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel C, Shazeer N, Roberts A, Lee K, Narang S, Matena M, Zhou Y, Li W, Liu PJ (2020) Exploring the limits of transfer learning with a unified text-to-text transformer. J Mach Learn Res, 21:140","journal-title":"J Mach Learn Res"},{"key":"2132_CR9","unstructured":"Huang K, Altosaar J, Ranganath R (2019) ClinicalBERT: Modeling Clinical Notes and Predicting Hospital Readmission. 
arXiv e-prints:1904-5342.10.48550\/arXiv.1904.05342"},{"key":"2132_CR10","doi-asserted-by":"crossref","unstructured":"Gururangan S, Marasovi\u0107 A, Swayamdipta S, Lo K, Beltagy I, Downey D, Smith NA (2020) Don\u2019t Stop Pretraining: Adapt Language Models to Domains and Tasks. In; 1990\/7\/1; Online. Association for Computational Linguistics: 8342\u20138360.","DOI":"10.18653\/v1\/2020.acl-main.740"},{"key":"2132_CR11","doi-asserted-by":"crossref","unstructured":"Lu Q, Dou D, Nguyen T (2022) ClinicalT5: A Generative Language Model for Clinical Text. In; 1990\/12\/1; Abu Dhabi, United Arab Emirates. Association for Computational Linguistics: 5436\u20135443.","DOI":"10.18653\/v1\/2022.findings-emnlp.398"},{"key":"2132_CR12","unstructured":"Wang T, Roberts A, Hesslow D, Le Scao T, Chung HW, Beltagy I, Launay J, Raffel C (2022) What language model architecture and pretraining objective works best for zero-shot generalization? In International Conference on Machine Learning. pp. 22964\u201322984: PMLR;22964\u201322984."},{"key":"2132_CR13","unstructured":"Zhang B, Ghorbani B, Bapna A, Cheng Y, Garcia X, Shen J, Firat O (2022) Examining scaling and transfer of language model architectures for machine translation. In International Conference on Machine Learning. pp. 26176\u201326192: PMLR;26176\u201326192."},{"key":"2132_CR14","unstructured":"Garcia X, Bansal Y, Cherry C, Foster G, Krikun M, Johnson M, Firat O (2023) The unreasonable effectiveness of few-shot learning for machine translation. In International Conference on Machine Learning. pp. 10867\u201310878: PMLR;10867\u201310878."},{"key":"2132_CR15","unstructured":"Dong Y, Cordonnier J, Loukas A (2021) Attention is not all you need: Pure attention loses rank doubly exponentially with depth. In International Conference on Machine Learning. pp. 
2793\u20132803: PMLR;2793\u20132803."},{"key":"2132_CR16","unstructured":"Kaplan J, McCandlish S, Henighan T, Brown TB, Chess B, Child R, Gray S, Radford A, Wu J, Amodei D (2020) Scaling laws for neural language models. arXiv preprint arXiv:2001.08361"},{"key":"2132_CR17","unstructured":"Wei J, Tay Y, Bommasani R, Raffel C, Zoph B, Borgeaud S, Yogatama D, Bosma M, Zhou D, Metzler D (2022) Emergent abilities of large language models. arXiv preprint arXiv:2206.07682"},{"key":"2132_CR18","doi-asserted-by":"crossref","first-page":"e50865","DOI":"10.2196\/50865","volume":"25","author":"S Ziegelmayer","year":"2023","unstructured":"Ziegelmayer S, Marka AW, Lenhart N, Nehls N, Reischl S, Harder F, Sauter A, Makowski M, Graf M, Gawlitza J (2023) Evaluation of GPT-4\u2019s Chest X-Ray Impression Generation: A Reader Study on Performance and Perception. J Med Internet Res, 25:e50865","journal-title":"J Med Internet Res"},{"issue":"5","key":"2132_CR19","first-page":"e34752","volume":"15","author":"HM Akhter","year":"2023","unstructured":"Akhter HM, Cooper JS (2023) Acute pulmonary edema after hyperbaric oxygen treatment: a case report written with ChatGPT assistance. Cureus, 15(2):e34752","journal-title":"Cureus"},{"issue":"5","key":"2132_CR20","doi-asserted-by":"crossref","first-page":"1195","DOI":"10.1111\/epi.17570","volume":"64","author":"CM Bo\u00dfelmann","year":"2023","unstructured":"Bo\u00dfelmann CM, Leu C, Lal D (2023) Are AI language models such as ChatGPT ready to improve the care of individuals with epilepsy? Epilepsia (Series 4), 64(5): 1195-1199.","journal-title":"Epilepsia (Series 4)"},{"key":"2132_CR21","doi-asserted-by":"crossref","first-page":"4541","DOI":"10.3390\/ijerph20054541","volume":"20","author":"D Jungwirth","year":"2023","unstructured":"Jungwirth D, Haluza D (2023) Artificial intelligence and public health: an exploratory study. 
Int J Environ Res Public Health, 20:4541","journal-title":"Int J Environ Res Public Health"},{"key":"2132_CR22","unstructured":"Wu C, Lei J, Zheng Q, Zhao W, Lin W, Zhang X, Zhou X, Zhao Z, Zhang Y, Wang Y (2023) Can gpt-4v (ision) serve medical applications? case studies on gpt-4v for multimodal medical diagnosis. arXiv preprint arXiv:2310.09909"},{"key":"2132_CR23","doi-asserted-by":"crossref","first-page":"e47621","DOI":"10.2196\/47621","volume":"25","author":"T Kuroiwa","year":"2023","unstructured":"Kuroiwa T, Sarcon A, Ibara T, Yamada E, Yamamoto A, Tsukamoto K, Fujita K (2023) The potential of ChatGPT as a self-diagnostic tool in common orthopedic diseases: exploratory study. J Med Internet Res, 25:e47621","journal-title":"J Med Internet Res"},{"key":"2132_CR24","doi-asserted-by":"crossref","first-page":"pkad10","DOI":"10.1093\/jncics\/pkad010","volume":"7","author":"AM Hopkins","year":"2023","unstructured":"Hopkins AM, Logan JM, Kichenadasse G, Sorich MJ (2023) Artificial intelligence chatbots will revolutionize how cancer patients access information: ChatGPT represents a paradigm-shift. JNCI Cancer Spectr, 7:pkad10","journal-title":"JNCI Cancer Spectr"},{"issue":"2","key":"2132_CR25","doi-asserted-by":"crossref","first-page":"108013","DOI":"10.1016\/j.cmpb.2024.108013","volume":"245","author":"J Li","year":"2024","unstructured":"Li J, Dada A, Puladi B, Kleesiek J, Egger J (2024) ChatGPT in healthcare: a taxonomy and systematic review. Comput Methods Programs Biomed, 245(2): 108013","journal-title":"Comput Methods Programs Biomed"},{"key":"2132_CR26","doi-asserted-by":"crossref","first-page":"e50638","DOI":"10.2196\/50638","volume":"25","author":"B Mesk\u00f3","year":"2023","unstructured":"Mesk\u00f3 B (2023) Prompt engineering as an important emerging skill for medical professionals: tutorial. 
J Med Internet Res, 25:e50638","journal-title":"J Med Internet Res"},{"key":"2132_CR27","doi-asserted-by":"crossref","first-page":"1799","DOI":"10.1016\/j.acra.2023.11.002","volume":"31","author":"D Nguyen","year":"2024","unstructured":"Nguyen D, Swanson D, Newbury A, Kim YH (2024) Evaluation of ChatGPT and Google Bard using prompt engineering in cancer screening algorithms. Acad Radiol, 31:1799\u20131804","journal-title":"Acad Radiol"},{"key":"2132_CR28","volume-title":"Danger, Danger, Gaston Labat! Does zero-shot artificial intelligence correlate with anticoagulation guidelines recommendations for neuraxial anesthesia?","author":"NC Hurley","year":"2024","unstructured":"Hurley NC, Gupta RK, Schroeder KM, Hess AS (2024) Danger, Danger, Gaston Labat! Does zero-shot artificial intelligence correlate with anticoagulation guidelines recommendations for neuraxial anesthesia? Regional Anesthesia & Pain Medicine"},{"key":"2132_CR29","doi-asserted-by":"crossref","first-page":"e52113","DOI":"10.2196\/52113","volume":"26","author":"A Herrmann-Werner","year":"2024","unstructured":"Herrmann-Werner A, Festl-Wietek T, Holderried F, Herschbach L, Griewatz J, Masters K, Zipfel S, Mahling M (2024) Assessing ChatGPT\u2019s Mastery of Bloom\u2019s Taxonomy using psychosomatic medicine exam questions: mixed-methods study. J Med Internet Res, 26:e52113","journal-title":"J Med Internet Res"},{"key":"2132_CR30","doi-asserted-by":"crossref","unstructured":"Chong M, Zihao W, Jiaqi W, Shaochen X, Yaonai W, Fang Z, Zhengliang L, Xi J, Lei G, Xiaoyan C, Shu Z, Tuo Z, Dajiang Z, Dinggang S, Tianming L, Xiang L (2024) An Iterative Optimizing Framework for Radiology Report Summarization With ChatGPT. IEEE Trans Artif Intell, 5:4163-4175.10.1109\/TAI.2024.3364586","DOI":"10.1109\/TAI.2024.3364586"},{"key":"2132_CR31","doi-asserted-by":"crossref","unstructured":"C. M, Z. W, J. W, S. X, Y. W, Z. L, F. Z, X. J, L. G, X. C, S. Z, T. Z, D. Z, D. S, T. L, X. 
L (2024) An Iterative Optimizing Framework for Radiology Report Summarization With ChatGPT. IEEE Trans Artif Intell, 5:4163-4175.10.1109\/TAI.2024.3364586","DOI":"10.1109\/TAI.2024.3364586"},{"key":"2132_CR32","doi-asserted-by":"crossref","unstructured":"Jiang X, Yan L, Vavekanand R, Hu M (2023) Large Language Models in Healthcare Current Development and Future Directions. In Generative AI Research; 2023\/1\/1; Remote, Hong Kong SAR China.","DOI":"10.20944\/preprints202407.0923.v1"},{"key":"2132_CR33","doi-asserted-by":"crossref","unstructured":"Singhal K, Azizi S, Tu T, Mahdavi SS, Wei J, Chung HW, Scales N, Tanwani A, Cole-Lewis H, Pfohl S, Payne P, Seneviratne M, Gamble P, Kelly C, Babiker A, Sch\u00e4rli N, Chowdhery A, Mansfield P, Demner-Fushman D, Ag\u00fcera Y Arcas B, Webster D, Corrado GS, Matias Y, Chou K, Gottweis J, Tomasev N, Liu Y, Rajkomar A, Barral J, Semturs C, Karthikesalingam A, Natarajan V (2023) Large language models encode clinical knowledge. Nature, 620:172-180.10.1038\/s41586-023-06291-2","DOI":"10.1038\/s41586-023-06291-2"},{"key":"2132_CR34","doi-asserted-by":"crossref","unstructured":"Minssen T, Vayena E, Cohen IG (2023) The Challenges for Regulating Medical Use of ChatGPT and Other Large Language Models. JAMA, 330:315-316.10.1001\/jama.2023.9651","DOI":"10.1001\/jama.2023.9651"},{"key":"2132_CR35","unstructured":"Chen X, Xiang J, Lu S, Liu Y, He M, Shi D (2024) Evaluating large language models in medical applications: a survey. arXiv e-prints:2405-7468.10.48550\/arXiv.2405.07468"},{"key":"2132_CR36","doi-asserted-by":"crossref","unstructured":"Shah NH, Entwistle D, Pfeffer MA (2023) Creation and Adoption of Large Language Models in Medicine. 
JAMA, 330:866-869.10.1001\/jama.2023.14217","DOI":"10.1001\/jama.2023.14217"},{"key":"2132_CR37","doi-asserted-by":"crossref","first-page":"24","DOI":"10.1109\/MSPEC.2019.8678513","volume":"56","author":"E Strickland","year":"2019","unstructured":"Strickland E (2019) IBM Watson, heal thyself: How IBM overpromised and underdelivered on AI health care. IEEE Spectr, 56:24\u201331","journal-title":"IEEE Spectr"},{"key":"2132_CR38","doi-asserted-by":"crossref","unstructured":"Gekhman Z, Yona G, Aharoni R, Eyal M, Feder A, Reichart R, Herzig J (2024) Does Fine-Tuning LLMs on New Knowledge Encourage Hallucinations? In; 1990\/11\/1; Miami, Florida, USA. Association for Computational Linguistics: 7765\u20137784.","DOI":"10.18653\/v1\/2024.emnlp-main.444"},{"key":"2132_CR39","doi-asserted-by":"crossref","unstructured":"Luo R, Sun L, Xia Y, Qin T, Zhang S, Poon H, Liu T (2022) BioGPT: generative pre-trained transformer for biomedical text generation and mining. Briefings in Bioinformatics, 23.10.1093\/bib\/bbac409","DOI":"10.1093\/bib\/bbac409"},{"key":"2132_CR40","unstructured":"Bolton E, Venigalla A, Yasunaga M, Hall D, Xiong B, Lee T, Daneshjou R, Frankle J, Liang P, Carbin M, Manning CD (2024) BioMedLM: A 2.7B Parameter Language Model Trained On Biomedical Text. arXiv e-prints:2403-18421.10.48550\/arXiv.2403.18421"},{"key":"2132_CR41","doi-asserted-by":"crossref","unstructured":"Gu Y, Tinn R, Cheng H, Lucas M, Usuyama N, Liu X, Naumann T, Gao J, Poon H (2021) Domain-Specific Language Model Pretraining for Biomedical Natural Language Processing. ACM Trans. Comput. 
Healthcare, 3:2.10.1145\/3458754","DOI":"10.1145\/3458754"},{"key":"2132_CR42","doi-asserted-by":"crossref","first-page":"210","DOI":"10.1038\/s41746-023-00958-w","volume":"6","author":"C Peng","year":"2023","unstructured":"Peng C, Yang X, Chen A, Smith KE, PourNejatian N, Costa AB, Martin C, Flores MG, Zhang Y, Magoc T (2023) A study of generative large language model for medical research and healthcare. NPJ Digit Med, 6:210","journal-title":"NPJ Digit Med"},{"key":"2132_CR43","unstructured":"Wen C, Sun X, Zhao S, Fang X, Chen L, Zou W (2023) Chathome: Development and evaluation of a domain-specific language model for home renovation. arXiv preprint arXiv:2307.15290"},{"key":"2132_CR44","unstructured":"Xie SM, Pham H, Dong X, Du N, Liu H, Lu Y, Liang PS, Le QV, Ma T, Yu AW (2023) DoReMi: Optimizing Data Mixtures Speeds Up Language Model Pretraining. pp. 69798\u201369818;69798\u201369818."},{"key":"2132_CR45","unstructured":"Shi H, Xu Z, Wang H, Qin W, Wang W, Wang Y, Wang H (2024) Continual Learning of Large Language Models: A Comprehensive Survey. arXiv preprint arXiv:2404.16789"},{"key":"2132_CR46","unstructured":"Li Q, Yang X, Wang H, Wang Q, Liu L, Wang J, Zhang Y, Chu M, Hu S, Chen Y, Shen Y, Fan C, Zhang W, Xu T, Gu J, Zheng J, Group GZA (2023) From Beginner to Expert: Modeling Medical Knowledge into General LLMs. arXiv e-prints:1040-2312.10.48550\/arXiv.2312.01040"},{"key":"2132_CR47","unstructured":"Chen Z, Hern\u00e1ndez Cano A, Romanou A, Bonnet A, Matoba K, Salvi F, Pagliardini M, Fan S, K\u00f6pf A, Mohtashami A, Sallinen A, Sakhaeirad A, Swamy V, Krawczuk I, Bayazit D, Marmet A, Montariol S, Hartley M, Jaggi M, Bosselut A (2023) MEDITRON-70B: Scaling Medical Pretraining for Large Language Models. arXiv e-prints:2311-16079.10.48550\/arXiv.2311.16079"},{"key":"2132_CR48","unstructured":"Yingxin X, Zongbao Y, Yuchen L, Jinlong H, Shoubing D (2024) Interpretable biomedical reasoning based on deep fusion of knowledge graph and pre-trained language model. 
Acta Scientiarum Naturalium Universitatis Pekinensis, 60:62-70.10.13209\/j.0479-8023.2023.073"},{"key":"2132_CR49","doi-asserted-by":"crossref","unstructured":"Liu F, Zhang T, Dai W, Zhang C, Cai W, Zhou X, Chen D (2024) Few-shot adaptation of multi-modal foundation models: a survey. Artif Intell Rev, 57:268.10.1007\/s10462-024-10915-y","DOI":"10.1007\/s10462-024-10915-y"},{"key":"2132_CR50","unstructured":"Zhang X, Tian C, Yang X, Chen L, Li Z, Petzold LR (2023) AlpaCare:Instruction-tuned Large Language Models for Medical Application. arXiv e-prints:2310-14558.10.48550\/arXiv.2310.14558"},{"key":"2132_CR51","unstructured":"Zhang S, Dong L, Li X, Zhang S, Sun X, Wang S, Li J, Hu R, Zhang T, Wu F, Wang G (2023) Instruction Tuning for Large Language Models: A Survey. arXiv e-prints:2308-10792.10.48550\/arXiv.2308.10792"},{"key":"2132_CR52","doi-asserted-by":"crossref","unstructured":"Liu X, Ji K, Fu Y, Tam WL, Du Z, Yang Z, Tang J (2021) P-Tuning v2: Prompt Tuning Can Be Comparable to Fine-tuning Universally Across Scales and Tasks. arXiv e-prints:2110-7602.10.48550\/arXiv.2110.07602","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"2132_CR53","unstructured":"Hu EJ, Shen Y, Wallis P, Allen-Zhu Z, Li Y, Wang S, Wang L, Chen W (2021) LoRA: Low-Rank Adaptation of Large Language Models. arXiv e-prints:2106-9685.10.48550\/arXiv.2106.09685"},{"key":"2132_CR54","doi-asserted-by":"crossref","unstructured":"Wang R, Duan Y, Lam C, Chen J, Xu J, Chen H, Liu X, Pang PC, Tan T (2024) IvyGPT: InteractiVe Chinese Pathway Language Model in Medical Domain. In; Singapore. Springer Nature Singapore: 378\u2013382.","DOI":"10.1007\/978-981-99-9119-8_34"},{"key":"2132_CR55","unstructured":"Yang S, Zhao H, Zhu S, Zhou G, Xu H, Jia Y, Zan H (2023) Zhongjing: Enhancing the Chinese Medical Capabilities of Large Language Model through Expert Feedback and Real-world Multi-turn Dialogue. 
arXiv e-prints:2308-3549.10.48550\/arXiv.2308.03549"},{"key":"2132_CR56","unstructured":"Ye Q, Liu J, Chong D, Zhou P, Hua Y, Liu F, Cao M, Wang Z, Cheng X, Lei Z, Guo Z (2023) Qilin-Med: Multi-stage Knowledge Injection Advanced Medical Large Language Model. arXiv e-prints:2310-9089.10.48550\/arXiv.2310.09089"},{"key":"2132_CR57","unstructured":"Zhang K, Yu J, Yan Z, Liu Y, Adhikarla E, Fu S, Chen X, Chen C, Zhou Y, Li X (2023) Biomedgpt: A unified and generalist biomedical generative pre-trained transformer for vision, language, and multimodal tasks. arXiv preprint arXiv:2305.17100"},{"key":"2132_CR58","doi-asserted-by":"crossref","unstructured":"Zhang H, Chen J, Jiang F, Yu F, Chen Z, Chen G, Li J, Wu X, Zhiyi Z, Xiao Q, Wan X, Wang B, Li H (2023) HuatuoGPT, Towards Taming Language Model to Be a Doctor. In; 1990\/12\/1; Singapore. Association for Computational Linguistics: 10859\u201310885.","DOI":"10.18653\/v1\/2023.findings-emnlp.725"},{"key":"2132_CR59","unstructured":"Wang G, Bai L, Nah WJ, Wang J, Zhang Z, Chen Z, Wu J, Islam M, Liu H, Ren H (2024) Surgical-LVLM: Learning to Adapt Large Vision-Language Model for Grounded Visual Question Answering in Robotic Surgery. arXiv preprint arXiv:2405.10948"},{"key":"2132_CR60","unstructured":"Li M, Huang J, Yeung J, Blaes A, Johnson S, Liu H, Xu H, Zhang R (2024) CancerLLM: A Large Language Model in Cancer Domain. arXiv e-prints:2406-10459.10.48550\/arXiv.2406.10459"},{"key":"2132_CR61","doi-asserted-by":"crossref","unstructured":"Hua R, Dong X, Wei Y, Shu Z, Yang P, Hu Y, Zhou S, Sun H, Yan K, Yan X, Chang K, Li X, Bai Y, Zhang R, Wang W, Zhou X (2024) Lingdan: enhancing encoding of traditional Chinese medicine knowledge for clinical reasoning tasks with large language models. 
Journal of the American Medical Informatics Association, 31:2019-2029.10.1093\/jamia\/ocae087","DOI":"10.1093\/jamia\/ocae087"},{"key":"2132_CR62","unstructured":"den Hamer DM, Schoor P, Polak TB, Kapitan D (2023) Improving patient pre-screening for clinical trials: assisting physicians with large language models. arXiv"},{"key":"2132_CR63","unstructured":"Gururajan AK, Lopez-Cuena E, Bayarri-Planas J, Tormos A, Hinjos D, Bernabeu-Perez P, Arias-Duart A, Agustin Martin-Torres P, Urcelay-Ganzabal L, Gonzalez-Mallo M, Alvarez-Napagao S, Ayguad\u00e9-Parra E, Garcia-Gasulla UCD (2024) Aloe: A Family of Fine-tuned Open Healthcare LLMs. arXiv e-prints:1886-2405.10.48550\/arXiv.2405.01886"},{"key":"2132_CR64","unstructured":"Han T, Adams LC, Papaioannou J, Grundmann P, Oberhauser T, L\u00f6ser A, Truhn D, Bressem KK (2023) MedAlpaca\u2013an open-source collection of medical conversational AI models and training data. arXiv preprint arXiv:2304.08247"},{"key":"2132_CR65","doi-asserted-by":"crossref","unstructured":"Zhou J, Jiang C, Shen W, Zhou X, He X (2024) Leveraging Web-Crawled Data for High-Quality Fine-Tuning. arXiv e-prints:2408-8003.10.48550\/arXiv.2408.08003","DOI":"10.18653\/v1\/2024.findings-emnlp.660"},{"key":"2132_CR66","unstructured":"Wang Z, Bi B, Pentyala SK, Ramnath K, Chaudhuri S, Mehrotra S, Zixu, Zhu, Mao X, Asur S, Na, Cheng (2024) A Comprehensive Survey of LLM Alignment Techniques: RLHF, RLAIF, PPO, DPO and More. arXiv e-prints:2407-16216.10.48550\/arXiv.2407.16216"},{"key":"2132_CR67","unstructured":"Rafailov R, Sharma A, Mitchell E, Ermon S, Manning CD, Finn C (2024) Direct preference optimization: your language model is secretly a reward model. In Proceedings of the 37th International Conference on Neural Information Processing Systems; New Orleans, LA, USA. 
Curran Associates Inc.: 2338."},{"key":"2132_CR68","unstructured":"Xu S, Fu W, Gao J, Ye W, Liu W, Mei Z, Wang G, Yu C, Wu Y (2024) Is DPO Superior to PPO for LLM Alignment? A Comprehensive Study. arXiv e-prints:2404-10719.10.48550\/arXiv.2404.10719"},{"key":"2132_CR69","volume-title":"Optimal Treatment Strategies for Critical Patients with Deep Reinforcement Learning","author":"S Job","year":"2024","unstructured":"Job S, Tao X, Li L, Xie H, Cai T, Yong J, Li Q (2024) Optimal Treatment Strategies for Critical Patients with Deep Reinforcement Learning. ACM Trans Intell Syst Technol"},{"key":"2132_CR70","doi-asserted-by":"crossref","first-page":"695","DOI":"10.1021\/acs.jcim.2c01191","volume":"63","author":"YA Ivanenkov","year":"2023","unstructured":"Ivanenkov YA, Polykovskiy D, Bezrukov D, Zagribelnyy B, Aladinskiy V, Kamya P, Aliper A, Ren F, Zhavoronkov A (2023) Chemistry42: an AI-driven platform for molecular design and optimization. J Chem Inf Model, 63:695\u2013701","journal-title":"J Chem Inf Model"},{"key":"2132_CR71","unstructured":"Liu F (2020) Learning to summarize from human feedback. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics."},{"key":"2132_CR72","first-page":"27730","volume":"35","author":"L Ouyang","year":"2022","unstructured":"Ouyang L, Wu J, Jiang X, Almeida D, Wainwright C, Mishkin P, Zhang C, Agarwal S, Slama K, Ray A (2022) Training language models to follow instructions with human feedback. Adv Neural Inf Process Syst, 35:27730\u201327744","journal-title":"Adv Neural Inf Process Syst"},{"key":"2132_CR73","unstructured":"Christiano PF, Leike J, Brown T, Martic M, Legg S, Amodei D (2017) Deep reinforcement learning from human preferences. Adv Neural Inf Process Syst, 30"},{"key":"2132_CR74","unstructured":"Ziegler DM, Stiennon N, Wu J, Brown TB, Radford A, Amodei D, Christiano P, Irving G (2019) Fine-tuning language models from human preferences. 
arXiv preprint arXiv:1909.08593"},{"key":"2132_CR75","unstructured":"Wang G, Yang G, Du Z, Fan L, Li X (2023) ClinicalGPT: large language models finetuned with diverse medical data and comprehensive evaluation. arXiv preprint arXiv:2306.09968"},{"key":"2132_CR76","doi-asserted-by":"crossref","first-page":"147","DOI":"10.1007\/978-3-031-53720-2_15","volume-title":"Reinforcement Learning Methods in Speech and Language Technology","author":"B Lin","year":"2025","unstructured":"Lin B (2025) Reinforcement Learning in Large Language Models (LLMs): The Rise of AI Language Giants. In Reinforcement Learning Methods in Speech and Language Technology. Cham: Springer Nature Switzerland; 147\u2013156"},{"key":"2132_CR77","unstructured":"Liu Z, Zhong A, Li Y, Yang L, Ju C, Wu Z, Ma C, Shu P, Chen C, Kim S, Dai H, Zhao L, Sun L, Zhu D, Liu J, Liu W, Shen D, Li X, Li Q, Liu T (2023) Radiology-GPT: A Large Language Model for Radiology. arXiv e-prints:2306-8666.10.48550\/arXiv.2306.08666"},{"key":"2132_CR78","doi-asserted-by":"crossref","unstructured":"Wu C, Lin W, Zhang X, Zhang Y, Xie W, Wang Y (2024) PMC-LLaMA: toward building open-source language models for medicine. Journal of the American Medical Informatics Association, 31:1833-1843.10.1093\/jamia\/ocae045","DOI":"10.1093\/jamia\/ocae045"},{"key":"2132_CR79","unstructured":"Wang H, Liu C, Xi N, Qiang Z, Zhao S, Qin B, Liu T (2023) Huatuo: Tuning llama model with chinese medical knowledge. arXiv preprint arXiv:2304.06975"},{"key":"2132_CR80","unstructured":"Christophe C, Kanithi PK, Raha T, Khan S, Pimentel MA (2024) Med42-v2: A Suite of Clinical LLMs. arXiv e-prints:2408-6142.10.48550\/arXiv.2408.06142"},{"key":"2132_CR81","doi-asserted-by":"crossref","unstructured":"Griot M, Hemptinne C, Vanderdonckt J, Yuksel D (2024) Impact of high-quality, mixed-domain data on the performance of medical language models. 
Journal of the American Medical Informatics Association, 31:1875-1883.10.1093\/jamia\/ocae120","DOI":"10.1093\/jamia\/ocae120"},{"key":"2132_CR82","doi-asserted-by":"crossref","unstructured":"Labrak Y, Bazoge A, Morin E, Gourraud P, Rouvier M, Dufour R (2024) BioMistral A Collection of Open-Source Pretrained Large Language Models for Medical Domains. In; 1990\/8\/1; Bangkok, Thailand. Association for Computational Linguistics: 5848\u20135864.","DOI":"10.18653\/v1\/2024.findings-acl.348"},{"key":"2132_CR83","unstructured":"Singhal K, Tu T, Gottweis J, Sayres R, Wulczyn E, Hou L, Clark K, Pfohl S, Cole-Lewis H, Neal D (2023) Towards expert-level medical question answering with large language models. arXiv preprint arXiv:2305.09617"},{"key":"2132_CR84","doi-asserted-by":"crossref","unstructured":"Li Y, Li Z, Zhang K, Dan R, Jiang S, Zhang Y (2023) ChatDoctor: A Medical Chat Model Fine-Tuned on a Large Language Model Meta-AI (LLaMA) Using Medical Domain Knowledge. Cureus, 15:e40895.10.7759\/cureus.40895","DOI":"10.7759\/cureus.40895"},{"key":"2132_CR85","unstructured":"Chen Y, Wang Z, Xing X, Xu Z, Fang K, Wang J, Li S, Wu J, Liu Q, Xu X (2023) Bianque: Balancing the questioning and suggestion ability of health llms with multi-turn health conversations polished by chatgpt. arXiv preprint arXiv:2310.15896"},{"key":"2132_CR86","unstructured":"Xiong H, Wang S, Zhu Y, Zhao Z, Liu Y, Huang L, Wang Q, Shen D (2023) Doctorglm: Fine-tuning your chinese doctor is not a herculean task. arXiv preprint arXiv:2304.01097"},{"key":"2132_CR87","doi-asserted-by":"crossref","unstructured":"Qiu P, Wu C, Zhang X, Lin W, Wang H, Zhang Y, Wang Y, Xie W (2024) Towards building multilingual language model for medicine. 
Nat Commun, 15:8384.10.1038\/s41467-024-52417-z","DOI":"10.1038\/s41467-024-52417-z"},{"key":"2132_CR88","unstructured":"Luo Y, Zhang J, Fan S, Yang K, Wu Y, Qiao M, Nie Z (2023) Biomedgpt: Open multimodal generative pre-trained transformer for biomedicine. arXiv preprint arXiv:2308.09442"},{"key":"2132_CR89","doi-asserted-by":"crossref","unstructured":"Chen B, Cheng X, Li P, Geng Y, Gong J, Li S, Bei Z, Tan X, Wang B, Zeng X (2024) xTrimoPGLM: unified 100B-scale pre-trained transformer for deciphering the language of protein. arXiv preprint arXiv:2401.06199","DOI":"10.1101\/2023.07.05.547496"},{"key":"2132_CR90","unstructured":"Liu JM, Li D, Cao H, Ren T, Liao Z, Wu J (2023) ChatCounselor: A Large Language Models for Mental Health Support. arXiv e-prints:2309-15461.10.48550\/arXiv.2309.15461"},{"key":"2132_CR91","doi-asserted-by":"crossref","unstructured":"E. T, C. G (2021) A Survey on Explainable Artificial Intelligence (XAI): Toward Medical XAI. IEEE Trans Neural Netw Learn Syst, 32:4793-4813.10.1109\/TNNLS.2020.3027314","DOI":"10.1109\/TNNLS.2020.3027314"},{"key":"2132_CR92","unstructured":"Gu Z, Yin C, Liu F, Zhang P (2024) MedVH: Towards Systematic Evaluation of Hallucination for Large Vision Language Models in the Medical Context. arXiv e-prints:2407-2730.10.48550\/arXiv.2407.02730"},{"key":"2132_CR93","unstructured":"Edwards G, Nilsson S, Rozemberczki B, Papa E (2021) Explainable Biomedical Recommendations via Reinforcement Learning Reasoning on Knowledge Graphs. arXiv e-prints:2111-10625.10.48550\/arXiv.2111.10625"},{"key":"2132_CR94","doi-asserted-by":"crossref","unstructured":"Z. S, W. D, J. S, Z. H (2024) Interpretable Disease Progression Prediction Based on Reinforcement Reasoning Over a Knowledge Graph. 
IEEE Transactions on Systems, Man, and Cybernetics: Systems, 54:1948-1959.10.1109\/TSMC.2023.3331847","DOI":"10.1109\/TSMC.2023.3331847"},{"key":"2132_CR95","doi-asserted-by":"crossref","first-page":"831","DOI":"10.1001\/jama.297.8.831","volume":"297","author":"S Kripalani","year":"2007","unstructured":"Kripalani S, LeFevre F, Phillips CO, Williams MV, Basaviah P, Baker DW (2007) Deficits in communication and information transfer between hospital-based and primary care physicians: implications for patient safety and continuity of care. JAMA, 297:831\u2013841","journal-title":"JAMA"},{"key":"2132_CR96","doi-asserted-by":"crossref","unstructured":"Wang L, Wan Z, Ni C, Song Q, Li Y, Clayton E, Malin B, Yin Z (2024) Applications and Concerns of ChatGPT and Other Conversational Large Language Models in Health Care: Systematic Review. J Med Internet Res, 26:e22769.10.2196\/22769","DOI":"10.2196\/22769"},{"key":"2132_CR97","doi-asserted-by":"crossref","first-page":"172","DOI":"10.1038\/s41586-023-06291-2","volume":"620","author":"K Singhal","year":"2023","unstructured":"Singhal K, Azizi S, Tu T, Mahdavi SS, Wei J, Chung HW, Scales N, Tanwani A, Cole-Lewis H, Pfohl S (2023) Large language models encode clinical knowledge. Nature, 620:172\u2013180","journal-title":"Nature"},{"key":"2132_CR98","doi-asserted-by":"crossref","first-page":"1234","DOI":"10.1093\/bioinformatics\/btz682","volume":"36","author":"J Lee","year":"2020","unstructured":"Lee J, Yoon W, Kim S, Kim D, Kim S, So CH, Kang J (2020) BioBERT: a pre-trained biomedical language representation model for biomedical text mining. 
Bioinformatics, 36:1234\u20131240","journal-title":"Bioinformatics"},{"key":"2132_CR99","doi-asserted-by":"crossref","first-page":"891","DOI":"10.3390\/ph16060891","volume":"16","author":"A Blanco-Gonzalez","year":"2023","unstructured":"Blanco-Gonzalez A, Cabezon A, Seco-Gonzalez A, Conde-Torres D, Antelo-Riveiro P, Pineiro A, Garcia-Fandino R (2023) The role of ai in drug discovery: challenges, opportunities, and strategies. Pharmaceuticals (Basel), 16:891","journal-title":"Pharmaceuticals (Basel)"},{"key":"2132_CR100","doi-asserted-by":"crossref","first-page":"4164","DOI":"10.1038\/s41598-023-31412-2","volume":"13","author":"A Lahat","year":"2023","unstructured":"Lahat A, Shachar E, Avidan B, Shatz Z, Glicksberg BS, Klang E (2023) Evaluating the use of large language model in identifying top research questions in gastroenterology. Sci Rep, 13:4164","journal-title":"Sci Rep"},{"key":"2132_CR101","doi-asserted-by":"crossref","unstructured":"Van Veen D, Van Uden C, Blankemeier L, Delbrouck J, Aali A, Bluethgen C, Pareek A, Polacin M, Reis EP, Seehofnerov\u00e1 A, Rohatgi N, Hosamani P, Collins W, Ahuja N, Langlotz CP, Hom J, Gatidis S, Pauly J, Chaudhari AS (2024) Adapted large language models can outperform medical experts in clinical text summarization. Nat Med, 30:1134-1142.10.1038\/s41591-024-02855-5","DOI":"10.1038\/s41591-024-02855-5"},{"key":"2132_CR102","doi-asserted-by":"crossref","unstructured":"White R, Peng T, Sripitak P, Johansen AR, Snyder M (2023) CliniDigest: A Case Study in Large Language Model Based Large-Scale Summarization of Clinical Trial Descriptions. In Proceedings of the 2023 ACM Conference on Information Technology for Social Good; Lisbon, Portugal. 
Association for Computing Machinery: 396\u2013402.","DOI":"10.1145\/3582515.3609559"},{"key":"2132_CR103","doi-asserted-by":"crossref","unstructured":"Tang L, Sun Z, Idnay B, Nestor JG, Soroush A, Elias PA, Xu Z, Ding Y, Durrett G, Rousseau JF, Weng C, Peng Y (2023) Evaluating large language models on medical evidence summarization. NPJ Digit Med, 6:158.10.1038\/s41746-023-00896-7","DOI":"10.1038\/s41746-023-00896-7"},{"key":"2132_CR104","doi-asserted-by":"crossref","unstructured":"Mugaanyi J, Cai L, Cheng S, Lu C, Huang J (2024) Evaluation of Large Language Model Performance and Reliability for Citations and References in Scholarly Writing: Cross-Disciplinary Study. J Med Internet Res, 26:e52935.10.2196\/52935","DOI":"10.2196\/52935"},{"key":"2132_CR105","unstructured":"Kim Y, Xu X, McDuff D, Breazeal C, Park HW (2024) Health-LLM: Large Language Models for Health Prediction via Wearable Sensor Data. arXiv e-prints:2401-6866.10.48550\/arXiv.2401.06866"},{"key":"2132_CR106","first-page":"1","volume":"Manag","author":"AR Mohamed Yousuff","year":"2024","unstructured":"Mohamed Yousuff AR, Zainulabedin Hasan M, Anand R, Rajasekhara Babu M (2024) Leveraging deep learning models for continuous glucose monitoring and prediction in diabetes management: towards enhanced blood sugar control. Int J Syst Assur Eng Manag:1\u20138","journal-title":"Int J Syst Assur Eng"},{"key":"2132_CR107","unstructured":"Dai H, Li Y, Liu Z, Zhao L, Wu Z, Song S, Shen Y, Zhu D, Li X, Li S (2023) AD-AutoGPT: An Autonomous GPT for Alzheimer\u2019s Disease Infodemiology. arXiv preprint arXiv:2306.10095"},{"key":"2132_CR108","doi-asserted-by":"crossref","first-page":"357","DOI":"10.1038\/s41586-023-06160-y","volume":"619","author":"LY Jiang","year":"2023","unstructured":"Jiang LY, Liu XC, Nejatian NP, Nasir-Moin M, Wang D, Abidin A, Eaton K, Riina HA, Laufer I, Punjabi P (2023) Health system-scale language models are all-purpose prediction engines. 
Nature, 619:357\u2013362","journal-title":"Nature"},{"key":"2132_CR109","doi-asserted-by":"publisher","first-page":"116026","DOI":"10.1016\/j.psychres.2024.116026","volume":"339","author":"S Volkmer","year":"2024","unstructured":"Volkmer S, Meyer-Lindenberg A, Schwarz E (2024) Large language models in psychiatry: Opportunities and challenges. Psychiatry Res, 339:116026.https:\/\/doi.org\/10.1016\/j.psychres.2024.116026","journal-title":"Psychiatry Res"},{"key":"2132_CR110","unstructured":"Calisto FM (2024) Human-Centered Design of Personalized Intelligent Agents in Medical Imaging Diagnosis."},{"key":"2132_CR111","unstructured":"Radford A, Narasimhan K, Salimans T, Sutskever I (2018) Improving language understanding by generative pre-training."},{"key":"2132_CR112","doi-asserted-by":"crossref","unstructured":"Volovici V, Syn NL, Ercole A, Zhao JJ, Liu N (2022) Steps to avoid overuse and misuse of machine learning in clinical research. Nat Med, 28:1996-1999.10.1038\/s41591-022-01961-6","DOI":"10.1038\/s41591-022-01961-6"},{"key":"2132_CR113","doi-asserted-by":"crossref","unstructured":"Norori N, Hu Q, Aellen FM, Faraci FD, Tzovara A (2021) Addressing bias in big data and AI for health care: A call for open science. Patterns (N Y), 2:100347.10.1016\/j.patter.2021.100347","DOI":"10.1016\/j.patter.2021.100347"},{"key":"2132_CR114","doi-asserted-by":"crossref","unstructured":"Mittelstadt BD, Floridi L (2016) The Ethics of Big Data: Current and Foreseeable Issues in Biomedical Contexts. Sci Eng Ethics, 22:303-341.10.1007\/s11948-015-9652-2","DOI":"10.1007\/s11948-015-9652-2"},{"key":"2132_CR115","doi-asserted-by":"crossref","unstructured":"Holzinger A, Jurisica I (2014) Knowledge Discovery and Data Mining in Biomedical Informatics: The Future Is in Integrative, Interactive Machine Learning Solutions. In Interactive Knowledge Discovery and Data Mining in Biomedical Informatics: State-of-the-Art and Future Challenges. Edited by Holzinger A, Jurisica I. 
Berlin, Heidelberg: Springer Berlin Heidelberg; 1\u201318","DOI":"10.1007\/978-3-662-43968-5_1"},{"key":"2132_CR116","doi-asserted-by":"crossref","unstructured":"Chen Z, Zhou K, Zhao X, Wan J, Zhang F, Zhang D, Wen J (2024) Improving Large Language Models via Fine-grained Reinforcement Learning with Minimum Editing Constraint. In; 1990\/8\/1; Bangkok, Thailand. Association for Computational Linguistics: 5694\u20135711.","DOI":"10.18653\/v1\/2024.findings-acl.338"},{"key":"2132_CR117","doi-asserted-by":"crossref","unstructured":"Johnson AEW, Pollard TJ, Shen L, Lehman LH, Feng M, Ghassemi M, Moody B, Szolovits P, Anthony Celi L, Mark RG (2016) MIMIC-III, a freely accessible critical care database. Sci Data, 3:160035.10.1038\/sdata.2016.35","DOI":"10.1038\/sdata.2016.35"},{"key":"2132_CR118","unstructured":"Mohan S, Li D (2019) MedMentions: A Large Biomedical Corpus Annotated with UMLS Concepts. arXiv e-prints:1902-9476.10.48550\/arXiv.1902.09476"},{"key":"2132_CR119","doi-asserted-by":"crossref","unstructured":"He J, Fu M, Tu M (2019) Applying deep matching networks to Chinese medical question answering: a study and a dataset. BMC Med Inform Decis Mak, 19:52.10.1186\/s12911-019-0761-8","DOI":"10.1186\/s12911-019-0761-8"},{"key":"2132_CR120","first-page":"74","volume-title":"1990-11-01; 1990-06-01; 1990-11-01; Hong Kong, China; Online; Miami, Florida, USA","author":"Q Jin","year":"2019","unstructured":"Jin Q, Dhingra B, Liu Z, Cohen W, Lu X, Ben Abacha A, Mrabet Y, Zhang Y, Shivade C, Langlotz C, Demner-Fushman D, Chen J, Gui C, Ouyang R, Gao A, Chen S, Chen GH, Wang X, Cai Z, Ji K, Wan X, Wang B (2019) PubMedQA: A Dataset for Biomedical Research Question Answering; Overview of the MEDIQA 2021 Shared Task on Summarization in the Medical Domain; Towards Injecting Medical Visual Knowledge into Multimodal LLMs at Scale. In; 1990-11-01; 1990-06-01; 1990-11-01; Hong Kong, China; Online; Miami, Florida, USA. 
Association for Computational Linguistics; Association for Computational Linguistics; Association for Computational Linguistics: 2567\u20132577, 74\u201385, 7346\u20137370."},{"key":"2132_CR121","first-page":"74","volume-title":"1990-06-01; 1990-11-01; Online; Miami, Florida, USA","author":"A Ben Abacha","year":"2021","unstructured":"Ben Abacha A, Mrabet Y, Zhang Y, Shivade C, Langlotz C, Demner-Fushman D, Chen J, Gui C, Ouyang R, Gao A, Chen S, Chen GH, Wang X, Cai Z, Ji K, Wan X, Wang B (2021) Overview of the MEDIQA 2021 Shared Task on Summarization in the Medical Domain; Towards Injecting Medical Visual Knowledge into Multimodal LLMs at Scale. In; 1990-06-01; 1990-11-01; Online; Miami, Florida, USA. Association for Computational Linguistics; Association for Computational Linguistics: 74\u201385, 7346\u20137370."},{"key":"2132_CR122","doi-asserted-by":"crossref","unstructured":"Jin D, Pan E, Oufattole N, Weng W, Fang H, Szolovits P (2020) What Disease does this Patient Have? A Large-scale Open Domain Question Answering Dataset from Medical Exams. arXiv e-prints:2009-13081.10.48550\/arXiv.2009.13081","DOI":"10.20944\/preprints202105.0498.v1"},{"key":"2132_CR123","doi-asserted-by":"crossref","unstructured":"Tran H, Yang Z, Yao Z, Yu H (2024) BioInstruct: instruction tuning of large language models for biomedical natural language processing. Journal of the American Medical Informatics Association, 31:1821-1832.10.1093\/jamia\/ocae122","DOI":"10.1093\/jamia\/ocae122"},{"key":"2132_CR124","unstructured":"Zhang S, Xu Y, Usuyama N, Xu H, Bagga J, Tinn R, Preston S, Rao R, Wei M, Valluri N (2023) BiomedCLIP: a multimodal biomedical foundation model pretrained from fifteen million scientific image-text pairs. 
arXiv preprint arXiv:2303.00915"},{"key":"2132_CR125","doi-asserted-by":"crossref","unstructured":"Chen J, Gui C, Ouyang R, Gao A, Chen S, Chen GH, Wang X, Cai Z, Ji K, Wan X, Wang B (2024) Towards Injecting Medical Visual Knowledge into Multimodal LLMs at Scale. In; 1990\/11\/1; Miami, Florida, USA. Association for Computational Linguistics: 7346\u20137370.","DOI":"10.18653\/v1\/2024.emnlp-main.418"},{"key":"2132_CR126","doi-asserted-by":"crossref","unstructured":"Johnson AEW, Pollard TJ, Berkowitz SJ, Greenbaum NR, Lungren MP, Deng C, Mark RG, Horng S (2019) MIMIC-CXR, a de-identified publicly available database of chest radiographs with free-text reports. Sci Data, 6:317.10.1038\/s41597-019-0322-0","DOI":"10.1038\/s41597-019-0322-0"},{"key":"2132_CR127","doi-asserted-by":"crossref","unstructured":"Demner-Fushman D, Kohli MD, Rosenman MB, Shooshan SE, Rodriguez L, Antani S, Thoma GR, McDonald CJ (2016) Preparing a collection of radiology examinations for distribution and retrieval. J Am Med Inform Assoc, 23:304-310.10.1093\/jamia\/ocv080","DOI":"10.1093\/jamia\/ocv080"},{"key":"2132_CR128","doi-asserted-by":"crossref","first-page":"543405","DOI":"10.3389\/frai.2020.543405","volume":"3","author":"A Baker","year":"2020","unstructured":"Baker A, Perov Y, Middleton K, Baxter J, Mullarkey D, Sangar D, Butt M, DoRosario A, Johri S (2020) A comparison of artificial intelligence and human doctors for the purpose of triage and diagnosis. Front Artif Intell, 3:543405","journal-title":"Front Artif Intell"},{"key":"2132_CR129","doi-asserted-by":"crossref","first-page":"e47532","DOI":"10.2196\/47532","volume":"9","author":"N Ito","year":"2023","unstructured":"Ito N, Kadomatsu S, Fujisawa M, Fukaguchi K, Ishizawa R, Kanda N, Kasugai D, Nakajima M, Goto T, Tsugawa Y (2023) The Accuracy and Potential Racial and Ethnic Biases of GPT-4 in the Diagnosis and Triage of Health Conditions: Evaluation Study. 
JMIR Med Educ, 9:e47532","journal-title":"JMIR Med Educ"},{"key":"2132_CR130","doi-asserted-by":"crossref","unstructured":"Brin D, Sorin V, Barash Y, Konen E, Glicksberg BS, Nadkarni GN, Klang E (2024) Assessing GPT-4 multimodal performance in radiological image analysis. Eur Radiol.10.1007\/s00330-024-11035-5","DOI":"10.1007\/s00330-024-11035-5"},{"key":"2132_CR131","doi-asserted-by":"crossref","unstructured":"Liu Q, Hyland S, Bannur S, Bouzid K, Castro DC, Wetscherek MT, Tinn R, Sharma H, P\u00e9rez-Garc\u00eda F, Schwaighofer A (2023) Exploring the Boundaries of GPT-4 in Radiology. arXiv preprint arXiv:2310.14573","DOI":"10.18653\/v1\/2023.emnlp-main.891"},{"key":"2132_CR132","doi-asserted-by":"crossref","first-page":"721","DOI":"10.3350\/cmh.2023.0089","volume":"29","author":"YH Yeo","year":"2023","unstructured":"Yeo YH, Samaan JS, Ng WH, Ting P, Trivedi H, Vipani A, Ayoub W, Yang JD, Liran O, Spiegel B (2023) Assessing the performance of ChatGPT in answering questions regarding cirrhosis and hepatocellular carcinoma. Clin Mol Hepatol, 29:721","journal-title":"Clin Mol Hepatol"},{"key":"2132_CR133","doi-asserted-by":"crossref","first-page":"44","DOI":"10.1038\/s41523-023-00557-8","volume":"9","author":"V Sorin","year":"2023","unstructured":"Sorin V, Klang E, Sklair-Levy M, Cohen I, Zippel DB, Balint Lahat N, Konen E, Barash Y (2023) Large language model (ChatGPT) as a support tool for breast tumor board. NPJ Breast Cancer, 9:44","journal-title":"NPJ Breast Cancer"},{"key":"2132_CR134","doi-asserted-by":"crossref","first-page":"3395","DOI":"10.1007\/s40123-023-00789-8","volume":"12","author":"X Hu","year":"2023","unstructured":"Hu X, Ran AR, Nguyen TX, Szeto S, Yam JC, Chan CK, Cheung CY (2023) What can Gpt-4 do for diagnosing rare eye diseases? A pilot study. 
Ophthalmol Ther, 12:3395\u20133402","journal-title":"Ophthalmol Ther"},{"key":"2132_CR135","doi-asserted-by":"crossref","first-page":"3378","DOI":"10.3390\/ijerph20043378","volume":"20","author":"T Hirosawa","year":"2023","unstructured":"Hirosawa T, Harada Y, Yokose M, Sakamoto T, Kawamura R, Shimizu T (2023) Diagnostic accuracy of differential-diagnosis lists generated by generative pretrained transformer 3 chatbot for clinical vignettes with common chief complaints: a pilot study. Int J Environ Res Public Health, 20:3378","journal-title":"Int J Environ Res Public Health"},{"key":"2132_CR136","doi-asserted-by":"crossref","first-page":"e48659","DOI":"10.2196\/48659","volume":"25","author":"A Rao","year":"2023","unstructured":"Rao A, Pang M, Kim J, Kamineni M, Lie W, Prasad AK, Landman A, Dreyer K, Succi MD (2023) Assessing the utility of ChatGPT throughout the entire clinical workflow: development and usability study. J Med Internet Res, 25:e48659","journal-title":"J Med Internet Res"},{"key":"2132_CR137","doi-asserted-by":"crossref","unstructured":"Liu S, Wright AP, Patterson BL, Wanderer JP, Turer RW, Nelson SD, McCoy AB, Sittig DF, Wright A (2023) Assessing the value of ChatGPT for clinical decision support optimization. MedRxiv:2022\u20132023","DOI":"10.1101\/2023.02.21.23286254"},{"key":"2132_CR138","doi-asserted-by":"crossref","first-page":"100324","DOI":"10.1016\/j.xops.2023.100324","volume":"3","author":"F Antaki","year":"2023","unstructured":"Antaki F, Touma S, Milad D, El-Khoury J, Duval R (2023) Evaluating the performance of ChatGPT in ophthalmology: an analysis of its successes and shortcomings. 
Ophthalmology science, 3:100324","journal-title":"Ophthalmology science"},{"key":"2132_CR139","doi-asserted-by":"crossref","first-page":"e45312","DOI":"10.2196\/45312","volume":"9","author":"A Gilson","year":"2023","unstructured":"Gilson A, Safranek CW, Huang T, Socrates V, Chi L, Taylor RA, Chartash D (2023) How Does ChatGPT Perform on the United States Medical Licensing Examination (USMLE)? The Implications of Large Language Models for Medical Education and Knowledge Assessment. JMIR Med Educ, 9:e45312","journal-title":"JMIR Med Educ"},{"key":"2132_CR140","first-page":"e198","volume":"2","author":"TH Kung","year":"2023","unstructured":"Kung TH, Cheatham M, Medenilla A, Sillos C, De Leon L, Elepa\u00f1o C, Madriaga M, Aggabao R, Diaz-Candido G, Maningo J (2023) Performance of ChatGPT on USMLE: potential for AI-assisted medical education using large language models. PLoS digital health, 2:e198","journal-title":"PLoS digital health"},{"key":"2132_CR141","first-page":"15","volume-title":"Applicability of ChatGPT in assisting to solve higher order problems in pathology","author":"RK Sinha","year":"2023","unstructured":"Sinha RK, Roy AD, Kumar N, Mondal H (2023) Applicability of ChatGPT in assisting to solve higher order problems in pathology. Cureus, 15"},{"key":"2132_CR142","doi-asserted-by":"crossref","first-page":"466","DOI":"10.1038\/s41431-023-01396-8","volume":"32","author":"D Duong","year":"2024","unstructured":"Duong D, Solomon BD (2024) Analysis of large-language model versus human performance for genetics questions. Eur J Hum Genet, 32:466\u2013468","journal-title":"Eur J Hum Genet"},{"key":"2132_CR143","doi-asserted-by":"crossref","unstructured":"Johnson D, Goodman R, Patrinely J, Stone C, Zimmerman E, Donald R, Chang S, Berkowitz S, Finn A, Jahangir E (2023) Assessing the accuracy and reliability of AI-generated medical responses: an evaluation of the Chat-GPT model. 
Research square","DOI":"10.21203\/rs.3.rs-2566942\/v1"},{"key":"2132_CR144","doi-asserted-by":"crossref","first-page":"158","DOI":"10.1038\/s41746-023-00896-7","volume":"6","author":"L Tang","year":"2023","unstructured":"Tang L, Sun Z, Idnay B, Nestor JG, Soroush A, Elias PA, Xu Z, Ding Y, Durrett G, Rousseau JF (2023) Evaluating large language models on medical evidence summarization. NPJ Digit Med, 6:158","journal-title":"NPJ Digit Med"},{"key":"2132_CR145","doi-asserted-by":"crossref","unstructured":"Yu H, Fan L, Li L, Zhou J, Ma Z, Xian L, Hua W, He S, Jin M, Zhang Y, Gandhi A, Ma X (2024) Large Language Models in Biomedical and Health Informatics: A Review with Bibliometric Analysis. J Healthc Inform Res, 8:658-711.10.1007\/s41666-024-00171-8","DOI":"10.1007\/s41666-024-00171-8"},{"key":"2132_CR146","doi-asserted-by":"crossref","unstructured":"Xu J, Lu L, Peng X, Pang J, Ding J, Yang L, Song H, Li K, Sun X, Zhang S (2024) Data Set and Benchmark (MedGPTEval) to Evaluate Responses From Large Language Models in Medicine: Evaluation Development and Validation. JMIR Med Inform, 12:e57674.10.2196\/57674","DOI":"10.2196\/57674"},{"key":"2132_CR147","unstructured":"Shi X, Xu J, Ding J, Pang J, Liu S, Luo S, Peng X, Lu L, Yang H, Hu M (2023) Llm-mini-cex: Automatic evaluation of large language model for diagnostic conversation. arXiv preprint arXiv:2308.07635"},{"key":"2132_CR148","doi-asserted-by":"crossref","unstructured":"Giuffr\u00e8 M, Kresevic S, Pugliese N, You K, Shung DL (2024) Optimizing large language models in digestive disease: strategies and challenges to improve clinical outcomes. Liver Int, 44:2114-2124.10.1111\/liv.15974","DOI":"10.1111\/liv.15974"},{"key":"2132_CR149","unstructured":"Wu C, Qiu P, Liu J, Gu H, Li N, Zhang Y, Wang Y, Xie W (2024) Towards Evaluating and Building Versatile Large Language Models for Medicine. 
arXiv e-prints:2408-12547.10.48550\/arXiv.2408.12547"},{"key":"2132_CR150","unstructured":"Xu J, Lu L, Yang S, Liang B, Peng X, Pang J, Ding J, Shi X, Yang L, Song H, Li K, Sun X, Zhang S (2023) MedGPTEval: A Dataset and Benchmark to Evaluate Responses of Large Language Models in Medicine. arXiv e-prints:2305-7340.10.48550\/arXiv.2305.07340"},{"key":"2132_CR151","unstructured":"Mao R, Chen G, Zhang X, Guerin F, Cambria E (2023) Gpteval: A survey on assessments of chatgpt and gpt-4. arXiv preprint arXiv:2308.12488"},{"key":"2132_CR152","doi-asserted-by":"crossref","unstructured":"Krithara A, Nentidis A, Bougiatiotis K, Paliouras G (2023) BioASQ-QA: A manually curated corpus for Biomedical Question Answering. Sci Data, 10:170.10.1038\/s41597-023-02068-4","DOI":"10.1038\/s41597-023-02068-4"},{"key":"2132_CR153","unstructured":"Pal A, Umapathi LK, Sankarasubbu M (2022) MedMCQA: A Large-scale Multi-Subject Multi-Choice Dataset for Medical domain Question Answering. arXiv e-prints:2203-14371.10.48550\/arXiv.2203.14371"},{"key":"2132_CR154","doi-asserted-by":"crossref","unstructured":"Liu M, Ding J, Xu J, Hu W, Li X, Zhu L, Bai Z, Shi X, Wang B, Song H, Liu P, Zhang X, Wang S, Li K, Wang H, Ruan T, Huang X, Sun X, Zhang S (2024) MedBench: A Comprehensive, Standardized, and Reliable Benchmarking System for Evaluating Chinese Medical Large Language Models. arXiv e-prints:2407-10990.10.48550\/arXiv.2407.10990","DOI":"10.26599\/BDMA.2024.9020044"},{"key":"2132_CR155","unstructured":"Wang X, Chen GH, Song D, Zhang Z, Chen Z, Xiao Q, Jiang F, Li J, Wan X, Wang B, Others (2023) CMB: A Comprehensive Medical Benchmark in Chinese. arXiv preprint arXiv:2308.08833"},{"key":"2132_CR156","first-page":"1137","volume":"3","author":"Y Bengio","year":"2003","unstructured":"Bengio Y, Ducharme R, Vincent P, Janvin C (2003) A neural probabilistic language model. 
J Mach Learn Res, 3:1137\u20131155","journal-title":"J Mach Learn Res"},{"key":"2132_CR157","unstructured":"Bai Y, Kadavath S, Kundu S, Askell A, Kernion J, Jones A, Chen A, Goldie A, Mirhoseini A, McKinnon C, Chen C, Olsson C, Olah C, Hernandez D, Drain D, Ganguli D, Li D, Tran-Johnson E, Perez E, Kerr J, Mueller J, Ladish J, Landau J, Ndousse K, Lukosuite K, Lovitt L, Sellitto M, Elhage N, Schiefer N, Mercado N, DasSarma N, Lasenby R, Larson R, Ringer S, Johnston S, Kravec S, El Showk S, Fort S, Lanham T, Telleen-Lawton T, Conerly T, Henighan T, Hume T, Bowman SR, Hatfield-Dodds Z, Mann B, Amodei D, Joseph N, McCandlish S, Brown T, Kaplan J (2022) Constitutional AI: Harmlessness from AI Feedback. arXiv e-prints:2212-8073.10.48550\/arXiv.2212.08073"},{"key":"2132_CR158","unstructured":"Stiennon N, Ouyang L, Wu J, Ziegler DM, Lowe R, Voss C, Radford A, Amodei D, Christiano P (2020) Learning to summarize from human feedback. In Proceedings of the 34th International Conference on Neural Information Processing Systems; Vancouver, BC, Canada. Curran Associates Inc.: 253."},{"key":"2132_CR159","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1038\/s41597-018-0002-5","volume":"5","author":"JJ Lau","year":"2018","unstructured":"Lau JJ, Gayen S, Ben Abacha A, Demner-Fushman D (2018) A dataset of clinically generated visual questions and answers about radiology images. Sci Data, 5:1\u201310","journal-title":"Sci Data"},{"key":"2132_CR160","unstructured":"Yin S, Fu C, Zhao S, Li K, Sun X, Xu T, Chen E (2023) A Survey on Multimodal Large Language Models. arXiv e-prints:2306-13549.10.48550\/arXiv.2306.13549"},{"key":"2132_CR161","doi-asserted-by":"crossref","unstructured":"Xiao H, Zhou F, Liu X, Liu T, Li Z, Liu X, Huang X (2024) A Comprehensive Survey of Large Language Models and Multimodal Large Language Models in Medicine. 
arXiv e-prints:2405-8603.10.48550\/arXiv.2405.08603","DOI":"10.2139\/ssrn.5031720"},{"key":"2132_CR162","first-page":"36","volume-title":"Llava-med: Training a large language-and-vision assistant for biomedicine in one day","author":"C Li","year":"2024","unstructured":"Li C, Wong C, Zhang S, Usuyama N, Liu H, Yang J, Naumann T, Poon H, Gao J (2024) Llava-med: Training a large language-and-vision assistant for biomedicine in one day. Adv Neural Inf Process Syst, 36"},{"key":"2132_CR163","unstructured":"Ma L, Han J, Wang Z, Zhang D (2023) Cephgpt-4: An interactive multimodal cephalometric measurement and diagnostic system with visual large language model. arXiv preprint arXiv:2307.07518"},{"key":"2132_CR164","unstructured":"Sun Y, Zhu C, Zheng S, Zhang K, Shui Z, Yu X, Zhao Y, Li H, Zhang Y, Zhao R (2023) Pathasst: Redefining pathology through generative foundation ai assistant for pathology. arXiv preprint arXiv:2305.15072"},{"key":"2132_CR165","doi-asserted-by":"crossref","unstructured":"Thawakar OC, Shaker AM, Mullappilly SS, Cholakkal H, Anwer RM, Khan S, Laaksonen J, Khan F (2024) XrayGPT: Chest Radiographs Summarization using Large Medical Vision-Language Models. In; 1990\/8\/1; Bangkok, Thailand. Association for Computational Linguistics: 440\u2013448.","DOI":"10.18653\/v1\/2024.bionlp-1.35"},{"key":"2132_CR166","unstructured":"Wu C, Zhang X, Zhang Y, Wang Y, Xie W (2023) Towards generalist foundation model for radiology. arXiv preprint arXiv:2308.02463"},{"key":"2132_CR167","doi-asserted-by":"crossref","unstructured":"Seenivasan L, Islam M, Kannan G, Ren H (2023) SurgicalGPT: End-to-End Language-Vision GPT for Visual Question Answering in Surgery. 
arXiv e-prints:2304-9974.10.48550\/arXiv.2304.09974","DOI":"10.1007\/978-3-031-43996-4_27"},{"key":"2132_CR168","first-page":"AIoa2300138","volume":"1","author":"T Tu","year":"2024","unstructured":"Tu T, Azizi S, Driess D, Schaekermann M, Amin M, Chang P, Carroll A, Lau C, Tanno R, Ktena I (2024) Towards generalist biomedical ai. NEJM AI, 1:AIoa2300138","journal-title":"NEJM AI"},{"key":"2132_CR169","unstructured":"Saab K, Tu T, Weng W, Tanno R, Stutz D, Wulczyn E, Zhang F, Strother T, Park C, Vedadi E, Zambrano Chaves J, Hu S, Schaekermann M, Kamath A, Cheng Y, Barrett DGT, Cheung C, Mustafa B, Palepu A, McDuff D, Hou L, Golany T, Liu L, Alayrac J, Houlsby N, Tomasev N, Freyberg J, Lau C, Kemp J, Lai J, Azizi S, Kanada K, Man S, Kulkarni K, Sun R, Shakeri S, He L, Caine B, Webson A, Latysheva N, Johnson M, Mansfield P, Lu J, Rivlin E, Anderson J, Green B, Wong R, Krause J, Shlens J, Dominowska E, Eslami SMA, Chou K, Cui C, Vinyals O, Kavukcuoglu K, Manyika J, Dean J, Hassabis D, Matias Y, Webster D, Barral J, Corrado G, Semturs C, Mahdavi SS, Gottweis J, Karthikesalingam A, Natarajan V (2024) Capabilities of Gemini Models in Medicine. 
arXiv e-prints:2404-18416.10.48550\/arXiv.2404.18416"},{"key":"2132_CR170","doi-asserted-by":"crossref","unstructured":"Li J, Guan Z, Wang J, Cheung CY, Zheng Y, Lim L, Lim CC, Ruamviboonsuk P, Raman R, Corsino L, Echouffo-Tcheugui JB, Luk AOY, Chen LJ, Sun X, Hamzah H, Wu Q, Wang X, Liu R, Wang YX, Chen T, Zhang X, Yang X, Yin J, Wan J, Du W, Quek TC, Goh JHL, Yang D, Hu X, Nguyen TX, Szeto SKH, Chotcomwongse P, Malek R, Normatova N, Ibragimova N, Srinivasan R, Zhong P, Huang W, Deng C, Ruan L, Zhang C, Zhang C, Zhou Y, Wu C, Dai R, Koh SWC, Abdullah A, Hee NKY, Tan HC, Liew ZH, Tien CS, Kao SL, Lim AYL, Mok SF, Sun L, Gu J, Wu L, Li T, Cheng D, Wang Z, Qin Y, Dai L, Meng Z, Shu J, Lu Y, Jiang N, Hu T, Huang S, Huang G, Yu S, Liu D, Ma W, Guo M, Guan X, Yang X, Bascaran C, Cleland CR, Bao Y, Ekinci EI, Jenkins A, Chan JCN, Bee YM, Sivaprasad S, Shaw JE, Sim\u00f3 R, Keane PA, Cheng C, Tan GSW, Jia W, Tham Y, Li H, Sheng B, Wong TY (2024) Integrated image-based deep learning and language models for primary diabetes care. Nat Med, 30:2886-2896.10.1038\/s41591-024-03139-8","DOI":"10.1038\/s41591-024-03139-8"},{"key":"2132_CR171","first-page":"1","volume":"18","author":"L Wang","year":"2024","unstructured":"Wang L, Ma C, Feng X, Zhang Z, Yang H, Zhang J, Chen Z, Tang J, Chen X, Lin Y (2024) A survey on large language model based autonomous agents. Front Comput Sci, 18:1\u201326","journal-title":"Front Comput Sci"},{"key":"2132_CR172","unstructured":"Gangavarapu A, Gangavarapu A (2024) IMAS: A Comprehensive Agentic Approach to Rural Healthcare Delivery. arXiv e-prints:2410-12868.10.48550\/arXiv.2410.12868"},{"key":"2132_CR173","unstructured":"Sumers TR, Yao S, Narasimhan K, Griffiths TL (2023) Cognitive Architectures for Language Agents. 
arXiv e-prints:2309-2427.10.48550\/arXiv.2309.02427"},{"key":"2132_CR174","doi-asserted-by":"crossref","unstructured":"Guo T, Chen X, Wang Y, Chang R, Pei S, Chawla NV, Wiest O, Zhang X (2024) Large Language Model Based Multi-agents: A Survey of Progress and Challenges. pp. 8048\u20138057;8048\u20138057.","DOI":"10.24963\/ijcai.2024\/890"},{"key":"2132_CR175","unstructured":"Zhou W, Ou Y, Ding S, Li L, Wu J, Wang T, Chen J, Wang S, Xu X, Zhang N, Chen H, Jiang YE (2024) Symbolic Learning Enables Self-Evolving Agents. arXiv e-prints:2406-18532.10.48550\/arXiv.2406.18532"},{"key":"2132_CR176","doi-asserted-by":"crossref","unstructured":"Zhou J, Chen X, Gao X (2023) Path to medical agi: Unify domain-specific medical llms with the lowest cost. medRxiv:2023\u20132026","DOI":"10.1101\/2023.06.23.23291802"},{"key":"2132_CR177","unstructured":"Boiko DA, MacKnight R, Gomes G (2023) Emergent autonomous scientific research capabilities of large language models. arXiv preprint arXiv:2304.05332"},{"key":"2132_CR178","doi-asserted-by":"crossref","unstructured":"Tang X, Zou A, Zhang Z, Li Z, Zhao Y, Zhang X, Cohan A, Gerstein M (2024) MedAgents: Large Language Models as Collaborators for Zero-shot Medical Reasoning. In; 1990\/8\/1; Bangkok, Thailand. Association for Computational Linguistics: 599\u2013621.","DOI":"10.18653\/v1\/2024.findings-acl.33"},{"key":"2132_CR179","unstructured":"Zhu Y, Wei S, Wang X, Xue K, Zhang X, Zhang S (2024) MeNTi: Bridging Medical Calculator and LLM Agent with Nested Tool Calling. arXiv e-prints:2410-13610.10.48550\/arXiv.2410.13610"},{"key":"2132_CR180","unstructured":"Wei J, Yang D, Li Y, Xu Q, Chen Z, Li M, Jiang Y, Hou X, Zhang L (2024) MedAide: Towards an Omni Medical Aide via Specialized LLM-based Multi-Agent Collaboration. 
arXiv e-prints:2410-12532.10.48550\/arXiv.2410.12532"},{"key":"2132_CR181","unstructured":"Su X, Wang Y, Gao S, Liu X, Giunchiglia V, Clevert D, Zitnik M (2024) Knowledge Graph Based Agent for Complex, Knowledge-Intensive QA in Medicine. arXiv e-prints:2410-4660.10.48550\/arXiv.2410.04660"},{"key":"2132_CR182","unstructured":"Han S, Choi W (2024) Development of a Large Language Model-based Multi-Agent Clinical Decision Support System for Korean Triage and Acuity Scale (KTAS)-Based Triage and Treatment Planning in Emergency Departments. arXiv e-prints:2408-7531.10.48550\/arXiv.2408.07531"},{"key":"2132_CR183","doi-asserted-by":"crossref","unstructured":"Bani-Harouni D, Navab N, Keicher M (2025) MAGDA: Multi-agent Guideline-Driven Diagnostic Assistance. In; Cham. Springer Nature Switzerland: 163\u2013172.","DOI":"10.1007\/978-3-031-73471-7_17"},{"key":"2132_CR184","unstructured":"Li J, Wang S, Zhang M, Li W, Lai Y, Kang X, Ma W, Liu Y (2024) Agent Hospital: A Simulacrum of Hospital with Evolvable Medical Agents. arXiv preprint arXiv:2405.02957"},{"key":"2132_CR185","doi-asserted-by":"crossref","first-page":"186","DOI":"10.2340\/00015555-1906","volume":"95","author":"A B\u00f6RVE","year":"2015","unstructured":"B\u00f6RVE A, Gyllencreutz JD, Terstappen K, Backman EJ, Aldenbratt A, Danielsson M, Gillstedt M, Sandberg C, Paoli J (2015) Smartphone teledermoscopy referrals: a novel process for improved triage of skin cancer patients. 
Acta Derm Venereol, 95:186\u2013190","journal-title":"Acta Derm Venereol"}],"container-title":["Journal of Medical Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10916-024-02132-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10916-024-02132-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10916-024-02132-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,14]],"date-time":"2025-01-14T07:29:31Z","timestamp":1736839771000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10916-024-02132-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,27]]},"references-count":185,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2024,12]]}},"alternative-id":["2132"],"URL":"https:\/\/doi.org\/10.1007\/s10916-024-02132-5","relation":{},"ISSN":["1573-689X"],"issn-type":[{"value":"1573-689X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,27]]},"assertion":[{"value":"16 September 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 December 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 December 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Informed Consent"}},{"value":"Not 
applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics Approval"}},{"value":"The authors declare no competing interests.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}}],"article-number":"112"}}