{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,8]],"date-time":"2026-05-08T15:57:10Z","timestamp":1778255830025,"version":"3.51.4"},"reference-count":190,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2023,12,1]],"date-time":"2023-12-01T00:00:00Z","timestamp":1701388800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Research Grants Council (RGC) of Hong Kong SAR","award":["ECS24211020"],"award-info":[{"award-number":["ECS24211020"]}]},{"name":"Research Grants Council (RGC) of Hong Kong SAR","award":["GRF14203821"],"award-info":[{"award-number":["GRF14203821"]}]},{"name":"Research Grants Council (RGC) of Hong Kong SAR","award":["GRF14216222"],"award-info":[{"award-number":["GRF14216222"]}]},{"name":"Innovation and Technology Fund (ITF) of Hong Kong SAR","award":["ITS\/240\/21"],"award-info":[{"award-number":["ITS\/240\/21"]}]},{"name":"Science, Technology and Innovation Commission (STIC) of Shenzhen Municipality","award":["SGDX20220530111005039"],"award-info":[{"award-number":["SGDX20220530111005039"]}]},{"name":"Bill & Melinda Gates Foundation","award":["OPP1171395"],"award-info":[{"award-number":["OPP1171395"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Biomed. 
Health Inform."],"published-print":{"date-parts":[[2023,12]]},"DOI":"10.1109\/jbhi.2023.3316750","type":"journal-article","created":{"date-parts":[[2023,9,22]],"date-time":"2023-09-22T17:55:25Z","timestamp":1695405325000},"page":"6074-6087","source":"Crossref","is-referenced-by-count":200,"title":["Large AI Models in Health Informatics: Applications, Challenges, and the Future"],"prefix":"10.1109","volume":"27","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4166-3428","authenticated-orcid":false,"given":"Jianing","family":"Qiu","sequence":"first","affiliation":[{"name":"Precision Robotics (Hong Kong) Ltd., Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6369-2663","authenticated-orcid":false,"given":"Lin","family":"Li","sequence":"additional","affiliation":[{"name":"Department of Informatics, King's College London, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5633-1739","authenticated-orcid":false,"given":"Jiankai","family":"Sun","sequence":"additional","affiliation":[{"name":"School of Engineering, Stanford University, Stanford, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2209-0348","authenticated-orcid":false,"given":"Jiachuan","family":"Peng","sequence":"additional","affiliation":[{"name":"Department of Engineering Science, University of Oxford, Oxford, U.K."}]},{"given":"Peilun","family":"Shi","sequence":"additional","affiliation":[{"name":"Department of Biomedical Engineering, The Chinese University of Hong Kong, Hong Kong"}]},{"given":"Ruiyang","family":"Zhang","sequence":"additional","affiliation":[{"name":"Precision Robotics (Hong Kong) Ltd., Hong Kong"}]},{"given":"Yinzhao","family":"Dong","sequence":"additional","affiliation":[{"name":"Faculty of Engineering, The University of Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6407-4912","authenticated-orcid":false,"given":"Kyle","family":"Lam","sequence":"additional","affiliation":[{"name":"Department of Surgery and Cancer, Imperial College London, 
London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0358-6567","authenticated-orcid":false,"given":"Frank P.-W.","family":"Lo","sequence":"additional","affiliation":[{"name":"Hamlyn Centre for Robotic Surgery, Imperial College London, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9361-4340","authenticated-orcid":false,"given":"Bo","family":"Xiao","sequence":"additional","affiliation":[{"name":"Hamlyn Centre for Robotic Surgery, Imperial College London, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9405-519X","authenticated-orcid":false,"given":"Wu","family":"Yuan","sequence":"additional","affiliation":[{"name":"Department of Biomedical Engineering, The Chinese University of Hong Kong, Hong Kong"}]},{"given":"Ningli","family":"Wang","sequence":"additional","affiliation":[{"name":"Beijing Tongren Eye Center, Beijing Tongren Hospital, Capital Medical University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4809-0514","authenticated-orcid":false,"given":"Dong","family":"Xu","sequence":"additional","affiliation":[{"name":"Department of Electrical Engineering, University of Missouri, Columbia, MO, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5080-108X","authenticated-orcid":false,"given":"Benny","family":"Lo","sequence":"additional","affiliation":[{"name":"Faculty of Medicine, Imperial College London, London, U.K."}]}],"member":"263","reference":[{"key":"ref57","first-page":"25278","article-title":"LAION-5B: An open large-scale dataset for training next generation image-text models","author":"schuhmann","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref56","article-title":"Florence: A new foundation model for computer vision","author":"yuan","year":"2021"},{"key":"ref59","article-title":"SimVLM: Simple visual language model pretraining with weak supervision","author":"wang","year":"0","journal-title":"Proc Int Conf Learn 
Representations"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01519"},{"key":"ref53","article-title":"GPipe: Efficient training of giant neural networks using pipeline parallelism","volume":"32","author":"huang","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref52","article-title":"Self-supervised pretraining of visual features in the wild","author":"goyal","year":"2021"},{"key":"ref55","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","author":"jia","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref168","article-title":"How your data is used to improve model performance","year":"2023"},{"key":"ref54","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"radford","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref169","article-title":"March 20 chatGPT outage: Here's what happened","year":"2023"},{"key":"ref170","article-title":"More than you've asked for: A comprehensive analysis of novel prompt injection threats to application-integrated large language models","author":"greshake","year":"2023"},{"key":"ref177","article-title":"Aligning AI with shared human values","author":"hendrycks","year":"2020"},{"key":"ref178","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.301"},{"key":"ref51","first-page":"10096","article-title":"EfficientNetV2: Smaller models and faster training","author":"tan","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref175","article-title":"The self-perception and political biases of chatGPT","author":"rutinowski","year":"2023"},{"key":"ref50","first-page":"6105","article-title":"Efficientnet: Rethinking model scaling for convolutional neural networks","author":"tan","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref176","article-title":"Exploring ai ethics of chatgpt: 
A diagnostic analysis","author":"zhuo","year":"2023"},{"key":"ref173","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-020-0288-5"},{"key":"ref174","doi-asserted-by":"publisher","DOI":"10.1056\/NEJMp1714229"},{"key":"ref171","doi-asserted-by":"publisher","DOI":"10.2105\/AJPH.2015.302903"},{"key":"ref172","doi-asserted-by":"publisher","DOI":"10.1126\/science.aax2342"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01385"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1088\/1742-5468\/ac9830"},{"key":"ref47","first-page":"3965","article-title":"CoAtNet: Marrying convolution and attention for all data sizes","volume":"34","author":"dai","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref41","first-page":"15908","article-title":"Transformer in transformer","volume":"34","author":"han","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref44","first-page":"7480","article-title":"Scaling vision transformers to 22 billion parameters","author":"dehghani","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref179","article-title":"Discovering the hidden vocabulary of dalle-2","author":"daras","year":"2022"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01170"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58558-7_29"},{"key":"ref8","article-title":"Improving language understanding by generative pre-training","author":"radford","year":"2018"},{"key":"ref180","article-title":"Investigating the existence of “secret language” in language models","author":"wang","year":"2023"},{"key":"ref7","article-title":"On the opportunities and risks of foundation models","author":"bommasani","year":"2021"},{"key":"ref181","first-page":"22199","article-title":"Large 
language models are zero-shot reasoners","volume":"35","author":"kojima","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref9","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018"},{"key":"ref4","article-title":"Gpt-4 technical report","year":"2023"},{"key":"ref3","article-title":"Attention is all you need","author":"vaswani","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1001\/jama.2016.17216"},{"key":"ref5","author":"lee","year":"2023","journal-title":"The AI Revolution in Medicine GPT-4 and Beyond"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-023-00840-9"},{"key":"ref188","article-title":"Understanding artificial intelligence ethics and safety","author":"leslie","year":"2019"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-020-62922-y"},{"key":"ref189","article-title":"Metadata archaeology: Unearthing data subsets by leveraging training dynamics","author":"siddiqui","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref40","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref186","article-title":"A path towards autonomous machine intelligence version 0.9.2,2022-06-27","volume":"62","author":"lecun","year":"2022","journal-title":"Open Review"},{"key":"ref187","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"ref184","doi-asserted-by":"publisher","DOI":"10.1109\/MC.2022.3148714"},{"key":"ref185","doi-asserted-by":"publisher","DOI":"10.1016\/j.clsr.2017.08.007"},{"key":"ref182","article-title":"A mathematical framework for transformer circuits","volume":"1","author":"elhage","year":"2021","journal-title":"Transformer Circuits Thread"},{"key":"ref183","article-title":"Carbon 
emissions and large neural network training","author":"patterson","year":"2021"},{"key":"ref35","first-page":"1691","article-title":"Generative pretraining from pixels","author":"chen","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00088"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01212"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01499"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.97"},{"key":"ref148","article-title":"ChatGPT for robotics: Design principles and model abilities","author":"vemprala","year":"2023"},{"key":"ref30","article-title":"ImageNet-21 K pretraining for the masses","author":"ridnik","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref149","article-title":"A generalist agent","author":"reed","year":"2022","journal-title":"Trans Mach Learn Res"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01216-8_12"},{"key":"ref146","doi-asserted-by":"publisher","DOI":"10.3390\/robotics11060127"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01179"},{"key":"ref147","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-43996-4_27"},{"key":"ref39","first-page":"1597","article-title":"A simple framework for contrastive learning of visual representations","author":"chen","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref155","article-title":"Rt-1: Robotics transformer for real-world control at scale","author":"brohan","year":"2022"},{"key":"ref156","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-023-00626-4"},{"key":"ref153","article-title":"Palm-e: An embodied multimodal language model","author":"driess","year":"2023"},{"key":"ref154","article-title":"Vima: General robot manipulation with multimodal 
prompts","author":"jiang","year":"2022"},{"key":"ref151","article-title":"Perceiver-actor: A multi-task transformer for robotic manipulation","author":"shridhar","year":"2022"},{"key":"ref152","article-title":"Do as I can and not as I say: Grounding language in robotic affordances","author":"ahn","year":"2022"},{"key":"ref150","first-page":"894","article-title":"CLIPort: What and where pathways for robotic manipulation","author":"shridhar","year":"0","journal-title":"Proc Conf Robot Learn"},{"key":"ref24","article-title":"Deep reinforcement learning from human preferences","author":"christiano","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref23","first-page":"2206","article-title":"Improving language models by retrieving from trillions of tokens","author":"borgeaud","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref26","article-title":"Improving alignment of dialogue agents via targeted human judgements","author":"glaese","year":"2022"},{"key":"ref25","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017"},{"key":"ref20","article-title":"Scaling language models: Methods, analysis & insights from training gopher","author":"rae","year":"2021"},{"key":"ref159","doi-asserted-by":"publisher","DOI":"10.1056\/NEJMsr2214184"},{"key":"ref22","article-title":"Chain of thought prompting elicits reasoning in large language models","author":"wei","year":"2022"},{"key":"ref157","doi-asserted-by":"publisher","DOI":"10.1038\/s41591-023-02412-6"},{"key":"ref21","article-title":"Multitask prompted training enables zero-shot task generalization","author":"sanh","year":"2021"},{"key":"ref158","article-title":"In chatGPT we trust? 
measuring and characterizing the reliability of chatGPT","author":"shen","year":"2023"},{"key":"ref28","article-title":"How transferable are features in deep neural networks?","author":"yosinski","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref27","article-title":"Tuning computer vision models with task rewards","author":"pinto","year":"2023"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref166","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.41"},{"key":"ref167","article-title":"Are diffusion models vulnerable to membership inference attacks?","author":"duan","year":"2023"},{"key":"ref164","article-title":"Quantifying memorization across neural language models","author":"carlini","year":"2022"},{"key":"ref165","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/2023.findings-emnlp.272","article-title":"Multi-step jailbreaking privacy attacks on chatGPT","author":"li","year":"2023"},{"key":"ref162","article-title":"Data augmentation alone can improve adversarial training","author":"li","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref163","first-page":"2633","article-title":"Extracting training data from large language models","volume":"6","author":"carlini","year":"0","journal-title":"Proc Usenix Secur Symp"},{"key":"ref160","article-title":"Capabilities of GPT-4 on medical challenge problems","author":"nori","year":"2023"},{"key":"ref161","article-title":"On the robustness of chatGPT: An adversarial and out-of-distribution perspective","author":"wang","year":"2023"},{"key":"ref13","article-title":"Palm: Scaling language modeling with pathways","author":"chowdhery","year":"2022"},{"key":"ref12","article-title":"Scaling instruction-finetuned language models","author":"chung","year":"2022"},{"key":"ref15","article-title":"Using deepspeed and megatron to train megatron-turing nlg 530b, a large-scale generative language 
model","author":"smith","year":"2022"},{"key":"ref128","article-title":"Can large language models reason about medical questions?","author":"li\u00e9vin","year":"2022"},{"key":"ref14","article-title":"Training compute-optimal large language models","author":"hoffmann","year":"2022"},{"key":"ref129","doi-asserted-by":"publisher","DOI":"10.1016\/S2589-7500(23)00021-3"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.7759\/cureus.40895"},{"key":"ref126","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.130"},{"key":"ref96","article-title":"ChatCAD+: Towards a universal and reliable interactive CAD using LLMS","author":"zhao","year":"2023"},{"key":"ref127","article-title":"Towards expert-level medical question answering with large language models","author":"singhal","year":"2023"},{"key":"ref11","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"touvron","year":"2023"},{"key":"ref99","article-title":"HuaTuo: Tuning LLaMA model with Chinese medical knowledge","author":"wang","year":"2023"},{"key":"ref124","article-title":"MedAlpaca–An open-source collection of medical conversational AI models and training data","author":"han","year":"2023"},{"key":"ref10","article-title":"Llama: Open and efficient foundation language models","author":"touvron","year":"2023"},{"key":"ref98","article-title":"XrayGPT: Chest radiographs summarization using medical vision-language models","author":"thawkar","year":"2023"},{"key":"ref125","article-title":"Emergent abilities of large language models","author":"wei","year":"2022"},{"key":"ref17","article-title":"LaMDA: Language models for dialog applications","author":"thoppilan","year":"2022"},{"key":"ref16","article-title":"Bloom: A 176b-parameter open-access multilingual language model","author":"scao","year":"2022"},{"key":"ref19","article-title":"Training language models to follow instructions with human 
feedback","author":"ouyang","year":"2022"},{"key":"ref18","article-title":"OPT: Open pre-trained transformer language models","author":"zhang","year":"2022"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1038\/s41594-021-00714-2"},{"key":"ref133","article-title":"Chataug: Leveraging chatGPT for text data augmentation","author":"dai","year":"2023"},{"key":"ref92","article-title":"E2Efold-3D: End-to-end deep learning method for accurate de novo RNA 3D structure prediction","author":"shen","year":"2022"},{"key":"ref134","article-title":"DetectGPT: Zero-shot machine-generated text detection using probability curvature","author":"mitchell","year":"2023"},{"key":"ref95","article-title":"ChatCAD: Interactive computer-aided diagnosis on medical image using large language models","author":"wang","year":"2023"},{"key":"ref131","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pdig.0000198"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1038\/s41551-022-00936-9"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.15302\/J-QB-023-0327"},{"key":"ref130","article-title":"Fairway health - process prior authorization faster","year":"2023"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1093\/nar\/gkab1074"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1093\/nar\/gkaa921"},{"key":"ref89","article-title":"Interpretable RNA foundation model from unannotated data for highly accurate rna structure and function predictions","author":"chen","year":"2022","journal-title":"BioRxiv"},{"key":"ref139","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2023.3243999"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1126\/science.abj8754"},{"key":"ref137","doi-asserted-by":"publisher","DOI":"10.2196\/19273"},{"key":"ref85","article-title":"High-resolution de novo structure prediction from primary 
sequence","author":"wu","year":"2022","journal-title":"BioRxiv"},{"key":"ref138","doi-asserted-by":"publisher","DOI":"10.1109\/BHI56158.2022.9926927"},{"key":"ref88","article-title":"Ankh: Optimized protein language model unlocks general-purpose modelling","author":"elnaggar","year":"2023","journal-title":"BioRxiv"},{"key":"ref135","article-title":"Pangu drug model: Learn a molecule like a human","author":"lin","year":"2022","journal-title":"BioRxiv"},{"key":"ref87","article-title":"xTrimoPGLM: Unified 100b-scale pre-trained transformer for deciphering the language of protein","author":"chen","year":"2023","journal-title":"BioRxiv"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-021-00464-x"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-018-04964-5"},{"key":"ref144","doi-asserted-by":"publisher","DOI":"10.3390\/s22082861"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3095381"},{"key":"ref145","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3188101"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1126\/science.ade2574"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-023-06185-3"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1093\/bioinformatics\/btu739"},{"key":"ref143","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-43996-4_10"},{"key":"ref140","doi-asserted-by":"publisher","DOI":"10.1016\/S0140-6736(19)32497-3"},{"key":"ref141","article-title":"ClimaX: A foundation model for weather and climate","author":"nguyen","year":"2023"},{"key":"ref80","doi-asserted-by":"crossref","DOI":"10.1101\/2020.03.07.982272","article-title":"Progen: Language modeling for protein generation","author":"madani","year":"2020"},{"key":"ref79","article-title":"Protein complex prediction with alphafold-multimer","author":"evans","year":"2021","journal-title":"BioRxiv"},{"key":"ref108","article-title":"Adapting pretrained vision-language foundational models to 
medical imaging domains","author":"chambon","year":"2022"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-021-03819-2"},{"key":"ref109","article-title":"Stu-Net: Scalable and transferable medical image segmentation models empowered by large-scale supervised pre-training","author":"huang","year":"2023"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.1038\/s41591-023-02504-3"},{"key":"ref107","article-title":"Med3D: Transfer learning for 3D medical image analysis","author":"chen","year":"2019"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1016\/j.tibs.2014.10.005"},{"key":"ref104","article-title":"Medical SAM adapter: Adapting segment anything model for medical image segmentation","author":"wu","year":"2023"},{"key":"ref74","article-title":"Meta-transformer: A unified framework for multimodal learning","author":"zhang","year":"2023"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.256"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1107\/S2059798317016709"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-021-00455-y"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1038\/nsb1101-923"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.3390\/diagnostics13111947"},{"key":"ref2","doi-asserted-by":"crossref","DOI":"10.1109\/ICCV51070.2023.00371","article-title":"Segment anything","author":"kirillov","year":"2023"},{"key":"ref1","article-title":"ChatGPT: Optimizing language models for dialogue","year":"2022"},{"key":"ref190","article-title":"A survey of safety and trustworthiness of large language models through the lens of verification and validation","author":"huang","year":"2023"},{"key":"ref71","first-page":"2256","article-title":"Deep unsupervised learning using nonequilibrium thermodynamics","author":"sohl-dickstein","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref111","article-title":"Pubmed 
abstract","year":"2023"},{"key":"ref70","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","author":"saharia","year":"2022"},{"key":"ref112","article-title":"Pubmed central","year":"2023"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"ref72","article-title":"Visual chatGPT: Talking, drawing and editing with visual foundation models","author":"wu","year":"2023"},{"key":"ref110","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"brown","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.551"},{"key":"ref67","article-title":"Hierarchical text-conditional image generation with clip latents","author":"ramesh","year":"2022"},{"key":"ref117","first-page":"143","article-title":"Bioelectra: Pretrained biomedical text encoder using discriminators","author":"kanakarajan","year":"0","journal-title":"Proc 20th Workshop Biomed Lang Process"},{"key":"ref69","first-page":"16784","article-title":"GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models","author":"nichol","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref118","first-page":"1","article-title":"Domain-specific language model pretraining for biomedical natural language processing","volume":"3","author":"gu","year":"2021","journal-title":"Health"},{"key":"ref64","article-title":"Combined scaling for open-vocabulary image classification","author":"pham","year":"2021"},{"key":"ref115","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.379"},{"key":"ref63","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","author":"li","year":"0","journal-title":"Proc 
Int Conf Mach Learn"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.740"},{"key":"ref66","article-title":"Scaling autoregressive models for content-rich text-to-image generation","author":"yu","year":"2022","journal-title":"Trans Mach Learn Res"},{"key":"ref113","doi-asserted-by":"publisher","DOI":"10.1093\/bioinformatics\/btz682"},{"key":"ref65","first-page":"8821","article-title":"Zero-shot text-to-image generation","author":"ramesh","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref114","article-title":"Publicly available clinical BERT embeddings","author":"alsentzer","year":"2019"},{"key":"ref60","article-title":"PaLI: A jointly-scaled multilingual language-image model","author":"chen","year":"2022"},{"key":"ref122","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-022-00742-2"},{"key":"ref123","article-title":"LoRA: Low-rank adaptation of large language models","author":"hu","year":"2021"},{"key":"ref62","article-title":"Language is not all you need: Aligning perception with language models","author":"huang","year":"2023"},{"key":"ref120","doi-asserted-by":"publisher","DOI":"10.1093\/bib\/bbac409"},{"key":"ref61","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","author":"li","year":"2023"},{"key":"ref121","article-title":"Large language models encode clinical knowledge","author":"singhal","year":"2022"}],"container-title":["IEEE Journal of Biomedical and Health 
Informatics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221020\/10345388\/10261199.pdf?arnumber=10261199","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T19:39:20Z","timestamp":1732736360000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10261199\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12]]},"references-count":190,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/jbhi.2023.3316750","relation":{},"ISSN":["2168-2194","2168-2208"],"issn-type":[{"value":"2168-2194","type":"print"},{"value":"2168-2208","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,12]]}}}