{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T17:14:01Z","timestamp":1776100441717,"version":"3.50.1"},"reference-count":45,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000266","name":"Engineering & Physical Sciences Research Council (EPSRC) Center for Doctoral Training in Health Data Science","doi-asserted-by":"publisher","award":["EP\/S02428X\/1"],"award-info":[{"award-number":["EP\/S02428X\/1"]}],"id":[{"id":"10.13039\/501100000266","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100014461","name":"National Institute for Health and Care Research \u201cAI for Health and Social Care Award\u201d","doi-asserted-by":"publisher","award":["NIHR-AI-AWARD0-2183"],"award-info":[{"award-number":["NIHR-AI-AWARD0-2183"]}],"id":[{"id":"10.13039\/100014461","id-type":"DOI","asserted-by":"publisher"}]},{"name":"GlaxoSmithKline"},{"DOI":"10.13039\/501100000833","name":"Rosetrees","doi-asserted-by":"crossref","award":["A2926"],"award-info":[{"award-number":["A2926"]}],"id":[{"id":"10.13039\/501100000833","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1109\/tnnls.2023.3294633","type":"journal-article","created":{"date-parts":[[2023,8,11]],"date-time":"2023-08-11T17:27:03Z","timestamp":1691774823000},"page":"16453-16463","source":"Crossref","is-referenced-by-count":27,"title":["Clinical Prompt Learning With Frozen Language Models"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6682-334X","authenticated-orcid":false,"given":"Niall","family":"Taylor","sequence":"first","affiliation":[{"name":"Department of Psychiatry, University of Oxford, Oxford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0523-3877","authenticated-orcid":false,"given":"Yi","family":"Zhang","sequence":"additional","affiliation":[{"name":"Department of Psychiatry, University of Oxford, Oxford, U.K."}]},{"given":"Dan W.","family":"Joyce","sequence":"additional","affiliation":[{"name":"Department of Primary Care and Mental Health, University of Liverpool, Liverpool, U.K."}]},{"given":"Ziming","family":"Gao","sequence":"additional","affiliation":[{"name":"Department of Psychiatry, University of Oxford, Oxford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3555-9181","authenticated-orcid":false,"given":"Andrey","family":"Kormilitzin","sequence":"additional","affiliation":[{"name":"Department of Psychiatry, University of Oxford, Oxford, U.K."}]},{"given":"Alejo","family":"Nevado-Holgado","sequence":"additional","affiliation":[{"name":"Department of Psychiatry, University of Oxford, Oxford, U.K."}]}],"member":"263","reference":[{"key":"ref1","article-title":"The pile: An 800GB dataset of diverse text for language modeling","author":"Gao","year":"2021","journal-title":"arXiv:2101.00027"},{"key":"ref2","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics, Hum. Lang. Technol.","volume":"1","author":"Devlin"},{"key":"ref3","first-page":"140","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref4","article-title":"Language models are few-shot learners","author":"Brown","year":"2020","journal-title":"arXiv:2005.14165"},{"key":"ref5","first-page":"276","article-title":"What does BERT look at? An analysis of BERT\u2019s attention","volume-title":"Proc. ACL Workshop BlackboxNLP, Analyzing Interpreting Neural Netw. (NLP)","author":"Clark"},{"key":"ref6","article-title":"PaLM: Scaling language modeling with pathways","author":"Chowdhery","year":"2022","journal-title":"arXiv:2204.02311"},{"key":"ref7","article-title":"GPT-4 technical report","volume-title":"OpenAI","year":"2023"},{"key":"ref8","first-page":"38","article-title":"Transformers: State-of-the-art natural language processing","volume-title":"Proc. Conf. Empirical Methods Natural Lang. Syst. Demonstrations","author":"Wolf"},{"key":"ref9","first-page":"854","article-title":"Robust transfer learning with pretrained language models through adapters","volume-title":"Proc. 59th Annu. Meeting Assoc. Comput. Linguistics 11th Int. Joint Conf. Natural Lang. Process.","author":"Han"},{"key":"ref10","article-title":"Lightweight transformers for clinical natural language processing","author":"Rohanian","year":"2023","journal-title":"arXiv:2302.04725"},{"key":"ref11","article-title":"BioBERT: A pre-trained biomedical language representation model for biomedical text mining","author":"Lee","year":"2019","journal-title":"arXiv:1901.08746"},{"key":"ref12","article-title":"ClinicalBERT: Modeling clinical notes and predicting hospital readmission","author":"Huang","year":"2019","journal-title":"arXiv:1904.05342"},{"key":"ref13","article-title":"Do we still need clinical language models?","author":"Lehman","year":"2023","journal-title":"arXiv:2302.08091"},{"key":"ref14","first-page":"72","article-title":"Publicly available clinical BERT embeddings","volume-title":"Proc. 2nd Clin. Natural Lang. Process. Workshop","author":"Alsentzer"},{"key":"ref15","first-page":"58","article-title":"Transfer learning in biomedical natural language processing: An evaluation of BERT and ELMo on ten benchmarking datasets","volume-title":"Proc. 18th BioNLP Workshop Shared Task","author":"Peng"},{"key":"ref16","first-page":"8342","article-title":"Don\u2019t stop pretraining: Adapt language models to domains and tasks","volume-title":"Proc. 58th Annu. Meeting Assoc. Comput. Linguistics","author":"Gururangan"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.3389\/fpsyt.2020.00268"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1192\/bjp.2020.136"},{"key":"ref19","first-page":"7870","article-title":"Recall and learn: Fine-tuning deep pretrained language models with less forgetting","volume-title":"Proc. Conf. Empirical Methods Natural Lang. Process. (EMNLP)","author":"Chen"},{"key":"ref20","first-page":"107","article-title":"Annotation artifacts in natural language inference data","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics, Human Lang. Technol.","author":"Gururangan"},{"key":"ref21","first-page":"4658","article-title":"Probing neural network comprehension of natural language arguments","volume-title":"Proc. 57th Annu. Meeting Assoc. Comput. Linguistics","author":"Niven"},{"key":"ref22","article-title":"Few-shot learning for named entity recognition in medical text","author":"Hofer","year":"2018","journal-title":"arXiv:1811.05468"},{"key":"ref23","first-page":"4582","article-title":"Prefix-tuning: Optimizing continuous prompts for generation","volume-title":"Proc. 59th Annu. Meeting Assoc. Comput. Linguistics 11th Int. Joint Conf. Natural Lang. Process.","author":"Li"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"ref25","first-page":"3045","article-title":"The power of scale for parameter-efficient prompt tuning","volume-title":"Proc. Conf. Empirical Methods Natural Lang. Process.","author":"Lester"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.366"},{"key":"ref27","article-title":"P-tuning v2: Prompt tuning can be comparable to fine-tuning universally across scales and tasks","author":"Liu","year":"2021","journal-title":"arXiv:2110.07602"},{"key":"ref28","article-title":"Chain-of-thought prompting elicits reasoning in large language models","author":"Wei","year":"2022","journal-title":"arXiv:2201.11903"},{"key":"ref29","article-title":"Reflexion: An autonomous agent with dynamic memory and self-reflection","author":"Shinn","year":"2023","journal-title":"arXiv:2303.11366"},{"key":"ref30","first-page":"1","article-title":"Multitask prompted training enables zero-shot task generalization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Sanh"},{"key":"ref31","article-title":"GPT-3 models are poor few-shot learners in the biomedical domain","author":"Moradi","year":"2021","journal-title":"arXiv:2109.02555"},{"key":"ref32","article-title":"HealthPrompt: A zero-shot learning paradigm for clinical natural language processing","author":"Sivarajkumar","year":"2022","journal-title":"arXiv:2203.05061"},{"key":"ref33","article-title":"Parameter-efficient transfer learning for NLP","author":"Houlsby","year":"2019","journal-title":"arXiv:1902.00751"},{"key":"ref34","article-title":"LoRA: Low-rank adaptation of large language models","author":"Hu","year":"2021","journal-title":"arXiv:2106.09685"},{"key":"ref35","first-page":"255","article-title":"Exploiting cloze-questions for few-shot text classification and natural language inference","volume-title":"Proc. 16th Conf. Eur. Chapter Assoc. Comput. Linguistics","author":"Schick"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1038\/sdata.2016.35"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.eacl-main.75"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-short.91"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/3368555.3384469"},{"key":"ref40","first-page":"26","article-title":"What\u2019s in a note? Unpacking predictive value in clinical note representations","volume":"2018","author":"Boag","year":"2018","journal-title":"AMIA Summits Transl. Sci. Proc."},{"key":"ref41","first-page":"5721","article-title":"Template-free prompt tuning for few-shot NER","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics, Human Lang. Technol.","author":"Ma"},{"key":"ref42","article-title":"OpenPrompt: An open-source framework for prompt-learning","author":"Ding","year":"2021","journal-title":"arXiv:2111.01998"},{"key":"ref43","first-page":"4596","article-title":"Adafactor: Adaptive learning rates with sublinear memory cost","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","volume":"80","author":"Shazeer"},{"key":"ref44","first-page":"1","article-title":"Decoupled weight decay regularization","volume-title":"Proc. 7th Int. Conf. Learn. Represent. (ICLR)","author":"Loshchilov"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1145\/3458754"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10737991\/10215061.pdf?arnumber=10215061","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T19:23:49Z","timestamp":1732735429000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10215061\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11]]},"references-count":45,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2023.3294633","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11]]}}}