{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,29]],"date-time":"2026-03-29T17:30:48Z","timestamp":1774805448483,"version":"3.50.1"},"reference-count":48,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2024,2,29]],"date-time":"2024-02-29T00:00:00Z","timestamp":1709164800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,2,29]],"date-time":"2024-02-29T00:00:00Z","timestamp":1709164800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Healthc Inform Res"],"published-print":{"date-parts":[[2024,6]]},"DOI":"10.1007\/s41666-024-00162-9","type":"journal-article","created":{"date-parts":[[2024,2,29]],"date-time":"2024-02-29T16:02:28Z","timestamp":1709222548000},"page":"206-224","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":24,"title":["Prompt Tuning in Biomedical Relation Extraction"],"prefix":"10.1007","volume":"8","author":[{"given":"Jianping","family":"He","sequence":"first","affiliation":[]},{"given":"Fang","family":"Li","sequence":"additional","affiliation":[]},{"given":"Jianfu","family":"Li","sequence":"additional","affiliation":[]},{"given":"Xinyue","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Yi","family":"Nian","sequence":"additional","affiliation":[]},{"given":"Yang","family":"Xiang","sequence":"additional","affiliation":[]},{"given":"Jingqi","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Qiang","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Yiming","family":"Li","sequence":"additional","affiliation":[]},{"given":"Hua","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Cui","family":"Tao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,2,29]]},"reference":[{"key":"162_CR1","unstructured":"SyTrue (2015) Why unstructured data holds the key to intelligent healthcare systems. Consultant HIT. https:\/\/hitconsultant.net\/2015\/03\/31. Accessed 24 Jun 2023"},{"key":"162_CR2","doi-asserted-by":"publisher","unstructured":"Lim S, Kang J (2018) Chemical\u2013gene relation extraction using recursive neural network. Database. https:\/\/doi.org\/10.1093\/database\/bay060","DOI":"10.1093\/database\/bay060"},{"key":"162_CR3","unstructured":"Zelenko D, Aone C, Richardella A (2003) Kernel methods for relation extraction.\u00a0J Mach Learn Res\u00a03:1083\u20131106"},{"key":"162_CR4","doi-asserted-by":"publisher","unstructured":"Nasar Z, Jaffry SW, Malik MK (2021) Named entity recognition and relation extraction: state-of-the-art. ACM. Comput Surv.\u00a0https:\/\/doi.org\/10.1145\/3445965","DOI":"10.1145\/3445965"},{"key":"162_CR5","doi-asserted-by":"publisher","unstructured":"Shi Y, Xiao Y, Quan P, Lei M, Niu L (2021) Distant supervision relation extraction via adaptive dependency-path and additional knowledge graph supervision. Neural networks: the official journal of the International Neural Network Society. https:\/\/doi.org\/10.1016\/j.neunet.2020.10.012","DOI":"10.1016\/j.neunet.2020.10.012"},{"key":"162_CR6","doi-asserted-by":"publisher","unstructured":"Devlin J, Chang MW, Lee K, Toutanova K (2019) BERT: Pre-training of deep bidirectional transformers for language understanding. Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies. https:\/\/doi.org\/10.18653\/v1\/N19-1423","DOI":"10.18653\/v1\/N19-1423"},{"key":"162_CR7","doi-asserted-by":"publisher","unstructured":"Liu P, Yuan W, Fu J, Jiang Z, Hayashi H, Neubig G (2023) Pre-train, prompt, and predict: a systematic survey of prompting methods in natural language processing. ACM Comput Surv. https:\/\/doi.org\/10.1145\/3560815","DOI":"10.1145\/3560815"},{"key":"162_CR8","doi-asserted-by":"publisher","unstructured":"Li C, Gao F, Bu J, Xu L, Chen X, Gu Y, Shao Z, Zheng Q, Zhang N, Wang Y, Yu Z (2021) SentiPrompt: sentiment knowledge enhanced prompt-tuning for aspect-based sentiment analysis. arXiv. https:\/\/doi.org\/10.48550\/arXiv.2109.08306","DOI":"10.48550\/arXiv.2109.08306"},{"key":"162_CR9","doi-asserted-by":"publisher","unstructured":"Zheng C, Huang M (2021) Exploring prompt-based few-shot learning for grounded dialog generation. arXiv.\nhttps:\/\/doi.org\/10.48550\/arXiv.2109.06513","DOI":"10.48550\/arXiv.2109.06513"},{"key":"162_CR10","doi-asserted-by":"publisher","unstructured":"Zhong Z, Friedman D, Chen D (2021) Factual probing is [MASK]: learning vs. learning to recall. arXiv.\nhttps:\/\/doi.org\/10.48550\/arXiv.2104.05240","DOI":"10.48550\/arXiv.2104.05240"},{"key":"162_CR11","doi-asserted-by":"publisher","unstructured":"Han X, Zhao W, Ding N, Liu Z, Sun M (2021) PTR: prompt tuning with rules for text classification. arXiv.\nhttps:\/\/doi.org\/10.1016\/j.aiopen.2022.11.003","DOI":"10.1016\/j.aiopen.2022.11.003"},{"key":"162_CR12","doi-asserted-by":"publisher","unstructured":"Schick T, Sch\u00fctze H (2020) Exploiting cloze questions for few shot text classification and natural language inference. arXiv. https:\/\/doi.org\/10.48550\/arXiv.2001.07676","DOI":"10.48550\/arXiv.2001.07676"},{"key":"162_CR13","doi-asserted-by":"publisher","unstructured":"\u00a0Schick T, Schmid H, Sch\u00fctze H (2020) Automatically identifying words that can serve as labels for few-shot text classification. arXiv. https:\/\/doi.org\/10.48550\/arXiv.2010.13641","DOI":"10.48550\/arXiv.2010.13641"},{"key":"162_CR14","unstructured":"dmis-lab (2020) Biobert-large-cased-v1.1. Hugging face. https:\/\/huggingface.co\/dmis-lab\/biobert-large-cased-v1.1. Accessed 15 Oct 2023"},{"key":"162_CR15","unstructured":"bionlp (2020) Bluebert_pubmed_mimic_uncased_L-12_H-768_A-12. Hugging face. https:\/\/huggingface.co\/bionlp\/bluebert_pubmed_mimic_uncased_L-12_H-768_A-12. Accessed 15 Oct 2023"},{"key":"162_CR16","unstructured":"emilyalsentzer (2020) Bio_ClinicalBERT. Hugging face. https:\/\/huggingface.co\/emilyalsentzer\/Bio_ClinicalBERT. Accessed 15 Oct 2023"},{"key":"162_CR17","unstructured":"Microsoft (2021) BiomedNLP-BiomedBERT-base-uncased-abstract-fulltext. hugging face. https:\/\/huggingface.co\/microsoft\/BiomedNLP-BiomedBERT-base-uncased-abstract-fulltext. Accessed 19 Nov 2023"},{"key":"162_CR18","unstructured":"Krallinger M, Rabal O, Akhondi SA, Perez M, Santamaria J, Rodr\u00edguez GP, Tsatsaronis G, Intxaurrondo A, L\u00f3pez JAB, Nandal U, Buel EV, Chandrasekhar A, Rodenburg M, L\u00e6greid A, Doornenbal MA, Oyarz\u00e1bal J, Louren\u00e7o A, Valencia A (2017) Overview of the BioCreative VI chemical-protein interaction track. Semantic Scholar. https:\/\/www.semanticscholar.org\/paper\/Overview-of-the-BioCreative-VI-chemical-protein-Krallinger-Rabal\/eed781f498b563df5a9e8a241c67d63dd1d92ad5. Accessed 15 Oct 2021"},{"key":"162_CR19","doi-asserted-by":"publisher","unstructured":"Herrero-Zazo M, Segura-Bedmar I, Mart\u00ednez P, Declerck T (2013) The DDI corpus: an annotated corpus with pharmacological substances and drug\u2013drug interactions. J Biomed Inform. https:\/\/doi.org\/10.1016\/j.jbi.2013.07.011","DOI":"10.1016\/j.jbi.2013.07.011"},{"key":"162_CR20","doi-asserted-by":"publisher","unstructured":"Li Z, Lin H, Shen C, Zheng W, Yang Z, Wang J (2020) Cross2Self-attentive bidirectional recurrent neural network with BERT for biomedical semantic text similarity. 2020 IEEE International Conference on Bioinformatics and Biomedicine. https:\/\/doi.org\/10.1109\/BIBM49941.2020.9313452","DOI":"10.1109\/BIBM49941.2020.9313452"},{"key":"162_CR21","doi-asserted-by":"publisher","unstructured":"Warikoo N, Chang YC, Hsu WL (2018) LPTK: a linguistic pattern-aware dependency tree kernel approach for the BioCreative VI CHEMPROT task. Database. https:\/\/doi.org\/10.1093\/database\/bay108","DOI":"10.1093\/database\/bay108"},{"key":"162_CR22","doi-asserted-by":"publisher","unstructured":"Ben Abacha A, Chowdhury MFM, Karanasiou A, Mrabet Y, Lavelli A, Zweigenbaum P (2015) Text mining for pharmacovigilance: using machine learning for drug name recognition and drug-drug interaction extraction and classification.\u00a0J Biomed Inform. https:\/\/doi.org\/10.1016\/j.jbi.2015.09.015","DOI":"10.1016\/j.jbi.2015.09.015"},{"key":"162_CR23","doi-asserted-by":"publisher","unstructured":"Corbett P, Boyle J (2018) Improving the learning of chemical-protein interactions from literature using transfer learning and specialized word embeddings. Database. https:\/\/doi.org\/10.1093\/database\/bay066","DOI":"10.1093\/database\/bay066"},{"key":"162_CR24","doi-asserted-by":"publisher","unstructured":"Peng Y, Rios A, Kavuluru R, Lu Z (2018) Extracting chemical\u2013protein relations with ensembles of SVM and deep learning models. Database. https:\/\/doi.org\/10.1093\/database\/bay073","DOI":"10.1093\/database\/bay073"},{"key":"162_CR25","doi-asserted-by":"publisher","unstructured":"Liu S, Shen F, Komandur Elayavilli R, Wang Y, Rastegar-Mojarad M, Chaudhary V, Liu H (2018) Extracting chemical-protein relations using attention-based neural networks. Database: the journal of biological databases and curation. https:\/\/doi.org\/10.1093\/database\/bay102","DOI":"10.1093\/database\/bay102"},{"key":"162_CR26","doi-asserted-by":"publisher","unstructured":"Mehryary F, Bj\u00f6rne J, Salakoski T, Ginter F (2018) Potent pairing: ensemble of long short-term memory networks and support vector machine for chemical-protein relation extraction. Database: the journal of biological databases and curation. https:\/\/doi.org\/10.1093\/database\/bay120","DOI":"10.1093\/database\/bay120"},{"key":"162_CR27","doi-asserted-by":"publisher","unstructured":"Zhang Y, Lin H, Yang Z, Wang J, Sun Y (2019) Chemical\u2013protein interaction extraction via contextualized word representations and multihead attention. Database. https:\/\/doi.org\/10.1093\/database\/baz054","DOI":"10.1093\/database\/baz054"},{"key":"162_CR28","doi-asserted-by":"publisher","unstructured":"Antunes R, Matos S (2019) Extraction of chemical\u2013protein interactions from the literature using neural networks and narrow instance representation. Database. https:\/\/doi.org\/10.1093\/database\/baz095","DOI":"10.1093\/database\/baz095"},{"key":"162_CR29","doi-asserted-by":"publisher","unstructured":"Wang E, Wang F, Yang Z, Wang L, Zhang Y, Lin H, Wang J (2020) A graph convolutional network-based method for chemical-protein interaction extraction: algorithm development. JMIR medical informatics. https:\/\/doi.org\/10.2196\/17643","DOI":"10.2196\/17643"},{"key":"162_CR30","doi-asserted-by":"publisher","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser L, Polosukhin I (2017) Attention is all you need. arXiv. https:\/\/doi.org\/10.48550\/arXiv.1706.03762","DOI":"10.48550\/arXiv.1706.03762"},{"key":"162_CR31","doi-asserted-by":"publisher","unstructured":"Sun C, Yang Z, Wang L, Zhang Y, Lin H, Wang J (2020) Attention guided capsule networks for chemical-protein interaction extraction.\u00a0J Biomed Inform. https:\/\/doi.org\/10.1016\/j.jbi.2020.103392","DOI":"10.1016\/j.jbi.2020.103392"},{"key":"162_CR32","doi-asserted-by":"publisher","unstructured":"Sun C, Yang Z, Su L, Wang L, Zhang Y, Lin H, Wang J (2020) Chemical\u2013protein interaction extraction via gaussian probability distribution and external biomedical knowledge. Bioinformatics. https:\/\/doi.org\/10.1093\/bioinformatics\/btaa491","DOI":"10.1093\/bioinformatics\/btaa491"},{"key":"162_CR33","doi-asserted-by":"publisher","unstructured":"Zuo M, Zhang Y (2021) A span-based joint model for extracting entities and relations of bacteria biotopes. Bioinformatics. https:\/\/doi.org\/10.1093\/bioinformatics\/btab593","DOI":"10.1093\/bioinformatics\/btab593"},{"key":"162_CR34","unstructured":"Corpus Statistics (2019) BB 2019. https:\/\/sites.google.com\/view\/bb-2019\/dataset\/. Accessed 19 Jan 2024"},{"key":"162_CR35","doi-asserted-by":"publisher","unstructured":"Sun C, Yang Z, Wang L, Zhang Y, Lin H, Wang J (2022) MRC4BioER: joint extraction of biomedical entities and relations in the machine reading comprehension framework.\u00a0J Biomed Inform.\nhttps:\/\/doi.org\/10.1016\/j.jbi.2021.103956","DOI":"10.1016\/j.jbi.2021.103956"},{"key":"162_CR36","unstructured":"google research (2018) Bert: tensorFlow code and pre-trained models for BERT. Github. https:\/\/github.com\/google-research\/bert. Accessed 17 Sep 2022"},{"key":"162_CR37","doi-asserted-by":"publisher","unstructured":"Guo H, Tan B, Liu Z, Xing EP, Hu Z (2021) Text generation with efficient (soft) Q-learning. arXiv. https:\/\/doi.org\/10.48550\/arXiv.2106.07704","DOI":"10.48550\/arXiv.2106.07704"},{"key":"162_CR38","doi-asserted-by":"publisher","unstructured":"Chen X, Li L, Zhang N, Tan C, Huang F, Si L, Chen H (2022) Relation extraction as open-book examination: retrieval-enhanced prompt tuning. arXiv. https:\/\/doi.org\/10.1145\/3477495.3531746","DOI":"10.1145\/3477495.3531746"},{"key":"162_CR39","doi-asserted-by":"publisher","unstructured":"Chen X, Zhang N, Li L, Yao Y, Deng S, Tan C, Huang F, Si L, Chen H (2022) Good visual guidance make a better extractor: hierarchical visual prefix for multimodal entity and relation extraction. Findings of the Association for Computational Linguistics. https:\/\/doi.org\/10.18653\/v1\/2022.findings-naacl.121","DOI":"10.18653\/v1\/2022.findings-naacl.121"},{"key":"162_CR40","doi-asserted-by":"publisher","unstructured":"\u00a0Chen X, Zhang N, Xie X, Deng S, Yao Y, Tan C, Huang F, Si L, Chen H (2022) KnowPrompt: knowledge-aware prompt-tuning with synergistic optimization for relation extraction. Proceedings of the ACM Web Conference 2022. https:\/\/doi.org\/10.1145\/3485447.3511998","DOI":"10.1145\/3485447.3511998"},{"key":"162_CR41","doi-asserted-by":"publisher","unstructured":"\u00a0Sainz O, de Lacalle OL, Labaka G, Barrena A, Agirre E (2021) Label verbalization and entailment for effective zero and few-shot relation extraction. Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing. https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.92","DOI":"10.18653\/v1\/2021.emnlp-main.92"},{"key":"162_CR42","doi-asserted-by":"publisher","unstructured":"Ma R, Zhou X, Gui T, Tan Y, Li L, Zhang Q, Huang X (2021) Template-free prompt tuning for few-shot NER. arXiv. https:\/\/doi.org\/10.48550\/arXiv.2109.13532","DOI":"10.48550\/arXiv.2109.13532"},{"key":"162_CR43","doi-asserted-by":"publisher","unstructured":"He J, Li F, Hu X, Li J, Nian Y, Wang J, Xiang Y, Wei Q, Xu H, Tao C (2022) Chemical-protein relation extraction with pre-trained prompt tuning. IEEE Int Conf Healthc Inform. https:\/\/doi.org\/10.1109\/ichi54592.2022.00120","DOI":"10.1109\/ichi54592.2022.00120"},{"key":"162_CR44","doi-asserted-by":"publisher","unstructured":"Yeh HS, Lavergne T, Zweigenbaum P (2022) Decorate the examples: a simple method of prompt design for biomedical relation extraction. arXiv. https:\/\/doi.org\/10.48550\/arXiv.2204.10360","DOI":"10.48550\/arXiv.2204.10360"},{"key":"162_CR45","doi-asserted-by":"publisher","unstructured":"Li Q, Wang Y, You T, Lu Y (2022) BioKnowPrompt: incorporating imprecise knowledge into prompt-tuning verbalizer with biomedical text for relation extraction. Inf Sci. https:\/\/doi.org\/10.1016\/j.ins.2022.10.063","DOI":"10.1016\/j.ins.2022.10.063"},{"key":"162_CR46","doi-asserted-by":"publisher","unstructured":"Peng Y, Yan S, Lu Z (2019) Transfer learning in biomedical natural language processing: an evaluation of BERT and ELMo on ten benchmarking datasets. Proceedings of the 18th BioNLP Workshop and Shared Task. https:\/\/doi.org\/10.18653\/v1\/w19-5006","DOI":"10.18653\/v1\/w19-5006"},{"key":"162_CR47","doi-asserted-by":"publisher","unstructured":"Lee J, Yoon W, Kim S, Kim D, Kim S, So CH, Kang J (2020) BioBERT: a pre-trained biomedical language representation model for biomedical text mining. Bioinformatics. https:\/\/doi.org\/10.1093\/bioinformatics\/btz682","DOI":"10.1093\/bioinformatics\/btz682"},{"key":"162_CR48","doi-asserted-by":"publisher","unstructured":"Gu Y, Tinn R, Cheng H, Lucas M, Usuyama N, Liu X, Naumann T, Gao J, Poon H (2022) Domain-specific language model pretraining for biomedical natural language processing. ACM Transactions on Computing for Healthcare.\nhttps:\/\/doi.org\/10.1145\/3458754","DOI":"10.1145\/3458754"}],"container-title":["Journal of Healthcare Informatics Research"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41666-024-00162-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s41666-024-00162-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s41666-024-00162-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,4,26]],"date-time":"2024-04-26T12:17:02Z","timestamp":1714133822000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s41666-024-00162-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,2,29]]},"references-count":48,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2024,6]]}},"alternative-id":["162"],"URL":"https:\/\/doi.org\/10.1007\/s41666-024-00162-9","relation":{},"ISSN":["2509-4971","2509-498X"],"issn-type":[{"value":"2509-4971","type":"print"},{"value":"2509-498X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,2,29]]},"assertion":[{"value":"1 October 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 February 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 February 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 February 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical Approval"}},{"value":"The authors declare no competing interests.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}}]}}