{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T11:06:44Z","timestamp":1730200004812,"version":"3.28.0"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,12,5]],"date-time":"2023-12-05T00:00:00Z","timestamp":1701734400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,12,5]],"date-time":"2023-12-05T00:00:00Z","timestamp":1701734400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,12,5]]},"DOI":"10.1109\/bibm58861.2023.10385678","type":"proceedings-article","created":{"date-parts":[[2024,1,18]],"date-time":"2024-01-18T13:27:43Z","timestamp":1705584463000},"page":"2281-2284","source":"Crossref","is-referenced-by-count":0,"title":["Low-Resource Efficient Multi-Stage Tuning Strategy for Biomedical Question Answering Task"],"prefix":"10.1109","author":[{"given":"Binrui","family":"Wang","sequence":"first","affiliation":[{"name":"Beijing University of Technology,Faculty of Information Technology,Beijing,China"}]},{"given":"Yongping","family":"Du","sequence":"additional","affiliation":[{"name":"Beijing University of Technology,Faculty of Information Technology,Beijing,China"}]},{"given":"Xingnan","family":"Jin","sequence":"additional","affiliation":[{"name":"Beijing University of Technology,Faculty of Information Technology,Beijing,China"}]},{"given":"Rui","family":"Yan","sequence":"additional","affiliation":[{"name":"Beijing University of Technology,Faculty of Information Technology,Beijing,China"}]},{"given":"Qi","family":"Zhang","sequence":"additional","affiliation":[{"name":"Beijing University of Technology,Faculty of Information Technology,Beijing,China"}]}],"member":"263","reference":[{"article-title":"A survey of large language models","year":"2023","author":"Zhao","key":"ref1"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3571730"},{"article-title":"Do We Still Need Clinical Language Models?","year":"2023","author":"Lehman","key":"ref3"},{"article-title":"Bert: Pretraining of deep bidirectional transformers for language understanding","year":"2018","author":"Devlin","key":"ref4"},{"key":"ref5","article-title":"Attention is all you need","volume":"30","author":"Vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1259"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W19-5006"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/w19-1909"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1371"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3458754"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.551"},{"article-title":"Improving language understanding by generative pre-training","year":"2018","author":"Radford","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1093\/bib\/bbac409"},{"key":"ref14","article-title":"Stanford crfm introduces pubmedgpt 2.7 b","author":"Bolton","year":"2022","journal-title":"Stanford Human-Centered Artificial Intelligence"},{"issue":"1","key":"ref15","first-page":"5485","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"The Journal of Machine Learning Research"},{"key":"ref16","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume":"35","author":"Wei","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1186\/s12859-015-0564-6"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.bionlp-1.16"},{"key":"ref19","first-page":"37309","article-title":"Deep bidirectional language-knowledge graph pretraining","volume":"35","author":"Yasunaga","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Pmcllama: Further finetuning llama on medical papers","year":"2023","author":"Wu","key":"ref20"},{"article-title":"Lora: Low-rank adaptation of large language models","volume-title":"International Conference on Learning Representations","author":"Hu","key":"ref21"},{"article-title":"Capabilities of gpt-4 on medical challenge problems","year":"2023","author":"Nori","key":"ref22"},{"article-title":"Adaptive budget allocation for parameter-efficient finetuning","volume-title":"11th International Conference on Learning Representations","author":"Zhang","key":"ref23"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"}],"event":{"name":"2023 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)","start":{"date-parts":[[2023,12,5]]},"location":"Istanbul, Turkiye","end":{"date-parts":[[2023,12,8]]}},"container-title":["2023 IEEE International Conference on Bioinformatics and Biomedicine (BIBM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10385250\/10385251\/10385678.pdf?arnumber=10385678","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,19]],"date-time":"2024-01-19T13:36:33Z","timestamp":1705671393000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10385678\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,5]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/bibm58861.2023.10385678","relation":{},"subject":[],"published":{"date-parts":[[2023,12,5]]}}}