{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,11]],"date-time":"2025-10-11T13:38:13Z","timestamp":1760189893922,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":33,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819606948"},{"type":"electronic","value":"9789819606955"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-96-0695-5_32","type":"book-chapter","created":{"date-parts":[[2025,2,19]],"date-time":"2025-02-19T05:48:40Z","timestamp":1739944120000},"page":"399-410","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["BioMed-LLaMa-3: Instruction-Efficient Fine-Tuning of\u00a0Large Language Models for\u00a0Improved Biomedical Language Understanding"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8326-8333","authenticated-orcid":false,"given":"Nour Eddine","family":"Zekaoui","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6647-5098","authenticated-orcid":false,"given":"Mounia","family":"Mikram","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0147-8466","authenticated-orcid":false,"given":"Maryem","family":"Rhanoui","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0745-8327","authenticated-orcid":false,"given":"Siham","family":"Yousfi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,20]]},"reference":[{"key":"32_CR1","unstructured":"Abdin, M., Jacobs, S.A., Awan, A.A., et\u00a0al.: Phi-3 technical report: a highly capable language model locally on your phone (2024)"},{"key":"32_CR2","unstructured":"Banerjee, S., Lavie, A.: METEOR: an automatic metric for MT evaluation with improved correlation with human judgments. In: Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and\/or Summarization, Ann Arbor, Michigan, pp. 65\u201372 (2005)"},{"key":"32_CR3","doi-asserted-by":"crossref","unstructured":"Ben Abacha, A., Shivade, C., Demner-Fushman, D.: Overview of the MEDIQA 2019 shared task on textual inference, question entailment and question answering. In: ACL-BioNLP 2019 (2019)","DOI":"10.18653\/v1\/W19-5039"},{"key":"32_CR4","doi-asserted-by":"publisher","first-page":"1169595","DOI":"10.3389\/frai.2023.1169595","volume":"6","author":"T Dave","year":"2023","unstructured":"Dave, T., Athaluri, S.A., Singh, S.: ChatGPT in medicine: an overview of its applications, advantages, limitations, future prospects, and ethical considerations. Front. Artif. Intell. 6, 1169595 (2023)","journal-title":"Front. Artif. Intell."},{"key":"32_CR5","unstructured":"Dettmers, T., Pagnoni, A., Holtzman, A., Zettlemoyer, L.: QLoRA: efficient finetuning of quantized LLMs. In: Advances in Neural Information Processing Systems, vol.\u00a036, pp. 10088\u201310115 (2023)"},{"key":"32_CR6","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 4171\u20134186 (2019)"},{"key":"32_CR7","unstructured":"Fries, J.A., Weber, L., Seelam, N., Altay, G., et al.: BigBIO: a framework for data-centric biomedical natural language processing (2022)"},{"key":"32_CR8","unstructured":"Han, T., Adams, L.C., Papaioannou, J.M., et\u00a0al.: MedAlpaca \u2013 an open-source collection of medical conversational AI models and training data (2023)"},{"key":"32_CR9","unstructured":"Hu, E.J., et al.: LoRA: low-rank adaptation of large language models (2021)"},{"key":"32_CR10","unstructured":"Jiang, A.Q., Sablayrolles, A., Mensch, A., et\u00a0al.: Mistral 7b (2023)"},{"issue":"14","key":"32_CR11","doi-asserted-by":"publisher","first-page":"6421","DOI":"10.3390\/app11146421","volume":"11","author":"D Jin","year":"2021","unstructured":"Jin, D., Pan, E., Oufattole, N., Weng, W.H., Fang, H., Szolovits, P.: What disease does this patient have? A large-scale open domain question answering dataset from medical exams. Appl. Sci. 11(14), 6421 (2021)","journal-title":"Appl. Sci."},{"key":"32_CR12","doi-asserted-by":"crossref","unstructured":"Labrak, Y., Bazoge, A., Morin, E., Gourraud, P.A., Rouvier, M., Dufour, R.: BioMistral: a collection of open-source pretrained large language models for medical domains (2024)","DOI":"10.18653\/v1\/2024.findings-acl.348"},{"key":"32_CR13","unstructured":"Le\u00a0Scao, T., Fan, A., Akiki, C., Pavlick, E., et al.: BLOOM: a 176B-parameter open-access multilingual language model (2023)"},{"key":"32_CR14","doi-asserted-by":"crossref","unstructured":"Li, Y., Li, Z., Zhang, K., Dan, R., Jiang, S., Zhang, Y.: ChatDoctor: a medical chat model fine-tuned on a large language model meta-AI (LLaMA) using medical domain knowledge (2023)","DOI":"10.7759\/cureus.40895"},{"key":"32_CR15","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, Barcelona, Spain, pp. 74\u201381 (2004)"},{"key":"32_CR16","unstructured":"Luo, Y., et al.: BiomedGPT: open multimodal generative pre-trained transformer for biomedicine (2023)"},{"key":"32_CR17","unstructured":"AI at Meta: Llama 3 model card (2024). https:\/\/github.com\/meta-llama\/llama3\/"},{"key":"32_CR18","unstructured":"Nori, H., King, N., McKinney, S.M., Carignan, D., Horvitz, E.: Capabilities of GPT-4 on medical challenge problems (2023)"},{"key":"32_CR19","unstructured":"OpenAI, Achiam, J., Adler, S., Agarwal, S., et\u00a0al.: GPT-4 technical report (2024)"},{"key":"32_CR20","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Jing Zhu, W.: Bleu: a method for automatic evaluation of machine translation, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"32_CR21","unstructured":"Radford, A., Narasimhan, K., Salimans, T., Sutskever, I.: Improving language understanding with unsupervised learning. In: Proceedings of the 2018 Conference on Neural Information Processing Systems (2018)"},{"key":"32_CR22","first-page":"1","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res. 21, 1\u201367 (2020)","journal-title":"J. Mach. Learn. Res."},{"key":"32_CR23","unstructured":"Taori, R., et al.: Stanford alpaca: an instruction-following llama model (2023)"},{"key":"32_CR24","unstructured":"Team, G., Mesnard, T., Hardin, C., et\u00a0al.: Gemma: open models based on Gemini research and technology (2024)"},{"key":"32_CR25","unstructured":"Touvron, H., Lavril, T., Izacard, G., Martinet, X., et al.: Llama: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"32_CR26","unstructured":"Tunstall, L., Beeching, E., Lambert, N., et\u00a0al.: Zephyr: direct distillation of LM alignment (2023)"},{"key":"32_CR27","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Proceedings of the 31st Conference on Neural Information Processing Systems, pp. 5998\u20136008 (2017)"},{"key":"32_CR28","unstructured":"Woodard, J., Nelson, J.: An information theoretic measure of speech recognition performance. In: Workshop on Standardisation for Speech I\/O Technology, Naval Air Development Center, Warminster, PA (1982)"},{"key":"32_CR29","doi-asserted-by":"crossref","unstructured":"Wu, C., Lin, W., Zhang, X., Zhang, Y., Wang, Y., Xie, W.: PMC-LLaMA: towards building open-source language models for medicine (2023)","DOI":"10.1093\/jamia\/ocae045"},{"key":"32_CR30","unstructured":"Xu, L., Xie, H., Qin, S.Z.J., Tao, X., Wang, F.L.: Parameter-efficient fine-tuning methods for pretrained language models: a critical review and assessment (2023)"},{"key":"32_CR31","doi-asserted-by":"crossref","unstructured":"Zekaoui, N.E., Yousfi, S., Mikram, M., Rhanoui, M.: Enhancing large language models\u2019 utility for medical question-answering: a patient health question summarization approach. In: 2023 14th International Conference on Intelligent Systems: Theories and Applications (SITA), pp.\u00a01\u20138 (2023)","DOI":"10.1109\/SITA60746.2023.10373720"},{"key":"32_CR32","doi-asserted-by":"crossref","unstructured":"Zekaoui, N.E., Yousfi, S., Rhanoui, M., Mikram, M.: Analysis of the evolution of advanced transformer-based language models: experiments on opinion mining. IAES Int. J. Artif. Intell. (IJ-AI) 12(4), 1995\u20132010 (2023)","DOI":"10.11591\/ijai.v12.i4.pp1995-2010"},{"key":"32_CR33","unstructured":"Zhang, T., Kishore, V., Wu, F., Weinberger, K.Q., Artzi, Y.: BERTScore: evaluating text generation with BERT. In: International Conference on Learning Representations (2020)"}],"container-title":["Lecture Notes in Computer Science","Multi-disciplinary Trends in Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-0695-5_32","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,19]],"date-time":"2025-02-19T05:49:11Z","timestamp":1739944151000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-0695-5_32"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9789819606948","9789819606955"],"references-count":33,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-0695-5_32","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"20 February 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MIWAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multi-disciplinary Trends in Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Pattaya","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Thailand","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 November 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16 November 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miwai2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/miwai24.miwai.org","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}