{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T03:40:53Z","timestamp":1771299653998,"version":"3.50.1"},"reference-count":32,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,11,22]],"date-time":"2023-11-22T00:00:00Z","timestamp":1700611200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,11,22]],"date-time":"2023-11-22T00:00:00Z","timestamp":1700611200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,11,22]]},"DOI":"10.1109\/sita60746.2023.10373720","type":"proceedings-article","created":{"date-parts":[[2024,1,3]],"date-time":"2024-01-03T19:26:16Z","timestamp":1704309976000},"page":"1-8","source":"Crossref","is-referenced-by-count":6,"title":["Enhancing Large Language Models\u2019 Utility for Medical Question-Answering: A Patient Health Question Summarization Approach"],"prefix":"10.1109","author":[{"given":"Nour Eddine","family":"Zekaoui","sequence":"first","affiliation":[{"name":"School of Information Sciences,Meridian Team, LyRICA Laboratory,Rabat,Morocco"}]},{"given":"Siham","family":"Yousfi","sequence":"additional","affiliation":[{"name":"School of Information Sciences,Meridian Team, LyRICA Laboratory,Rabat,Morocco"}]},{"given":"Mounia","family":"Mikram","sequence":"additional","affiliation":[{"name":"School of Information Sciences,Meridian Team, LyRICA Laboratory,Rabat,Morocco"}]},{"given":"Maryem","family":"Rhanoui","sequence":"additional","affiliation":[{"name":"School of Information Sciences,Meridian Team, LyRICA Laboratory,Rabat,Morocco"}]}],"member":"263","reference":[{"key":"ref1","article-title":"A survey of large language models","author":"Zhao","year":"2023"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/2187980.2188206"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1215"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbi.2020.103511"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/2505515.2505677"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.ipm.2015.04.006"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/S16-1083"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1067"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1091"},{"key":"ref10","article-title":"Scaling instruction-finetuned language models","author":"Chung","year":"2022"},{"key":"ref11","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"International Conference on Learning Representations","author":"Hu"},{"key":"ref12","article-title":"Peft: State-of-the-art parameter-efficient fine-tuning methods","author":"Mangrulkar","year":"2022"},{"key":"ref13","article-title":"Finetuned language models are zero-shot learners","volume-title":"International Conference on Learning Representations","author":"Wei"},{"key":"ref14","article-title":"Multitask prompted training enables zero-shot task generalization","volume-title":"International Conference on Learning Representations","author":"Sanh"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1093\/jamia\/ocw024"},{"key":"ref16","first-page":"5998","article-title":"Attention is all you need","volume-title":"Proceedings of the 31st Conference on Neural Information Processing Systems","author":"Vaswani"},{"key":"ref17","article-title":"Improving language understanding with unsupervised learning","volume-title":"Proceedings of the 2018 Conference on Neural Information Processing Systems","author":"Radford"},{"key":"ref18","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","author":"Devlin"},{"issue":"140","key":"ref19","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"Journal of Machine Learning Research"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.11591\/ijai.v12.i4.pp1995-2010"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"ref22","article-title":"PEGASUS: Pre-training with extracted gap-sentences for abstractive summarization","volume-title":"Proceedings of the 37th International Conference on Machine Learning","author":"Zhang"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbi.2022.104040"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.nlpmc-1.8"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1611835114"},{"key":"ref26","first-page":"74","article-title":"ROUGE: A package for automatic evaluation of summaries","volume-title":"Text Summarization Branches Out.","author":"Lin","year":"2004"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.bionlp-1.28"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.743"},{"key":"ref29","article-title":"Chq-summ: A dataset for consumer healthcare question summarization","author":"Yadav","year":"2022"},{"key":"ref30","article-title":"Generalized cross entropy loss for training deep neural networks with noisy labels","volume-title":"Advances in Neural Information Processing Systems","volume":"31","author":"Zhang","year":"2018"},{"key":"ref31","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014"},{"key":"ref32","article-title":"Mistral 7b","author":"Jiang","year":"2023"}],"event":{"name":"2023 14th International Conference on Intelligent Systems: Theories and Applications (SITA)","location":"Casablanca, Morocco","start":{"date-parts":[[2023,11,22]]},"end":{"date-parts":[[2023,11,23]]}},"container-title":["2023 14th International Conference on Intelligent Systems: Theories and Applications (SITA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10373588\/10373590\/10373720.pdf?arnumber=10373720","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,13]],"date-time":"2024-01-13T00:25:13Z","timestamp":1705105513000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10373720\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,22]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/sita60746.2023.10373720","relation":{},"subject":[],"published":{"date-parts":[[2023,11,22]]}}}