{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T10:59:10Z","timestamp":1768993150425,"version":"3.49.0"},"reference-count":50,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100014188","name":"Ministry of Science and ICT, South Korea","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100014188","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3384496","type":"journal-article","created":{"date-parts":[[2024,4,3]],"date-time":"2024-04-03T17:50:23Z","timestamp":1712166623000},"page":"1-1","source":"Crossref","is-referenced-by-count":17,"title":["Evaluation and Analysis of Large Language Models for Clinical Text Augmentation and Generation"],"prefix":"10.1109","author":[{"ORCID":"https:\/\/orcid.org\/0009-0007-8384-7272","authenticated-orcid":false,"given":"Atif","family":"Latif","sequence":"first","affiliation":[{"name":"Department of Artificial Intelligence, Dongguk University, Jung-gu, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2358-4021","authenticated-orcid":false,"given":"Jihie","family":"Kim","sequence":"additional","affiliation":[{"name":"College of AI Convergence, Dongguk University, Jung-gu, Seoul, South Korea"}]}],"member":"263","reference":[{"key":"ref1","article-title":"BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension","author":"Lewis","year":"2019","journal-title":"arXiv:1910.13461"},{"issue":"1","key":"ref2","first-page":"5485","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00342"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.5220\/0011744500003393"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.84"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.gltp.2022.04.020"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.shaw.2014.05.004"},{"issue":"10","key":"ref8","doi-asserted-by":"crossref","first-page":"243","DOI":"10.15623\/ijret.2014.0310037","article-title":"NLP based retrieval of medical information for diagnosis of human diseases","volume":"3","author":"D","year":"2014","journal-title":"Int. J. Res. Eng. 
Technol."},{"key":"ref9","article-title":"An empirical evaluation of prompting strategies for large language models in zero-shot clinical natural language processing","author":"Sivarajkumar","year":"2023","journal-title":"arXiv:2309.08008"},{"key":"ref10","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1907.11692"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.yebeh.2019.02.002"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1093\/bioinformatics\/btab153"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.2196\/25670"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/BIBM55620.2022.9995416"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.101939"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.34"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pdig.0000198"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/S2589-7500(23)00021-3"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1093\/bioinformatics\/btz682"},{"key":"ref21","article-title":"ClinicalBERT: Modeling clinical notes and predicting hospital readmission","author":"Huang","year":"2019","journal-title":"arXiv:1904.05342"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.63"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.47001\/IRJIET\/2020.411003"},{"key":"ref24","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018"},{"issue":"8","key":"ref25","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref26","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"33","author":"Brown"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/S2589-7500(23)00019-5"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00020"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00472"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1670"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D15-1306"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.deelio-1.4"},{"key":"ref33","article-title":"Good-enough compositional data augmentation","author":"Andreas","year":"2019","journal-title":"arXiv:1904.09545"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-2072"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6233"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1253"},{"key":"ref37","article-title":"Data augmentation using pre-trained transformer models","author":"Kumar","year":"2020","journal-title":"arXiv:2003.02245"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.801"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.94"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-023-00958-w"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1148\/radiol.223312"},{"key":"ref42","article-title":"CHARD: Clinical health-aware reasoning across dimensions for text generation models","author":"Feng","year":"2022","journal-title":"arXiv:2210.04191"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.85"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.3115\/1073445.1073465"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.3115\/1626355.1626389"},{"key":"ref47","article-title":"BERTScore: Evaluating text generation with BERT","author":"Zhang","year":"2019","journal-title":"arXiv:1904.09675"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3132724"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbi.2022.103999"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-023-06291-2"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/6514899\/10489969.pdf?arnumber=10489969","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,4,9]],"date-time":"2024-04-09T19:45:28Z","timestamp":1712691928000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10489969\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":50,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3384496","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}