{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,9]],"date-time":"2026-02-09T23:32:21Z","timestamp":1770679941227,"version":"3.49.0"},"reference-count":71,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62372039"],"award-info":[{"award-number":["62372039"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62002016"],"award-info":[{"award-number":["62002016"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"Guangdong Basic and Applied Basic Research Foundation","doi-asserted-by":"publisher","award":["2022A1515240044"],"award-info":[{"award-number":["2022A1515240044"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/access.2025.3605290","type":"journal-article","created":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T17:31:52Z","timestamp":1756834312000},"page":"153713-153727","source":"Crossref","is-referenced-by-count":1,"title":["Entropy-Based Data Selection for Language Models"],"prefix":"10.1109","volume":"13","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-2042-8896","authenticated-orcid":false,"given":"Hongming","family":"Li","sequence":"first","affiliation":[{"name":"School of Computer and Communication Engineering, University of Science and Technology Beijing, Beijing, China"}]},{"given":"Yang","family":"Liu","sequence":"additional","affiliation":[{"name":"National Key Laboratory of General Artificial Intelligence, Beijing Institute for General Artificial Intelligence, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8012-3697","authenticated-orcid":false,"given":"Chao","family":"Huang","sequence":"additional","affiliation":[{"name":"School of Computer and Communication Engineering, University of Science and Technology Beijing, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1080\/00401706.1975.10489266"},{"key":"ref2","article-title":"Compute-constrained data selection","author":"Oscar Yin","year":"2024","journal-title":"arXiv:2410.16208"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1609\/aies.v7i1.31612"},{"key":"ref4","article-title":"Efficient data selection at scale via influence distillation","author":"Nikdan","year":"2025","journal-title":"arXiv:2505.19051"},{"key":"ref5","volume-title":"IDEAL: Influence-based Data Equilibrium Adaptation for Multi-Capability Language Model Alignment","year":"2025"},{"key":"ref6","article-title":"SelectIT: Selective instruction tuning for LLMs via uncertainty-aware self-reflection","author":"Liu","year":"2024","journal-title":"arXiv:2402.16705"},{"key":"ref7","article-title":"MoDS: Model-oriented data selection for instruction tuning","author":"Du","year":"2023","journal-title":"arXiv:2311.15653"},{"key":"ref8","first-page":"1","article-title":"STAFF: Speculative coreset selection for task-specific fine-tuning","volume-title":"Proc. 13th Int. Conf. Learn. Represent. (ICLR)","author":"Zhang"},{"key":"ref9","article-title":"DataMan: Data manager for pre-training large language models","author":"Peng","year":"2025","journal-title":"arXiv:2502.19363"},{"key":"ref10","first-page":"6453","article-title":"Better synthetic data by retrieving and transforming existing datasets","volume-title":"Proc. Findings Assoc. Comput. Linguistics ACL","author":"Gandhi"},{"key":"ref11","first-page":"11065","article-title":"On LLMs-driven synthetic data generation, curation, and evaluation: A survey","volume-title":"Proc. Findings Assoc. Comput. Linguistics ACL","author":"Long"},{"key":"ref12","article-title":"Best practices and lessons learned on synthetic data","author":"Liu","year":"2024","journal-title":"arXiv:2404.07503"},{"key":"ref13","first-page":"2831","article-title":"MEGATRON-CNTRL: Controllable story generation with external knowledge using large-scale language models","volume-title":"Proc. Conf. Empirical Methods Natural Lang. Process. (EMNLP)","author":"Xu"},{"key":"ref14","first-page":"11173","article-title":"Is GPT-3 a good data annotator?","volume-title":"Proc. 61st Annu. Meeting Assoc. Comput. Linguistics","author":"Ding"},{"key":"ref15","first-page":"10443","article-title":"Synthetic data generation with large language models for text classification: Potential and limitations","volume-title":"Proc. Conf. Empirical Methods Natural Lang. Process.","author":"Li"},{"key":"ref16","article-title":"Constitutional AI: Harmlessness from AI feedback","author":"Bai","year":"2022","journal-title":"arXiv:2212.08073"},{"key":"ref17","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023","journal-title":"arXiv:2307.09288"},{"key":"ref18","article-title":"Prometheus: Inducing fine-grained evaluation capability in language models","author":"Kim","year":"2023","journal-title":"arXiv:2310.08491"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-024-07421-0"},{"key":"ref20","article-title":"Fine-tuning large language models to appropriately abstain with semantic entropy","author":"Aaron Tjandra","year":"2024","journal-title":"arXiv:2410.17234"},{"key":"ref21","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proc. Conf. North American Chapter Assoc. Comput. Linguistics, Human Lang. Technol.","author":"Devlin"},{"key":"ref22","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","author":"Raffel","year":"2019","journal-title":"J. Mach. Learn. Res."},{"key":"ref23","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2023.107239","article-title":"AdapterFusion-based multi-task learning for code-mixed and code-switched text classification","volume":"127","author":"Rathnayake","year":"2024","journal-title":"Eng. Appl. Artif. Intell."},{"key":"ref24","first-page":"7841","article-title":"Mitigating boundary ambiguity and inherent bias for text classification in the era of large language models","volume-title":"Proc. Findings Assoc. Comput. Linguistics ACL","author":"Lu"},{"key":"ref25","first-page":"1","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Hu"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-024-06011-x"},{"key":"ref27","first-page":"491","article-title":"Self-regulated data-free knowledge amalgamation for text classification","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics, Human Lang. Technol.","author":"Vijayaraghavan"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.460"},{"key":"ref29","article-title":"A survey of pre-trained language models for processing scientific text","author":"Ho","year":"2024","journal-title":"arXiv:2401.17824"},{"key":"ref30","first-page":"18","article-title":"Data augmentation using pre-trained transformer models","volume-title":"Proc. 2nd Workshop Life-long Learn. Spoken Lang. Syst.","author":"Kumar"},{"key":"ref31","first-page":"2225","article-title":"GPT3Mix: Leveraging large-scale language models for text augmentation","volume-title":"Proc. Findings Assoc. Comput. Linguistics","author":"Yoo"},{"key":"ref32","first-page":"3309","article-title":"ToxiGen: A large-scale machine-generated dataset for adversarial and implicit hate speech detection","volume-title":"Proc. 60th Annu. Meeting Assoc. Comput. Linguistics","author":"Hartvigsen"},{"key":"ref33","first-page":"11817","article-title":"Let\u2019s synthesize step by step: Iterative dataset synthesis with large language models by extrapolating errors from small models","volume-title":"Proc. Findings Assoc. Comput. Linguistics","author":"Wang"},{"key":"ref34","first-page":"47","article-title":"Data augmentation for intent classification with off-the-shelf large language models","volume-title":"Proc. 4th Workshop NLP Conversational AI","author":"Sahu"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00330"},{"key":"ref36","first-page":"295","article-title":"Calibration of pre-trained transformers","volume-title":"Proc. Conf. Empirical Methods Natural Lang. Process. (EMNLP)","author":"Desai"},{"key":"ref37","first-page":"962","article-title":"How can we know when language models know? On the calibration of language models for question answering","volume-title":"Proc. Trans. Assoc. Comput. Linguistics","author":"Jiang"},{"key":"ref38","first-page":"2734","article-title":"On hallucination and predictive uncertainty in conditional language generation","volume-title":"Proc. 16th Conf. Eur. Chapter Assoc. Comput. Linguistics: Main Volume","author":"Xiao"},{"key":"ref39","first-page":"1","article-title":"Semantic uncertainty: Linguistic invariances for uncertainty estimation in natural language generation","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Kuhn"},{"key":"ref40","first-page":"8901","article-title":"Kernel language entropy: Fine-grained uncertainty quantification for LLMs from semantic similarities","volume-title":"Proc. 38th Annu. Conf. Neural Inf. Process. Syst.","volume":"37","author":"Nikitin"},{"key":"ref41","article-title":"A survey on data selection for language models","author":"Albalak","year":"2024","journal-title":"arXiv:2402.16827"},{"key":"ref42","article-title":"The llama 3 herd of models","author":"Grattafiori","year":"2024","journal-title":"arXiv:2407.21783"},{"key":"ref43","article-title":"DataComp-LM: In search of the next generation of training sets for language models","author":"Li","year":"2024","journal-title":"arXiv:2406.11794"},{"key":"ref44","article-title":"When less is more: Investigating data pruning for pretraining LLMs at scale","author":"Marion","year":"2023","journal-title":"arXiv:2309.04564"},{"key":"ref45","article-title":"QuRating: Selecting high-quality data for training language models","author":"Wettig","year":"2024","journal-title":"arXiv:2402.09739"},{"key":"ref46","first-page":"5464","article-title":"GRAD-MATCH: Gradient matching based data subset selection for efficient deep model training","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Killamsetty"},{"key":"ref47","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Proc. 36th Int. Conf. Neural Inf. Process. Syst.","volume":"35","author":"Ouyang"},{"key":"ref48","first-page":"1","article-title":"LESS: Selecting influential data for targeted instruction tuning","volume-title":"Proc. 41st Int. Conf. Mach. Learn.","author":"Xia"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3405341"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3305255"},{"key":"ref51","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2024.123258","article-title":"Spatial\u2013temporal feature-based end-to-end Fourier network for 3D sign language recognition","volume":"248","author":"Abdullahi","year":"2024","journal-title":"Expert Syst. Appl."},{"issue":"70","key":"ref52","first-page":"1","article-title":"Scaling instruction-finetuned language models","volume":"25","author":"Chung","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref53","article-title":"Source2Synth: Synthetic data generation and curation grounded in real data sources","author":"Lupidi","year":"2024","journal-title":"arXiv:2409.08239"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-025-06251-5"},{"key":"ref55","first-page":"328","article-title":"Universal language model fine-tuning for text classification","volume-title":"Proc. 56th Annu. Meeting Assoc. Comput. Linguistics","author":"Howard"},{"key":"ref56","first-page":"142","article-title":"Learning word vectors for sentiment analysis","volume-title":"Proc. 49th Annu. Meeting Assoc. Comput. Linguistics: Human Lang. Technol.","author":"Maas"},{"key":"ref57","first-page":"1631","article-title":"Recursive deep models for semantic compositionality over a sentiment treebank","volume-title":"Proc. Conf. Empirical Methods Natural Lang. Process.","author":"Socher"},{"key":"ref58","first-page":"649","article-title":"Character-level convolutional networks for text classification","volume-title":"Proc. 29th Int. Conf. Neural Inf. Process. Syst.","volume":"1","author":"Zhang"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-377-6.50048-7"},{"key":"ref60","first-page":"785","article-title":"RACE: Large-scale reading comprehension dataset from examinations","volume-title":"Proc. Conf. Empirical Methods Natural Lang. Process.","author":"Lai"},{"key":"ref61","article-title":"Think you have solved question answering? Try ARC, the AI2 reasoning challenge","author":"Clark","year":"2018","journal-title":"arXiv:1803.05457"},{"key":"ref62","first-page":"1","article-title":"Measuring massive multitask language understanding","volume-title":"Proc. Int. Conf. Learn. Represent. (ICLR)","author":"Hendrycks"},{"key":"ref63","article-title":"MMLU-pro: A more robust and challenging multi-task language understanding benchmark","author":"Wang","year":"2024","journal-title":"arXiv:2406.01574"},{"key":"ref64","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref65","article-title":"Multitask prompted training enables zero-shot task generalization","author":"Sanh","year":"2021","journal-title":"arXiv:2110.08207"},{"key":"ref66","first-page":"22631","article-title":"The flan collection: Designing data and methods for effective instruction tuning","volume-title":"Proc. 40th Int. Conf. Mach. Learn.","author":"Longpre"},{"key":"ref67","article-title":"Fine-tuning BERT with bidirectional LSTM for fine-grained movie reviews sentiment analysis","author":"Nkhata","year":"2025","journal-title":"arXiv:2502.20682"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/tetci.2025.3572150"},{"key":"ref69","article-title":"TWSSenti: A novel hybrid framework for topic-wise sentiment analysis on social media using transformer models","author":"Albladi","year":"2025","journal-title":"arXiv:2504.09896"},{"key":"ref70","first-page":"1","article-title":"TSDS: Data selection for task-specific model finetuning","volume-title":"Proc. 38th Annu. Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref71","article-title":"Federated domain-specific knowledge transfer on large language models using synthetic data","author":"Li","year":"2024","journal-title":"arXiv:2405.14212"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10820123\/11146774.pdf?arnumber=11146774","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T04:46:13Z","timestamp":1757565973000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11146774\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":71,"URL":"https:\/\/doi.org\/10.1109\/access.2025.3605290","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}