{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T20:23:16Z","timestamp":1740169396691,"version":"3.37.3"},"reference-count":43,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["72471203"],"award-info":[{"award-number":["72471203"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"Humanities and Social Sciences Foundation of the Ministry of Education of China","doi-asserted-by":"publisher","award":["22YJA630020"],"award-info":[{"award-number":["22YJA630020"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3498904","type":"journal-article","created":{"date-parts":[[2024,11,15]],"date-time":"2024-11-15T18:49:48Z","timestamp":1731696588000},"page":"170541-170552","source":"Crossref","is-referenced-by-count":0,"title":["ELO-Mask: Effective and Layerwise Optimization of Mask for Sparse LLMs"],"prefix":"10.1109","volume":"12","author":[{"given":"Bingjie","family":"Xiang","sequence":"first","affiliation":[{"name":"Information Center, China Tobacco Fujian Industrial Company Ltd., Xiamen, Fujian, China"}]},{"given":"Jiarui","family":"Wu","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Xiamen University, Xiamen, China"}]},{"given":"Xiaoying","family":"Han","sequence":"additional","affiliation":[{"name":"Information Center, China Tobacco Fujian Industrial Company Ltd., Xiamen, Fujian, China"}]},{"given":"Qian","family":"Gu","sequence":"additional","affiliation":[{"name":"Information Center, China Tobacco Fujian Industrial Company Ltd., Xiamen, Fujian, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6928-2638","authenticated-orcid":false,"given":"Fei","family":"Chao","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Xiamen University, Xiamen, China"}]},{"given":"Xiao","family":"Yang","sequence":"additional","affiliation":[{"name":"School of Informatics, Xiamen University, Xiamen, China"}]},{"given":"Fan","family":"Wu","sequence":"additional","affiliation":[{"name":"School of Informatics, Xiamen University, Xiamen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7958-8684","authenticated-orcid":false,"given":"Xin","family":"Fu","sequence":"additional","affiliation":[{"name":"Management School, Xiamen University, Xiamen, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Scaling laws for neural language models","author":"Kaplan","year":"2020","journal-title":"arXiv:2001.08361"},{"key":"ref2","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. 
NIPS","author":"Brown"},{"issue":"1","key":"ref3","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"24","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"volume-title":"Improving Language Understanding by Generative Pre-Training","year":"2018","author":"Radford","key":"ref4"},{"key":"ref5","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Ouyang"},{"key":"ref6","article-title":"OPT: Open pre-trained transformer language models","author":"Zhang","year":"2022","journal-title":"arXiv:2205.01068"},{"key":"ref7","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv:2302.13971"},{"key":"ref8","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023","journal-title":"arXiv:2307.09288"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.26"},{"key":"ref10","article-title":"Glm-130b: An open bilingual pre-trained model","author":"Zeng","year":"2023","journal-title":"arXiv:2210.02414"},{"key":"ref11","article-title":"Higher kazhdan projections, \u21132-betti numbers and baum-connes conjectures","author":"Li","year":"2020","journal-title":"arXiv:2006.09317"},{"key":"ref12","first-page":"12302","article-title":"Prune once for all: Sparse pre-trained language models","volume-title":"Proc. 2021 Conf. Empirical Methods Natural Lang. Process.","author":"Zafrir"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1103\/physrevlett.128.078302"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3328173"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3256721"},{"key":"ref16","article-title":"GPTQ: Accurate post-training quantization for generative pre-trained transformers","author":"Frantar","year":"2022","journal-title":"arXiv:2210.17323"},{"key":"ref17","first-page":"3950","article-title":"Efficient language model fine-tuning via adaptive task sampling","volume-title":"Proc. 60th Annu. Meeting Assoc. Comput. Linguistics","author":"Li"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-13851-5_26"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3291591"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3188119"},{"key":"ref21","first-page":"1","article-title":"Llm-pruner: On the structural pruning of large language models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Ma"},{"key":"ref22","article-title":"Compresso: Structured pruning with collaborative prompting learns compact large language models","author":"Guo","year":"2023","journal-title":"arXiv:2310.05015"},{"key":"ref23","article-title":"Fluctuation-based adaptive structured pruning for large language models","author":"An","year":"2023","journal-title":"arXiv:2312.11983"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00141"},{"key":"ref25","first-page":"10323","article-title":"SparseGPT: Massive language models can be accurately pruned in one-shot","volume-title":"Proc. 40th Int. Conf. Mach. Learn.","author":"Frantar"},{"key":"ref26","first-page":"4475","article-title":"Optimal brain compression: A framework for accurate post-training quantization and pruning","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"35","author":"Frantar"},{"key":"ref27","article-title":"Fast and effective weight update for pruned large language models","author":"Bo\u017ea","year":"2024","journal-title":"arXiv:2401.02938"},{"key":"ref28","article-title":"A simple and effective pruning approach for large language models","author":"Sun","year":"2023","journal-title":"arXiv:2306.11695"},{"key":"ref29","article-title":"Dynamic sparse no training: Training-free fine-tuning for sparse LLMs","author":"Zhang","year":"2023","journal-title":"arXiv:2310.08915"},{"key":"ref30","article-title":"Beyond size: How gradients shape pruning decisions in large language models","author":"Jyoti Das","year":"2023","journal-title":"arXiv:2311.04902"},{"key":"ref31","first-page":"1","article-title":"Plug-and-play: An efficient post-training pruning method for large language models","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Zhang"},{"key":"ref32","article-title":"LoRA: Low-rank adaptation of large language models","author":"Hu","year":"2021","journal-title":"arXiv:2106.09685"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3230008"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3353313"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3260223"},{"key":"ref36","first-page":"1","article-title":"Learning both weights and connections for efficient neural network","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Han"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICNN.1993.298572"},{"key":"ref38","article-title":"Pointer sentinel mixture models","author":"Merity","year":"2016","journal-title":"arXiv:1609.07843"},{"key":"ref39","article-title":"Think you have solved question answering? Try ARC, the AI2 reasoning challenge","author":"Clark","year":"2018","journal-title":"arXiv:1803.05457"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/n16-1098"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1082"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6399"},{"key":"ref43","article-title":"Brecq: Pushing the limit of post-training quantization by block reconstruction","author":"Li","year":"2021","journal-title":"arXiv:2102.05426"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10380310\/10753603.pdf?arnumber=10753603","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T17:37:49Z","timestamp":1732729069000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10753603\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":43,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3498904","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2024]]}}}