{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,20]],"date-time":"2026-01-20T12:09:48Z","timestamp":1768910988409,"version":"3.49.0"},"reference-count":41,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"European Union (EU) Project VOXReality (Voice-Driven Interaction in Extended Reality)","award":["101070521"],"award-info":[{"award-number":["101070521"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2026]]},"DOI":"10.1109\/access.2026.3653132","type":"journal-article","created":{"date-parts":[[2026,1,12]],"date-time":"2026-01-12T22:02:28Z","timestamp":1768255348000},"page":"6106-6120","source":"Crossref","is-referenced-by-count":0,"title":["Compressing What Matters: Neuron Importance Meets Data-Aware Low Rank Approximation for Language Model Compression"],"prefix":"10.1109","volume":"14","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-7260-1348","authenticated-orcid":false,"given":"Athanasios","family":"Ntovas","sequence":"first","affiliation":[{"name":"Centre for Research and Technology Hellas (CERTH), Information Technologies Institute (ITI), Thessaloniki, Greece"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4337-1720","authenticated-orcid":false,"given":"Alexandros","family":"Doumanoglou","sequence":"additional","affiliation":[{"name":"Centre for Research and Technology Hellas (CERTH), Information Technologies Institute (ITI), Thessaloniki, Greece"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3434-3290","authenticated-orcid":false,"given":"Petros","family":"Drakoulis","sequence":"additional","affiliation":[{"name":"Centre for Research and Technology Hellas (CERTH), Information Technologies Institute (ITI), Thessaloniki, Greece"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9649-9306","authenticated-orcid":false,"given":"Dimitris","family":"Zarpalas","sequence":"additional","affiliation":[{"name":"Centre for Research and Technology Hellas (CERTH), Information Technologies Institute (ITI), Thessaloniki, Greece"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018"},{"key":"ref2","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics, Hum. Lang. Technol.","author":"Devlin"},{"key":"ref3","article-title":"Gemma 2: Improving open language models at a practical size","author":"Rivi\u00e8re","year":"2024","journal-title":"arXiv:2408.00118"},{"key":"ref4","article-title":"Gemma 3 technical report","volume-title":"arXiv:2503.19786","author":"Kamath","year":"2025"},{"key":"ref5","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv:2302.13971"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.iswa.2024.200336"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3487045"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00704"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref10","first-page":"317","article-title":"A generalization of the Eckart-Young-Mirsky matrix approximation theorem","volume":"88","author":"Hoffman","year":"1987","journal-title":"Linear Algebra Appl."},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2928130"},{"key":"ref12","first-page":"1","article-title":"Language model compression with weighted low-rank factorization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Hsu"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.naacl-long.217"},{"key":"ref14","article-title":"DistilBERT, a distilled version of BERT: Smaller, faster, cheaper and lighter","author":"Sanh","year":"2019","journal-title":"arXiv:1910.01108"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.3233\/JIFS-221985"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.372"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1907.11692"},{"key":"ref18","article-title":"DeBERTa: Decoding-enhanced BERT with disentangled attention","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"He"},{"key":"ref19","article-title":"DeBERTaV3: Improving DeBERTa using ELECTRA-style pre-training with gradient-disentangled embedding sharing","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"He"},{"issue":"241","key":"ref20","first-page":"1","article-title":"Sparsity in deep learning: Pruning and growth for efficient inference and training in neural networks","volume":"22","author":"Hoefler","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref21","article-title":"SliceGPT: Compress large language models by deleting rows and columns","volume-title":"Proc. Int. Conf. Learn. Representations (ICLR)","author":"Ashkboos"},{"key":"ref22","first-page":"1","article-title":"The LLM surgeon","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Van Der Ouderaa"},{"key":"ref23","first-page":"10323","article-title":"SparseGPT: Massive language models can be accurately pruned in one-shot","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Frantar"},{"key":"ref24","first-page":"9782","article-title":"DynaBERT: Dynamic BERT with adaptive width and depth","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Hou"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-021-01453-z"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.3390\/app13042704"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i9.26304"},{"key":"ref28","first-page":"29321","article-title":"DRONE: Data-aware low-rank compression for large NLP models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Chen"},{"key":"ref29","article-title":"ASVD: Activation-aware singular value decomposition for compressing large language models","author":"Yuan","year":"2023","journal-title":"arXiv:2312.05821"},{"key":"ref30","first-page":"1","article-title":"SVD-LLM: Truncation-aware singular value decomposition for large language model compression","volume-title":"Proc. 13th Int. Conf. Learn. Represent.","author":"Wang"},{"key":"ref31","first-page":"884","article-title":"Compressing pre-trained language models by matrix decomposition","volume-title":"Proc. 1st Conf. Asia\u2013Pacific Chapter Assoc. Comput. Linguistics 10th Int. Joint Conf. Natural Lang. Process.","author":"Noach"},{"key":"ref32","first-page":"1","article-title":"SpinQuant: LLM quantization with learned rotations","volume-title":"Proc. 13th Int. Conf. Learn. Represent.","author":"Liu"},{"key":"ref33","article-title":"GPTQ: Accurate post-training quantization for generative pre-trained transformers","author":"Frantar","year":"2022","journal-title":"arXiv:2210.17323"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3714983.3714987"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.334"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3077597"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.498"},{"key":"ref38","volume-title":"Deep Learning","author":"Goodfellow","year":"2016"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1137\/04060593X"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1137\/090771806"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-5446"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/11323511\/11346468.pdf?arnumber=11346468","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,19]],"date-time":"2026-01-19T20:56:07Z","timestamp":1768856167000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11346468\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/access.2026.3653132","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]}}}