{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T15:33:35Z","timestamp":1773329615300,"version":"3.50.1"},"reference-count":35,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11227279","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-9","source":"Crossref","is-referenced-by-count":1,"title":["Semantic Retention and Extreme Compression in LLMs: Can We Have Both?"],"prefix":"10.1109","author":[{"given":"Stanislas","family":"Laborde","sequence":"first","affiliation":[{"name":"ESIEA,Learning, Data and Robotics (LDR) ESIEA Lab,Paris,France"}]},{"given":"Martin","family":"Cousseau","sequence":"additional","affiliation":[{"name":"ESIEA,Learning, Data and Robotics (LDR) ESIEA Lab,Paris,France"}]},{"given":"Antoun","family":"Yaacoub","sequence":"additional","affiliation":[{"name":"ESIEA,Learning, Data and Robotics (LDR) ESIEA Lab,Paris,France"}]},{"given":"Lionel","family":"Prevost","sequence":"additional","affiliation":[{"name":"ESIEA,Learning, Data and Robotics (LDR) ESIEA Lab,Paris,France"}]}],"member":"263","reference":[{"key":"ref1","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. ICLR","author":"Hu"},{"key":"ref2","article-title":"DistilBERT, a distilled version of BERT: Smaller, faster, cheaper and lighter","volume-title":"Proc. NeurIPS EMC2 Workshop","author":"Sanh"},{"key":"ref3","article-title":"A simple and effective pruning approach for large language models","volume-title":"Proc. ICLR","author":"Sun"},{"key":"ref4","article-title":"GPTQ: Accurate post-training quantization for generative pre-trained transformers","volume-title":"Proc. ICLR","author":"Frantar"},{"key":"ref5","article-title":"The era of 1-bit LLMs: All large language models are in 1.58 bits","author":"Ma","year":"2024"},{"key":"ref6","article-title":"Compressing LLMs: The truth is rarely pure and never simple","volume-title":"Proc. ICLR","author":"Jaiswal"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-025-01137-0"},{"key":"ref8","article-title":"SparseGPT: Massive language models can be accurately pruned in one-shot","volume-title":"Proc. ICML","author":"Frantar"},{"key":"ref9","article-title":"Compressing large language models by joint sparsification and quantization","volume-title":"Proc. ICML","author":"Guo"},{"key":"ref10","article-title":"Learning n:m fine-grained structured sparse neural networks from scratch","volume-title":"Proc. ICLR","author":"Zhou"},{"key":"ref11","article-title":"LLM-Pruner: On the structural pruning of large language models","volume-title":"Proc. NeurIPS","author":"Ma"},{"key":"ref12","article-title":"What matters in transformers? Not all attention is needed","author":"He","year":"2024"},{"key":"ref13","article-title":"LLM.int8(): 8-bit matrix multiplication for transformers at scale","volume-title":"Proc. 
NeurIPS","author":"Dettmers"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3714983.3714987"},{"key":"ref15","article-title":"SmoothQuant: Accurate and efficient post-training quantization for large language models","volume-title":"Proc. ICML","author":"Xiao"},{"key":"ref16","article-title":"QuantEase: Optimization-based quantization for language models","author":"Behdin","year":"2023"},{"key":"ref17","article-title":"OmniQuant: Omnidirectionally calibrated quantization for large language models","volume-title":"Proc. ICLR","author":"Shao"},{"key":"ref18","article-title":"Channel-wise mixed-precision quantization for large language models","author":"Chen","year":"2025"},{"key":"ref19","article-title":"Rethinking channel dimensions to isolate outliers for low-bit weight quantization of large language models","volume-title":"Proc. ICLR","author":"Heo"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i12.29237"},{"key":"ref21","article-title":"SpQR: A sparse-quantized representation for near-lossless LLM weight compression","volume-title":"Proc. ICLR","author":"Dettmers"},{"key":"ref22","article-title":"SqueezeLLM: Dense-and-sparse quantization","volume-title":"Proc. ICML","author":"Kim"},{"key":"ref23","article-title":"SDQ: Sparse decomposed quantization for LLM inference","author":"Jeong","year":"2024"},{"key":"ref24","article-title":"QLoRA: Efficient finetuning of quantized LLMs","volume-title":"Proc. NeurIPS","author":"Dettmers"},{"key":"ref25","article-title":"The Llama 3 herd of models","author":"Grattafiori","year":"2024"},{"key":"ref26","article-title":"Mistral 7B","author":"Jiang","year":"2023"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3641289"},{"key":"ref28","article-title":"Evaluating large language models: A comprehensive survey","author":"Guo","year":"2023"},{"key":"ref29","article-title":"Performances are plateauing, let\u2019s make the leaderboard steep again","author":"Fourrier","year":"2024","journal-title":"HuggingFace Blog"},{"key":"ref30","article-title":"MMLU-Pro: A more robust and challenging multi-task language understanding benchmark","volume-title":"Proc. NeurIPS D&B Track","author":"Wang"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.824"},{"key":"ref32","article-title":"Measuring mathematical problem solving with the MATH dataset","volume-title":"Proc. NeurIPS D&B Track","author":"Hendrycks"},{"key":"ref33","article-title":"Instruction-following evaluation for large language models","author":"Zhou","year":"2023"},{"key":"ref34","article-title":"MuSR: Testing the limits of chain-of-thought with multistep soft reasoning","volume-title":"Proc. ICLR","author":"Sprague"},{"key":"ref35","article-title":"GPQA: A graduate-level Google-proof Q&A benchmark","volume-title":"Proc. 
COLM","author":"Rein"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","location":"Rome, Italy","start":{"date-parts":[[2025,6,30]]},"end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11227279.pdf?arnumber=11227279","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:09:34Z","timestamp":1763190574000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11227279\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11227279","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}