{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T05:51:54Z","timestamp":1763704314984,"version":"3.45.0"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,26]],"date-time":"2025-10-26T00:00:00Z","timestamp":1761436800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,26]],"date-time":"2025-10-26T00:00:00Z","timestamp":1761436800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,26]]},"DOI":"10.1109\/iccad66269.2025.11240704","type":"proceedings-article","created":{"date-parts":[[2025,11,20]],"date-time":"2025-11-20T18:39:34Z","timestamp":1763663974000},"page":"1-9","source":"Crossref","is-referenced-by-count":0,"title":["LLM-Barber: Block-Aware Rebuilder for Sparsity Mask in One-Shot for Large Language Models"],"prefix":"10.1109","author":[{"given":"Yupeng","family":"Su","sequence":"first","affiliation":[{"name":"Southern University of Science and Technology,School of Microelectronics,Shenzhen,China"}]},{"given":"Ziyi","family":"Guan","sequence":"additional","affiliation":[{"name":"University of Hong Kong,Department of Electrical and Electronic Engineering,Hong Kong,China"}]},{"given":"Xiaoqun","family":"Liu","sequence":"additional","affiliation":[{"name":"Southern University of Science and Technology,School of Microelectronics,Shenzhen,China"}]},{"given":"Tianlai","family":"Jin","sequence":"additional","affiliation":[{"name":"Southern University of Science and Technology,School of Microelectronics,Shenzhen,China"}]},{"given":"Dongkuan","family":"Wu","sequence":"additional","affiliation":[{"name":"Southern University of Science and Technology,School of Microelectronics,Shenzhen,China"}]},{"given":"Zhengfei","family":"Chen","sequence":"additional","affiliation":[{"name":"Southern University of Science and Technology,School of Microelectronics,Shenzhen,China"}]},{"given":"Graziano","family":"Chesi","sequence":"additional","affiliation":[{"name":"University of Hong Kong,Department of Electrical and Electronic Engineering,Hong Kong,China"}]},{"given":"Ngai","family":"Wong","sequence":"additional","affiliation":[{"name":"University of Hong Kong,Department of Electrical and Electronic Engineering,Hong Kong,China"}]},{"given":"Hao","family":"Yu","sequence":"additional","affiliation":[{"name":"Southern University of Science and Technology,School of Microelectronics,Shenzhen,China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"article-title":"Pruning convolutional neural networks for resource efficient inference","volume-title":"5th International Conference on Learning Representations, ICLR 2017-Conference Track Proceedings","author":"Molchanov","key":"ref2"},{"article-title":"Snip: single-shot network pruning based on connection sensitivity","volume-title":"International Conference on Learning Representations","author":"Lee","key":"ref3"},{"key":"ref4","first-page":"10323","article-title":"SparseGPT: Massive language models can be accurately pruned in one-shot","volume-title":"International Conference on Machine Learning","author":"Frantar"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00339"},{"article-title":"Lora: Low-rank adaptation of large language models","volume-title":"International Conference on Learning Representations","author":"Hu","key":"ref6"},{"article-title":"Dynamic sparse no training: Training-free fine-tuning for sparse llms","volume-title":"The Twelfth International Conference on Learning Representations","author":"Zhang","key":"ref7"},{"article-title":"A simple and effective pruning approach for large language models","volume-title":"The Twelfth International Conference on Learning Representations","author":"Sun","key":"ref8"},{"key":"ref9","article-title":"Optimal brain damage","volume":"2","author":"LeCun","year":"1989","journal-title":"Advances in neural information processing systems"},{"key":"ref10","article-title":"Optimal brain surgeon: Extensions and performance comparisons","volume":"6","author":"Hassibi","year":"1993","journal-title":"Advances in neural information processing systems"},{"article-title":"GPTQ: Accurate post-training compression for generative pretrained transformers","volume-title":"ICLR","author":"Frantar","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3649329.3658498"},{"article-title":"Besa: Pruning large language models with blockwise parameter-efficient sparsity allocation","volume-title":"The Twelfth International Conference on Learning Representations","author":"Xu","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-018-04316-3"},{"key":"ref15","article-title":"Learning both weights and connections for efficient neural network","volume":"28","author":"Han","year":"2015","journal-title":"Advances in neural information processing systems"},{"article-title":"Llama: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref16"},{"article-title":"Llama 2: Open foundation and fine-tuned chat models","year":"2023","author":"Touvron","key":"ref17"},{"year":"2024","key":"ref18","article-title":"Llama 3 model card"},{"article-title":"Opt: Open pre-trained transformer language models","year":"2022","author":"Zhang","key":"ref19"},{"issue":"140","key":"ref20","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"Journal of machine learning research"},{"article-title":"Pointer sentinel mixture models","volume-title":"International Conference on Learning Representations","author":"Merity","key":"ref21"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.3115\/1075812.1075835"},{"article-title":"A framework for few-shot language model evaluation","year":"2023","author":"Gao","key":"ref23"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/n19-1300"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/w18-5446"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/p19-1472"},{"article-title":"Think you have solved question answering? try arc, the ai2 reasoning challenge","year":"2018","author":"Clark","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/d18-1260"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/MM.2021.3061394"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3714983.3714987"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TCSI.2025.3546256"}],"event":{"name":"2025 IEEE\/ACM International Conference On Computer Aided Design (ICCAD)","start":{"date-parts":[[2025,10,26]]},"location":"Munich, Germany","end":{"date-parts":[[2025,10,30]]}},"container-title":["2025 IEEE\/ACM International Conference On Computer Aided Design (ICCAD)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11240608\/11240621\/11240704.pdf?arnumber=11240704","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T05:43:18Z","timestamp":1763703798000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11240704\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,26]]},"references-count":31,"URL":"https:\/\/doi.org\/10.1109\/iccad66269.2025.11240704","relation":{},"subject":[],"published":{"date-parts":[[2025,10,26]]}}}