{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,12,25]],"date-time":"2024-12-25T05:13:57Z","timestamp":1735103637828,"version":"3.32.0"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T00:00:00Z","timestamp":1730937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T00:00:00Z","timestamp":1730937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,11,7]]},"DOI":"10.1109\/iscslp63861.2024.10800592","type":"proceedings-article","created":{"date-parts":[[2024,12,23]],"date-time":"2024-12-23T19:11:17Z","timestamp":1734981077000},"page":"349-352","source":"Crossref","is-referenced-by-count":0,"title":["Fusion Pruning for Large Language Models"],"prefix":"10.1109","author":[{"given":"Shixin","family":"Jiang","sequence":"first","affiliation":[{"name":"Harbin Institute of Technology,Harbin"}]},{"given":"Ming","family":"Liu","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology,Harbin"}]},{"given":"Bing","family":"Qin","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology,Harbin"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.3390\/fi15060192"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2021.07.045"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW60793.2023.00161"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/WACV57701.2024.00140"},{"key":"ref5","article-title":"Deebert: Dy-namic early exiting for accelerating bert inference","author":"Xin","year":"2020","journal-title":"arXiv preprint"},{"key":"ref6","first-page":"605","article-title":"Optimal brain damage, advances in neural information processing systems","volume":"598","author":"Cun","year":"1990","journal-title":"Denver 1989, Ed. D. Touretzsky; Morgan Kaufmann"},{"key":"ref7","article-title":"Learning recurrent binary\/ternary weights","author":"Ardakani","year":"2018","journal-title":"arXiv preprint"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3182659"},{"key":"ref9","article-title":"Learning both weights and connections for efficient neural network","volume":"28","author":"Han","year":"2015","journal-title":"Advances in neural information processing systems"},{"volume-title":"Pruning filters for efficient convnets","year":"2017","author":"Li","key":"ref10"},{"key":"ref11","article-title":"What matters in the structured pruning of generative language models?","author":"Santacroce","year":"2023","journal-title":"arXiv preprint"},{"key":"ref12","first-page":"21702","article-title":"Llm-pruner: On the structural pruning of large language models","volume":"36","author":"Ma","year":"2023","journal-title":"Advances in neural information processing systems"},{"key":"ref13","first-page":"10323","article-title":"Sparsegpt: Massive language models can be accurately pruned in one-shot","volume-title":"International Conference on Machine Learning. PMLR","author":"Frantar","year":"2023"},{"key":"ref14","article-title":"Loraprune: Pruning meets low-rank parameter-efficient fine-tuning","author":"Zhang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref15","article-title":"A simple and effective pruning approach for large language models","author":"Sun","year":"2023","journal-title":"arXiv preprint"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.4140\/TCP.n.2015.249"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3579371.3589038"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.26"},{"key":"ref19","article-title":"Lut-gemm: Quantized matrix multi-plication based on luts for efficient inference in large-scale gener-ative language models","author":"Park","year":"2022","journal-title":"arXiv preprint"},{"key":"ref20","article-title":"Speed: Speculative pipelined exe-cution for efficient decoding","author":"Hooper","year":"2023","journal-title":"arXiv preprint"},{"key":"ref21","article-title":"Speculative decoding with big lit-tle decoder","volume":"36","author":"Kim","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref22","article-title":"Medusa: Simple llm inference acceleration framework with multiple decoding heads","author":"Cai","year":"2024","journal-title":"arXiv preprint"},{"key":"ref23","article-title":"H2o: Heavy-hitter oracle for efficient generative inference of large language models","volume":"36","author":"Zhang","year":"2024","journal-title":"Advances in Neural Information Processing Systems"}],"event":{"name":"2024 IEEE 14th International Symposium on Chinese Spoken Language Processing (ISCSLP)","start":{"date-parts":[[2024,11,7]]},"location":"Beijing, China","end":{"date-parts":[[2024,11,10]]}},"container-title":["2024 IEEE 14th International Symposium on Chinese Spoken Language Processing (ISCSLP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10799944\/10799969\/10800592.pdf?arnumber=10800592","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,24]],"date-time":"2024-12-24T06:26:38Z","timestamp":1735021598000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10800592\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,7]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/iscslp63861.2024.10800592","relation":{},"subject":[],"published":{"date-parts":[[2024,11,7]]}}}