{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T16:25:28Z","timestamp":1771950328848,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":53,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T00:00:00Z","timestamp":1730073600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/https:\/\/doi.org\/10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["No. 62306025, No. 92367204"],"award-info":[{"award-number":["No. 62306025, No. 92367204"]}],"id":[{"id":"10.13039\/https:\/\/doi.org\/10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/https:\/\/doi.org\/10.13039\/501100018537","name":"National Science and Technology Major Project","doi-asserted-by":"publisher","award":["2021ZD0110503"],"award-info":[{"award-number":["2021ZD0110503"]}],"id":[{"id":"10.13039\/https:\/\/doi.org\/10.13039\/501100018537","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Beijing Municipal Science and Technology Project","award":["Nos. Z231100010323002"],"award-info":[{"award-number":["Nos. Z231100010323002"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,28]]},"DOI":"10.1145\/3664647.3680982","type":"proceedings-article","created":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T06:59:41Z","timestamp":1729925981000},"page":"5742-5751","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["PTSBench: A Comprehensive Post-Training Sparsity Benchmark Towards Algorithms and Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1259-5377","authenticated-orcid":false,"given":"Zining","family":"Wang","sequence":"first","affiliation":[{"name":"SKLCCSE, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1956-3367","authenticated-orcid":false,"given":"Jinyang","family":"Guo","sequence":"additional","affiliation":[{"name":"SKLCCSE, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6024-7086","authenticated-orcid":false,"given":"Ruihao","family":"Gong","sequence":"additional","affiliation":[{"name":"SKLCCSE, Beihang University &amp; SenseTime Research, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-2804-7801","authenticated-orcid":false,"given":"Yang","family":"Yong","sequence":"additional","affiliation":[{"name":"SenseTime Research, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4224-1318","authenticated-orcid":false,"given":"Aishan","family":"Liu","sequence":"additional","affiliation":[{"name":"SKLCCSE, Beihang University &amp; Institute of Dataspace, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-7898-8402","authenticated-orcid":false,"given":"Yushi","family":"Huang","sequence":"additional","affiliation":[{"name":"Beihang University &amp; SenseTime Research, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5183-8538","authenticated-orcid":false,"given":"Jiaheng","family":"Liu","sequence":"additional","affiliation":[{"name":"Nanjing University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8425-4195","authenticated-orcid":false,"given":"Xianglong","family":"Liu","sequence":"additional","affiliation":[{"name":"SKLCCSE, Beihang University &amp; Institute of Dataspace, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2024,10,28]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"e_1_3_2_1_2_1","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly et al. 2020. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)."},{"key":"e_1_3_2_1_3_1","volume-title":"Neural network matrix factorization. arXiv preprint arXiv:1511.06443","author":"Dziugaite Gintare Karolina","year":"2015","unstructured":"Gintare Karolina Dziugaite and Daniel M Roy. 2015. Neural network matrix factorization. arXiv preprint arXiv:1511.06443 (2015)."},{"key":"e_1_3_2_1_4_1","volume-title":"International Conference on Machine Learning. PMLR, 2943--2952","author":"Evci Utku","year":"2020","unstructured":"Utku Evci, Trevor Gale, Jacob Menick, Pablo Samuel Castro, and Erich Elsen. 2020. Rigging the lottery: Making all tickets winners. In International Conference on Machine Learning. PMLR, 2943--2952."},{"key":"e_1_3_2_1_5_1","volume-title":"Christopher KI Williams, John Winn, and Andrew Zisserman.","author":"Everingham Mark","year":"2010","unstructured":"Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. 2010. The pascal visual object classes (voc) challenge. International journal of computer vision, Vol. 88 (2010), 303--338."},{"key":"e_1_3_2_1_6_1","volume-title":"Daniel M Roy, and Michael Carbin.","author":"Frankle Jonathan","year":"2019","unstructured":"Jonathan Frankle, Gintare Karolina Dziugaite, Daniel M Roy, and Michael Carbin. 2019. Stabilizing the lottery ticket hypothesis. arXiv preprint arXiv:1903.01611 (2019)."},{"key":"e_1_3_2_1_7_1","first-page":"4475","article-title":"Optimal brain compression: A framework for accurate post-training quantization and pruning","volume":"35","author":"Frantar Elias","year":"2022","unstructured":"Elias Frantar and Dan Alistarh. 2022. Optimal brain compression: A framework for accurate post-training quantization and pruning. Advances in Neural Information Processing Systems, Vol. 35 (2022), 4475--4488.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_8_1","volume-title":"The state of sparsity in deep neural networks. arXiv preprint arXiv:1902.09574","author":"Gale Trevor","year":"2019","unstructured":"Trevor Gale, Erich Elsen, and Sara Hooker. 2019. The state of sparsity in deep neural networks. arXiv preprint arXiv:1902.09574 (2019)."},{"key":"e_1_3_2_1_9_1","volume-title":"LLMC: Benchmarking Large Language Model Quantization with a Versatile Compression Toolkit. arXiv preprint arXiv:2405.06001","author":"Gong Ruihao","year":"2024","unstructured":"Ruihao Gong, Yang Yong, Shiqiao Gu, Yushi Huang, Yunchen Zhang, Xianglong Liu, and Dacheng Tao. 2024. LLMC: Benchmarking Large Language Model Quantization with a Versatile Compression Toolkit. arXiv preprint arXiv:2405.06001 (2024)."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i11.29108"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.569"},{"key":"e_1_3_2_1_12_1","unstructured":"Jinyang Guo Wanli Ouyang and Dong Xu. 2020. Multi-Dimensional Pruning: A Unified Framework for Model Compression. In CVPR."},{"key":"e_1_3_2_1_13_1","volume-title":"Forty-first International Conference on Machine Learning.","author":"Guo Jinyang","year":"2024","unstructured":"Jinyang Guo, Jianyu Wu, Zining Wang, Jiaheng Liu, Ge Yang, Yifu Ding, Ruihao Gong, Haotong Qin, and Xianglong Liu. 2024. Compressing large language models by joint sparsification and quantization. In Forty-first International Conference on Machine Learning."},{"key":"e_1_3_2_1_14_1","volume-title":"Learning both weights and connections for efficient neural network. Advances in neural information processing systems","author":"Han Song","year":"2015","unstructured":"Song Han, Jeff Pool, John Tran, and William Dally. 2015. Learning both weights and connections for efficient neural network. Advances in neural information processing systems, Vol. 28 (2015)."},{"key":"e_1_3_2_1_15_1","volume-title":"Second order derivatives for network pruning: Optimal brain surgeon. Advances in neural information processing systems","author":"Hassibi Babak","year":"1992","unstructured":"Babak Hassibi and David Stork. 1992. Second order derivatives for network pruning: Optimal brain surgeon. Advances in neural information processing systems, Vol. 5 (1992)."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_48"},{"key":"e_1_3_2_1_18_1","volume-title":"PTQD: Accurate Post-Training Quantization for Diffusion Models. arXiv preprint arXiv:2305.10657","author":"He Yefei","year":"2023","unstructured":"Yefei He, Luping Liu, Jing Liu, Weijia Wu, Hong Zhou, and Bohan Zhuang. 2023. PTQD: Accurate Post-Training Quantization for Diffusion Models. arXiv preprint arXiv:2305.10657 (2023)."},{"key":"e_1_3_2_1_19_1","volume-title":"Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems","author":"Heusel Martin","year":"2017","unstructured":"Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. 2017. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, Vol. 30 (2017)."},{"key":"e_1_3_2_1_20_1","volume-title":"Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531","author":"Hinton Geoffrey","year":"2015","unstructured":"Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. 2015. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 (2015)."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00140"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00745"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00703"},{"key":"e_1_3_2_1_24_1","volume-title":"Accelerated sparse neural training: A provable and efficient method to find n: m transposable masks. Advances in neural information processing systems","author":"Hubara Itay","year":"2021","unstructured":"Itay Hubara, Brian Chmiel, Moshe Island, Ron Banner, Joseph Naor, and Daniel Soudry. 2021. Accelerated sparse neural training: A provable and efficient method to find n: m transposable masks. Advances in neural information processing systems, Vol. 34 (2021), 21099--21111."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.5555\/3122009.3242044"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"crossref","unstructured":"J. Guo W. Ouyang and D. Xu. 2020. Channel pruning guided by classification loss and feature importance. In AAAI.","DOI":"10.1609\/aaai.v34i07.6720"},{"key":"e_1_3_2_1_27_1","volume-title":"IEEE Conf. Comput. Vis. Pattern Recog. 2704--2713","author":"Jacob Benoit","year":"2018","unstructured":"Benoit Jacob, Skirmantas Kligys, Bo Chen, Menglong Zhu, Matthew Tang, Andrew Howard, Hartwig Adam, and Dmitry Kalenichenko. 2018. Quantization and training of neural networks for efficient integer-arithmetic-only inference. In IEEE Conf. Comput. Vis. Pattern Recog. 2704--2713."},{"key":"e_1_3_2_1_28_1","volume-title":"Quantizing deep convolutional networks for efficient inference: A whitepaper. arXiv preprint arXiv:1806.08342","author":"Krishnamoorthi Raghuraman","year":"2018","unstructured":"Raghuraman Krishnamoorthi. 2018. Quantizing deep convolutional networks for efficient inference: A whitepaper. arXiv preprint arXiv:1806.08342 (2018)."},{"key":"e_1_3_2_1_29_1","unstructured":"Alex Krizhevsky Geoffrey Hinton et al. 2009. Learning multiple layers of features from tiny images. (2009)."},{"key":"e_1_3_2_1_30_1","volume-title":"International Conference on Machine Learning. PMLR, 5544--5555","author":"Kusupati Aditya","year":"2020","unstructured":"Aditya Kusupati, Vivek Ramanujan, Raghav Somani, Mitchell Wortsman, Prateek Jain, Sham Kakade, and Ali Farhadi. 2020. Soft threshold weight reparameterization for learnable sparsity. In International Conference on Machine Learning. PMLR, 5544--5555."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00094"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00029"},{"key":"e_1_3_2_1_33_1","volume-title":"Brecq: Pushing the limit of post-training quantization by block reconstruction. arXiv preprint arXiv:2102.05426","author":"Li Yuhang","year":"2021","unstructured":"Yuhang Li, Ruihao Gong, Xu Tan, Yang Yang, Peng Hu, Qi Zhang, Fengwei Yu, Wei Wang, and Shi Gu. 2021. Brecq: Pushing the limit of post-training quantization by block reconstruction. arXiv preprint arXiv:2102.05426 (2021)."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.106"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.324"},{"key":"e_1_3_2_1_36_1","volume-title":"Proceedings, Part V 13","author":"Lin Tsung-Yi","year":"2014","unstructured":"Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C Lawrence Zitnick. 2014. Microsoft coco: Common objects in context. In Computer Vision--ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6--12, 2014, Proceedings, Part V 13. Springer, 740--755."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02340"},{"key":"e_1_3_2_1_38_1","volume-title":"Proceedings, Part I 14","author":"Liu Wei","year":"2016","unstructured":"Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang Fu, and Alexander C Berg. 2016. Ssd: Single shot multibox detector. In Computer Vision--ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11--14, 2016, Proceedings, Part I 14. Springer, 21--37."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.298"},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01509"},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01264-9_8"},{"key":"e_1_3_2_1_42_1","volume-title":"International Conference on Machine Learning. PMLR, 4646--4655","author":"Mostafa Hesham","year":"2019","unstructured":"Hesham Mostafa and Xin Wang. 2019. Parameter efficient training of deep convolutional neural networks by dynamic sparse reparameterization. In International Conference on Machine Learning. PMLR, 4646--4655."},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00141"},{"key":"e_1_3_2_1_44_1","volume-title":"Yelysei Bondarenko, Mart Van Baalen, and Tijmen Blankevoort.","author":"Nagel Markus","year":"2021","unstructured":"Markus Nagel, Marios Fournarakis, Rana Ali Amjad, Yelysei Bondarenko, Mart Van Baalen, and Tijmen Blankevoort. 2021. A white paper on neural network quantization. arXiv preprint arXiv:2106.08295 (2021)."},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01044"},{"key":"e_1_3_2_1_46_1","volume-title":"Comparing rewinding and fine-tuning in neural network pruning. arXiv preprint arXiv:2003.02389","author":"Renda Alex","year":"2020","unstructured":"Alex Renda, Jonathan Frankle, and Michael Carbin. 2020. Comparing rewinding and fine-tuning in neural network pruning. arXiv preprint arXiv:2003.02389 (2020)."},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"e_1_3_2_1_49_1","volume-title":"Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556","author":"Simonyan Karen","year":"2014","unstructured":"Karen Simonyan and Andrew Zisserman. 2014. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)."},{"key":"e_1_3_2_1_50_1","volume-title":"International conference on machine learning. PMLR, 10347--10357","author":"Touvron Hugo","year":"2021","unstructured":"Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Herv\u00e9 J\u00e9gou. 2021. Training data-efficient image transformers & distillation through attention. In International conference on machine learning. PMLR, 10347--10357."},{"key":"e_1_3_2_1_51_1","volume-title":"Picking winning tickets before training by preserving gradient flow. arXiv preprint arXiv:2002.07376","author":"Wang Chaoqi","year":"2020","unstructured":"Chaoqi Wang, Guodong Zhang, and Roger Grosse. 2020. Picking winning tickets before training by preserving gradient flow. arXiv preprint arXiv:2002.07376 (2020)."},{"key":"e_1_3_2_1_52_1","volume-title":"Qdrop: Randomly dropping quantization for extremely low-bit post-training quantization. arXiv preprint arXiv:2203.05740","author":"Wei Xiuying","year":"2022","unstructured":"Xiuying Wei, Ruihao Gong, Yuhang Li, Xianglong Liu, and Fengwei Yu. 2022. Qdrop: Randomly dropping quantization for extremely low-bit post-training quantization. arXiv preprint arXiv:2203.05740 (2022)."},{"key":"e_1_3_2_1_53_1","volume-title":"Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365","author":"Yu Fisher","year":"2015","unstructured":"Fisher Yu, Ari Seff, Yinda Zhang, Shuran Song, Thomas Funkhouser, and Jianxiong Xiao. 2015. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365 (2015)."}],"event":{"name":"MM '24: The 32nd ACM International Conference on Multimedia","location":"Melbourne VIC Australia","acronym":"MM '24","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 32nd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3680982","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3664647.3680982","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:17:35Z","timestamp":1750295855000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3680982"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,28]]},"references-count":53,"alternative-id":["10.1145\/3664647.3680982","10.1145\/3664647"],"URL":"https:\/\/doi.org\/10.1145\/3664647.3680982","relation":{},"subject":[],"published":{"date-parts":[[2024,10,28]]},"assertion":[{"value":"2024-10-28","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}