{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T17:17:12Z","timestamp":1765041432796,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":84,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,10,26]],"date-time":"2023-10-26T00:00:00Z","timestamp":1698278400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,10,26]]},"DOI":"10.1145\/3581783.3611838","type":"proceedings-article","created":{"date-parts":[[2023,10,27]],"date-time":"2023-10-27T07:27:12Z","timestamp":1698391632000},"page":"5204-5213","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":5,"title":["Resource Constrained Model Compression via Minimax Optimization for Spiking Neural Networks"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5348-2192","authenticated-orcid":false,"given":"Jue","family":"Chen","sequence":"first","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-5554-2938","authenticated-orcid":false,"given":"Huan","family":"Yuan","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9862-2654","authenticated-orcid":false,"given":"Jianchao","family":"Tan","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-7953-1651","authenticated-orcid":false,"given":"Bin","family":"Chen","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-3826-8436","authenticated-orcid":false,"given":"Chengru","family":"Song","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-5475-2728","authenticated-orcid":false,"given":"Di","family":"Zhang","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2023,10,27]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Long short-term memory and learning-to-learn in networks of spiking neurons. Advances in neural information processing systems","author":"Bellec Guillaume","year":"2018","unstructured":"Guillaume Bellec, Darjan Salaj, Anand Subramoney, Robert Legenstein, and Wolfgang Maass. 2018. Long short-term memory and learning-to-learn in networks of spiking neurons. Advances in neural information processing systems , Vol. 31 (2018)."},{"key":"e_1_3_2_2_2_1","volume-title":"Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432","author":"Bengio Yoshua","year":"2013","unstructured":"Yoshua Bengio, Nicholas L\u00e9onard, and Aaron Courville. 2013. Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432 (2013)."},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.1016\/S0925-2312(01)00658-0"},{"key":"e_1_3_2_2_4_1","volume-title":"International Conference on Learning Representations.","author":"Cai Han","year":"2019","unstructured":"Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, and Song Han. 2019. Once-for-All: Train One Network and Specialize it for Efficient Deployment. 
In International Conference on Learning Representations."},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-014-0788-3"},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"crossref","unstructured":"Yanqi Chen Zhaofei Yu Wei Fang Tiejun Huang and Yonghong Tian. 2021. Pruning of Deep Spiking Neural Networks through Gradient Rewiring. In IJCAI.","DOI":"10.24963\/ijcai.2021\/236"},{"key":"e_1_3_2_2_7_1","volume-title":"Proceedings of the 39th International Conference on Machine Learning (Proceedings of Machine Learning Research","volume":"3715","author":"Chen Yanqi","year":"2022","unstructured":"Yanqi Chen, Zhaofei Yu, Wei Fang, Zhengyu Ma, Tiejun Huang, and Yonghong Tian. 2022. State Transition of Dendritic Spines Improves Learning of Sparse Spiking Neural Networks. In Proceedings of the 39th International Conference on Machine Learning (Proceedings of Machine Learning Research, Vol. 162), Kamalika Chaudhuri, Stefanie Jegelka, Le Song, Csaba Szepesvari, Gang Niu, and Sivan Sabato (Eds.). PMLR, 3701--3715. https:\/\/proceedings.mlr.press\/v162\/chen22ac.html"},{"key":"e_1_3_2_2_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9534111"},{"key":"e_1_3_2_2_9_1","volume-title":"Comprehensive snn compression using admm optimization and activity regularization","author":"Deng Lei","year":"2021","unstructured":"Lei Deng, Yujie Wu, Yifan Hu, Ling Liang, Guoqi Li, Xing Hu, Yufei Ding, Peng Li, and Yuan Xie. 2021. Comprehensive snn compression using admm optimization and activity regularization. IEEE transactions on neural networks and learning systems (2021)."},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","unstructured":"Shikuang Deng Yuhang Li Shanghang Zhang and Shi Gu. 2022. Temporal Efficient Training of Spiking Neural Network via Gradient Re-weighting. 
https:\/\/doi.org\/10.48550\/ARXIV.2202.11946","DOI":"10.48550\/ARXIV.2202.11946"},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2015.7280696"},{"key":"e_1_3_2_2_12_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00447"},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2015.7280592"},{"key":"e_1_3_2_2_14_1","unstructured":"Wei Fang Yanqi Chen Jianhao Ding Ding Chen Zhaofei Yu Huihui Zhou Yonghong Tian and other contributors. 2020. SpikingJelly. https:\/\/github.com\/fangwei123456\/spikingjelly. Accessed: YYYY-MM-DD."},{"key":"e_1_3_2_2_15_1","volume-title":"Wortman Vaughan (Eds.)","volume":"34","author":"Fang Wei","year":"2021","unstructured":"Wei Fang, Zhaofei Yu, Yanqi Chen, Tiejun Huang, Timoth\u00e9e Masquelier, and Yonghong Tian. 2021a. Deep Residual Learning in Spiking Neural Networks. In Advances in Neural Information Processing Systems, M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan (Eds.), Vol. 34. Curran Associates, Inc., 21056--21069. https:\/\/proceedings.neurips.cc\/paper\/2021\/file\/afe434653a898da20044041262b3ac74-Paper.pdf"},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00266"},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11265-022-01772-5"},{"key":"e_1_3_2_2_18_1","volume-title":"The lottery ticket hypothesis: Finding sparse, trainable neural networks. arXiv preprint arXiv:1803.03635","author":"Frankle Jonathan","year":"2018","unstructured":"Jonathan Frankle and Michael Carbin. 2018. The lottery ticket hypothesis: Finding sparse, trainable neural networks. arXiv preprint arXiv:1803.03635 (2018)."},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2014.2304638"},{"key":"e_1_3_2_2_20_1","volume-title":"Kistler","author":"Gerstner Wulfram","year":"2002","unstructured":"Wulfram Gerstner and Werner M. Kistler. 2002. 
Spiking neuron models: single neurons, populations, plasticity. Cambridge University Press, New York;Cambridge, U.K;. 480--480 pages."},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00519"},{"key":"e_1_3_2_2_22_1","volume-title":"Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149","author":"Han Song","year":"2015","unstructured":"Song Han, Huizi Mao, and William J Dally. 2015a. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149 (2015)."},{"key":"e_1_3_2_2_23_1","unstructured":"Song Han Jeff Pool John Tran and William Dally. 2015b. Learning both weights and connections for efficient neural network. In Advances in neural information processing systems. 1135--1143."},{"key":"e_1_3_2_2_24_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_2_25_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_48"},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00447"},{"key":"e_1_3_2_2_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.155"},{"key":"e_1_3_2_2_28_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01197"},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3119238"},{"key":"e_1_3_2_2_30_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3119238"},{"key":"e_1_3_2_2_31_1","first-page":"16305","article-title":"Rethinking the pruning criteria for convolutional neural network","volume":"34","author":"Huang Zhongzhan","year":"2021","unstructured":"Zhongzhan Huang, Wenqi Shao, Xinjiang Wang, Liang Lin, and Ping Luo. 2021. Rethinking the pruning criteria for convolutional neural network. Advances in Neural Information Processing Systems, Vol. 
34 (2021), 16305--16318.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01270-0_19"},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.5555\/3122009.3242044"},{"key":"e_1_3_2_2_34_1","volume-title":"Minmax Optimization: Stable Limit Points of Gradient Descent Ascent are Locally Optimal. arXiv preprint arXiv:1902.00618","author":"Jin Chi","year":"2019","unstructured":"Chi Jin, Praneeth Netrapalli, and Michael I Jordan. 2019. Minmax Optimization: Stable Limit Points of Gradient Descent Ascent are Locally Optimal. arXiv preprint arXiv:1902.00618 (2019)."},{"key":"e_1_3_2_2_35_1","volume-title":"Lottery Ticket Hypothesis for Spiking Neural Networks. arXiv preprint arXiv:2207.01382","author":"Kim Youngeun","year":"2022","unstructured":"Youngeun Kim, Yuhang Li, Hyoungseob Park, Yeshwanth Venkatesha, Ruokai Yin, and Priyadarshini Panda. 2022. Lottery Ticket Hypothesis for Spiking Neural Networks. arXiv preprint arXiv:2207.01382 (2022)."},{"key":"e_1_3_2_2_36_1","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00400"},{"key":"e_1_3_2_2_37_1","volume-title":"Speeding-up convolutional neural networks using fine-tuned cp-decomposition. arXiv preprint arXiv:1412.6553","author":"Lebedev Vadim","year":"2014","unstructured":"Vadim Lebedev, Yaroslav Ganin, Maksim Rakhuba, Ivan Oseledets, and Victor Lempitsky. 2014. Speeding-up convolutional neural networks using fine-tuned cp-decomposition. arXiv preprint arXiv:1412.6553 (2014)."},{"key":"e_1_3_2_2_38_1","volume-title":"Enabling Spike-Based Backpropagation for Training Deep Neural Network Architectures. Frontiers in neuroscience","author":"Lee Chankyu","year":"2019","unstructured":"Chankyu Lee, Syed S. Sarwar, Priyadarshini Panda, Gopalakrishnan Srinivasan, and Kaushik Roy. 2019. Enabling Spike-Based Backpropagation for Training Deep Neural Network Architectures. Frontiers in neuroscience, Vol. 
14 (2019), 119--119."},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.3389\/fnins.2020.00119"},{"key":"e_1_3_2_2_40_1","volume-title":"Training deep spiking neural networks using backpropagation. Frontiers in neuroscience","author":"Lee Jun H.","year":"2016","unstructured":"Jun H. Lee, Tobi Delbruck, and Michael Pfeiffer. 2016. Training deep spiking neural networks using backpropagation. Frontiers in neuroscience, Vol. 10 (2016), 508--508."},{"key":"e_1_3_2_2_41_1","volume-title":"Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710","author":"Li Hao","year":"2016","unstructured":"Hao Li, Asim Kadav, Igor Durdanovic, Hanan Samet, and Hans Peter Graf. 2016. Pruning filters for efficient convnets. arXiv preprint arXiv:1608.08710 (2016)."},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00029"},{"volume-title":"Accurate Spiking Neural Networks Calibration","author":"Li Yuhang","key":"e_1_3_2_2_43_1","unstructured":"Yuhang Li, Shikuang Deng, Xin Dong, Ruihao Gong, and Shi Gu. 2021a. A Free Lunch From ANN: Towards Efficient, Accurate Spiking Neural Networks Calibration, M. Meila and T. Zhang (Eds.), Vol. 139. JMLR-JOURNAL MACHINE LEARNING RESEARCH, SAN DIEGO."},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00572"},{"key":"e_1_3_2_2_45_1","volume-title":"Differentiable Spike: Rethinking Gradient-Descent for Training Spiking Neural Networks. In Advances in Neural Information Processing Systems","author":"Li Yuhang","year":"2021","unstructured":"Yuhang Li, Yufei Guo, Shanghang Zhang, Shikuang Deng, Yongqing Hai, and Shi Gu. 2021b. Differentiable Spike: Rethinking Gradient-Descent for Training Spiking Neural Networks. In Advances in Neural Information Processing Systems, A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan (Eds.). 
https:\/\/openreview.net\/forum?id=H4e7mBnC9f0"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746566"},{"key":"e_1_3_2_2_47_1","volume-title":"International Conference on Machine Learning. PMLR, 7021--7032","author":"Liu Liyang","year":"2021","unstructured":"Liyang Liu, Shilong Zhang, Zhanghui Kuang, Aojun Zhou, Jing-Hao Xue, Xinjiang Wang, Yimin Chen, Wenming Yang, Qingmin Liao, and Wayne Zhang. 2021. Group fisher pruning for practical network compression. In International Conference on Machine Learning. PMLR, 7021--7032."},{"key":"e_1_3_2_2_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.298"},{"key":"e_1_3_2_2_49_1","volume-title":"Rethinking the value of network pruning. arXiv preprint arXiv:1810.05270","author":"Liu Zhuang","year":"2018","unstructured":"Zhuang Liu, Mingjie Sun, Tinghui Zhou, Gao Huang, and Trevor Darrell. 2018. Rethinking the value of network pruning. arXiv preprint arXiv:1810.05270 (2018)."},{"key":"e_1_3_2_2_50_1","doi-asserted-by":"publisher","DOI":"10.3389\/fnins.2020.00535"},{"key":"e_1_3_2_2_51_1","doi-asserted-by":"publisher","DOI":"10.1016\/S0893-6080(97)00011-7"},{"key":"e_1_3_2_2_52_1","volume-title":"Thorpe","author":"Masquelier Timoth\u00e9e","year":"2007","unstructured":"Timoth\u00e9e Masquelier and Simon J. Thorpe. 2007. Unsupervised learning of visual features through spike timing dependent plasticity. PLoS computational biology, Vol. 3, 2 (2007), 0247--0257."},{"key":"e_1_3_2_2_53_1","volume-title":"And No Retraining. In International Conference on Learning Representations.","author":"Miao Lu","year":"2021","unstructured":"Lu Miao, Xiaolong Luo, Tianlong Chen, Wuyang Chen, Dong Liu, and Zhangyang Wang. 2021. Learning Pruning-Friendly Networks via Frank-Wolfe: One-Shot, Any-Sparsity, And No Retraining. 
In International Conference on Learning Representations."},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01152"},{"key":"e_1_3_2_2_55_1","volume-title":"Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440","author":"Molchanov Pavlo","year":"2016","unstructured":"Pavlo Molchanov, Stephen Tyree, Tero Karras, Timo Aila, and Jan Kautz. 2016. Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440 (2016)."},{"key":"e_1_3_2_2_56_1","unstructured":"Byunggook Na Jisoo Mok Seongsik Park Dongjin Lee Hyeokjun Choe and Sungroh Yoon. 2022. AutoSNN: Towards Energy-Efficient Spiking Neural Networks. (2022)."},{"key":"e_1_3_2_2_57_1","unstructured":"Atsushi Nitanda. 2014. Stochastic proximal gradient descent with acceleration techniques. In Advances in Neural Information Processing Systems. 1574--1582."},{"key":"e_1_3_2_2_58_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCAD.2018.2819366"},{"key":"e_1_3_2_2_59_1","volume-title":"Nature","volume":"575","author":"Roy Kaushik","year":"2019","unstructured":"Kaushik Roy, Akhilesh Jaiswal, and Priyadarshini Panda. 2019. Towards spike-based machine intelligence with neuromorphic computing. Nature (London), Vol. 575, 7784 (2019), 607--617."},{"key":"e_1_3_2_2_60_1","volume-title":"International Conference on Learning Representations.","author":"Shen Jiayi","year":"2020","unstructured":"Jiayi Shen, Haotao Wang, Shupeng Gui, Jianchao Tan, Zhangyang Wang, and Ji Liu. 2020. UMEC: Unified model and embedding compression for efficient recommendation systems. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_61_1","doi-asserted-by":"publisher","DOI":"10.3389\/fnins.2019.00405"},{"volume-title":"Very Deep Convolutional Networks for Large-Scale Image Recognition. In International Conference on Learning Representations.","author":"Simonyan K.","key":"e_1_3_2_2_62_1","unstructured":"K. 
Simonyan and A. Zisserman. 2015. Very Deep Convolutional Networks for Large-Scale Image Recognition. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_63_1","volume-title":"Optimizing the energy consumption of spiking neural networks for neuromorphic applications. Frontiers in neuroscience","author":"Sorbaro Martino","year":"2020","unstructured":"Martino Sorbaro, Qian Liu, Massimo Bortone, and Sadique Sheik. 2020. Optimizing the energy consumption of spiking neural networks for neuromorphic applications. Frontiers in neuroscience, Vol. 14 (2020), 662."},{"key":"e_1_3_2_2_64_1","doi-asserted-by":"publisher","DOI":"10.1111\/j.2517-6161.1996.tb02080.x"},{"key":"e_1_3_2_2_65_1","volume-title":"Efficient DC algorithm for constrained sparse optimization. arXiv preprint arXiv:1701.08498","author":"Tono Katsuya","year":"2017","unstructured":"Katsuya Tono, Akiko Takeda, and Jun-ya Gotoh. 2017. Efficient DC algorithm for constrained sparse optimization. arXiv preprint arXiv:1701.08498 (2017)."},{"key":"e_1_3_2_2_66_1","volume-title":"Spatio-temporal backpropagation for training high-performance spiking neural networks. Frontiers in neuroscience","author":"Wu Yujie","year":"2018","unstructured":"Yujie Wu, Lei Deng, Guoqi Li, Jun Zhu, and Luping Shi. 2018. Spatio-temporal backpropagation for training high-performance spiking neural networks. Frontiers in neuroscience, Vol. 12, MAY (2018), 331--331."},{"key":"e_1_3_2_2_67_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33011311"},{"key":"e_1_3_2_2_68_1","volume-title":"Autoprune: Automatic network pruning by regularizing auxiliary parameters. Advances in neural information processing systems","author":"Xiao Xia","year":"2019","unstructured":"Xia Xiao, Zigeng Wang, and Sanguthevar Rajasekaran. 2019. Autoprune: Automatic network pruning by regularizing auxiliary parameters. Advances in neural information processing systems, Vol. 
32 (2019)."},{"key":"e_1_3_2_2_69_1","volume-title":"Ultra-low Latency Adaptive Local Binary Spiking Neural Network with Accuracy Loss Estimator. arXiv preprint arXiv:2208.00398","author":"Xu Changqing","year":"2022","unstructured":"Changqing Xu, Yijian Pei, Zili Wu, Yi Liu, and Yintang Yang. 2022. Ultra-low Latency Adaptive Local Binary Spiking Neural Network with Accuracy Loss Estimator. arXiv preprint arXiv:2208.00398 (2022)."},{"key":"e_1_3_2_2_70_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01146"},{"key":"e_1_3_2_2_71_1","volume-title":"NetAdapt: Platform-Aware Neural Network Adaptation for Mobile Applications. arXiv preprint arXiv:1804.03230","author":"Yang Tien-Ju","year":"2018","unstructured":"Tien-Ju Yang, Andrew Howard, Bo Chen, Xiao Zhang, Alec Go, Vivienne Sze, and Hartwig Adam. 2018. NetAdapt: Platform-Aware Neural Network Adaptation for Mobile Applications. arXiv preprint arXiv:1804.03230 (2018)."},{"key":"e_1_3_2_2_72_1","volume-title":"Rethinking the smaller-norm-less-informative assumption in channel pruning of convolution layers. arXiv preprint arXiv:1802.00124","author":"Ye Jianbo","year":"2018","unstructured":"Jianbo Ye, Xin Lu, Zhe Lin, and James Z Wang. 2018. Rethinking the smaller-norm-less-informative assumption in channel pruning of convolution layers. arXiv preprint arXiv:1802.00124 (2018)."},{"key":"e_1_3_2_2_73_1","volume-title":"Drawing early-bird tickets: Towards more efficient training of deep networks. arXiv preprint arXiv:1909.11957","author":"You Haoran","year":"2019","unstructured":"Haoran You, Chaojian Li, Pengfei Xu, Yonggan Fu, Yue Wang, Xiaohan Chen, Richard G Baraniuk, Zhangyang Wang, and Yingyan Lin. 2019. Drawing early-bird tickets: Towards more efficient training of deep networks. arXiv preprint arXiv:1909.11957 (2019)."},{"key":"e_1_3_2_2_74_1","unstructured":"Jiahui Yu and Thomas Huang. 2020. AutoSlim: Towards One-Shot Architecture Search for Channel Numbers. 
(2020)."},{"key":"e_1_3_2_2_75_1","volume-title":"Unified Visual Transformer Compression. In International Conference on Learning Representations.","author":"Yu Shixing","year":"2021","unstructured":"Shixing Yu, Tianlong Chen, Jiayi Shen, Huan Yuan, Jianchao Tan, Sen Yang, Ji Liu, and Zhangyang Wang. 2021a. Unified Visual Transformer Compression. In International Conference on Learning Representations."},{"key":"e_1_3_2_2_76_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00630"},{"key":"e_1_3_2_2_77_1","volume-title":"International Conference on Machine Learning. 127--135","author":"Yuan Xiaotong","year":"2014","unstructured":"Xiaotong Yuan, Ping Li, and Tong Zhang. 2014. Gradient hard thresholding pursuit for sparsity-constrained optimization. In International Conference on Machine Learning. 127--135."},{"key":"e_1_3_2_2_78_1","doi-asserted-by":"crossref","unstructured":"Duzhen Zhang Tielin Zhang Shuncheng Jia Qingyu Wang and Bo Xu. 2022b. Recent Advances and New Frontiers in Spiking Neural Networks. (2022).","DOI":"10.24963\/ijcai.2022\/790"},{"key":"e_1_3_2_2_79_1","volume-title":"Carrying Out CNN Channel Pruning in a White Box","author":"Zhang Yuxin","year":"2022","unstructured":"Yuxin Zhang, Mingbao Lin, Chia-Wen Lin, Jie Chen, Yongjian Wu, Yonghong Tian, and Rongrong Ji. 2022a. Carrying Out CNN Channel Pruning in a White Box. IEEE Transactions on Neural Networks and Learning Systems (2022)."},{"key":"e_1_3_2_2_80_1","doi-asserted-by":"publisher","unstructured":"Hanle Zheng Yujie Wu Lei Deng Yifan Hu and Guoqi Li. 2020. Going Deeper With Directly-Trained Larger Spiking Neural Networks. https:\/\/doi.org\/10.48550\/ARXIV.2011.05280","DOI":"10.48550\/ARXIV.2011.05280"},{"key":"e_1_3_2_2_81_1","doi-asserted-by":"crossref","unstructured":"Hanle Zheng Yujie Wu Lei Deng Yifan Hu and Guoqi Li. 2021. Going Deeper With Directly-Trained Larger Spiking Neural Networks. 
In AAAI.","DOI":"10.1609\/aaai.v35i12.17320"},{"key":"e_1_3_2_2_82_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_40"},{"key":"e_1_3_2_2_83_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00358"},{"key":"e_1_3_2_2_84_1","unstructured":"Zhuangwei Zhuang Mingkui Tan Bohan Zhuang Jing Liu Yong Guo Qingyao Wu Junzhou Huang and Jinhui Zhu. 2018. Discrimination-aware channel pruning for deep neural networks. In Advances in Neural Information Processing Systems. 875--886."}],"event":{"name":"MM '23: The 31st ACM International Conference on Multimedia","sponsor":["SIGMM ACM Special Interest Group on Multimedia"],"location":"Ottawa ON Canada","acronym":"MM '23"},"container-title":["Proceedings of the 31st ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3611838","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3581783.3611838","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T23:57:36Z","timestamp":1755820656000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3581783.3611838"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,26]]},"references-count":84,"alternative-id":["10.1145\/3581783.3611838","10.1145\/3581783"],"URL":"https:\/\/doi.org\/10.1145\/3581783.3611838","relation":{},"subject":[],"published":{"date-parts":[[2023,10,26]]},"assertion":[{"value":"2023-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}