{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,13]],"date-time":"2026-02-13T23:45:59Z","timestamp":1771026359583,"version":"3.50.1"},"reference-count":61,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"China Natural Science Foundation","doi-asserted-by":"publisher","award":["62171391"],"award-info":[{"award-number":["62171391"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Opening Foundation of Yulin Research Institute of Big Data","award":["2020YJKY04"],"award-info":[{"award-number":["2020YJKY04"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2023,10]]},"DOI":"10.1109\/tnnls.2022.3141665","type":"journal-article","created":{"date-parts":[[2022,1,24]],"date-time":"2022-01-24T20:45:13Z","timestamp":1643057113000},"page":"7350-7364","source":"Crossref","is-referenced-by-count":54,"title":["Automatic Sparse Connectivity Learning for Neural Networks"],"prefix":"10.1109","volume":"34","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6328-2373","authenticated-orcid":false,"given":"Zhimin","family":"Tang","sequence":"first","affiliation":[{"name":"Department of Electrical and Computer Engineering, Southern Illinois University Carbondale, Carbondale, IL, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7059-9096","authenticated-orcid":false,"given":"Linkai","family":"Luo","sequence":"additional","affiliation":[{"name":"Department of Automation, Xiamen University, Xiamen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8580-4395","authenticated-orcid":false,"given":"Bike","family":"Xie","sequence":"additional","affiliation":[{"name":"Kneron Inc., San Diego, CA, USA"}]},{"given":"Yiyu","family":"Zhu","sequence":"additional","affiliation":[{"name":"Kneron Inc., San Diego, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2717-521X","authenticated-orcid":false,"given":"Rujie","family":"Zhao","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, Southern Illinois University Carbondale, Carbondale, IL, USA"}]},{"given":"Lvqing","family":"Bi","sequence":"additional","affiliation":[{"name":"Research Center for Intelligent Information and Communication Technology, School of Physics and Telecommunication Engineering, Yulin Normal University, Yulin, Guangxi, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6451-8453","authenticated-orcid":false,"given":"Chao","family":"Lu","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, Southern Illinois University Carbondale, Carbondale, IL, USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref2","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Krizhevsky"},{"key":"ref3","first-page":"1","article-title":"Very deep convolutional networks for large-scale image recognition","volume-title":"Proc. ICLR","author":"Simonyan"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref7","first-page":"1135","article-title":"Learning both weights and connections for efficient neural network","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Han"},{"key":"ref8","first-page":"1379","article-title":"Dynamic network surgery for efficient dnns","volume-title":"Proc. Adv. In Neural Inf. Process. Syst.","author":"Guo"},{"key":"ref9","article-title":"Soft weight-sharing for neural network compression","author":"Ullrich","year":"2017","journal-title":"ICLR"},{"key":"ref10","first-page":"1","article-title":"Variational dropout sparsifies deep neural networks","volume-title":"Proc. ICML","author":"Molchanov"},{"key":"ref11","article-title":"To prune, or not to prune: Exploring the efficacy of pruning for model compression","author":"Zhu","year":"2017","journal-title":"arXiv:1710.01878"},{"key":"ref12","first-page":"3878","article-title":"Learning sparse neural networks via sensitivity-driven regularization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Tartaglione"},{"key":"ref13","first-page":"1","article-title":"The lottery ticket hypothesis: Finding sparse, trainable neural networks","volume-title":"Proc. ICLR","author":"Frankle"},{"key":"ref14","first-page":"1","article-title":"Pruning filters for efficient convnets","volume-title":"Proc. ICLR","author":"Li"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.541"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/309"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/336"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2906563"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00447"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.298"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.155"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9533908"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.sigpro.2018.10.019"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00721"},{"key":"ref25","first-page":"2074","article-title":"Learning structured sparsity in deep neural networks","author":"Wen","year":"2016","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3005348"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00716"},{"key":"ref28","article-title":"MobileNets: Efficient convolutional neural networks for mobile vision applications","author":"Howard","year":"2017","journal-title":"arXiv:1704.04861"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.634"},{"key":"ref30","article-title":"Rethinking the smaller-norm-less-informative assumption in channel pruning of convolution layers","author":"Ye","year":"2018","journal-title":"arXiv:1802.00124"},{"key":"ref31","first-page":"1","article-title":"Pruning convolutional neural networks for resource efficient inference","volume-title":"Proc. ICLR","author":"Molchanov"},{"key":"ref32","article-title":"Faster gaze prediction with dense networks and Fisher pruning","author":"Theis","year":"2018","journal-title":"arXiv:1801.05787"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00208"},{"key":"ref34","first-page":"1","article-title":"Learning sparse neural networks through $L_{0}$\n regularization","volume-title":"Proc. ICLR","author":"Louizos"},{"key":"ref35","first-page":"5122","article-title":"Operation-aware soft channel pruning using differentiable masks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kang"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58583-9_15"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01270-0_19"},{"key":"ref38","first-page":"1","article-title":"Autoprune: Automatic network pruning by regularizing auxiliary parameters","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Xiao"},{"key":"ref39","first-page":"1","article-title":"Categorical reparameterization with gumbel-softmax","volume-title":"Proc. ICLR","author":"Jang"},{"key":"ref40","first-page":"1","article-title":"The concrete distribution: A continuous relaxation of discrete random variables","volume-title":"Proc. ICLR","author":"Maddison"},{"key":"ref41","article-title":"Estimating or propagating gradients through stochastic neurons for conditional computation","author":"Bengio","year":"2013","journal-title":"arXiv:1308.3432"},{"key":"ref42","article-title":"Binarized neural networks: Training deep neural networks with weights and activations constrained to +1 or \u22121","author":"Courbariaux","year":"2016","journal-title":"arXiv:1602.02830"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/s40687-018-0177-6"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.574"},{"key":"ref45","article-title":"Understanding straight-through estimator in training activation quantized neural nets","author":"Yin","year":"2019","journal-title":"arXiv:1903.05662"},{"key":"ref46","volume-title":"Neural networks for machine learning coursera video lectures","author":"Hinton","year":"2012"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.123"},{"key":"ref48","first-page":"1","article-title":"EfficientNet: Rethinking model scaling for convolutional neural networks","volume-title":"Proc. ICLR","author":"Tan"},{"key":"ref49","volume-title":"The MNIST Database of Handwritten Digits","author":"LeCun","year":"2019"},{"key":"ref50","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref52","first-page":"1","article-title":"Pointer sentinel mixture models","volume-title":"Proc. ICLR","author":"Merity"},{"key":"ref53","first-page":"1","article-title":"Rethinking the value of network pruning","volume-title":"Proc. ICLR","author":"Liu"},{"key":"ref54","first-page":"1","article-title":"Tying word vectors and word classifiers: A loss framework for language modeling","volume-title":"Proc. ICLR","author":"Inan"},{"key":"ref55","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"Ioffe","year":"2015","journal-title":"arXiv:1502.03167"},{"key":"ref56","first-page":"1","article-title":"L2 regularization versus batch and weight normalization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"van Laarhoven"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00958"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.5244\/C.30.87"},{"key":"ref59","article-title":"Network trimming: A data-driven neuron pruning approach towards efficient deep architectures","author":"Hu","year":"2016","journal-title":"arXiv:1607.03250"},{"key":"ref60","first-page":"1","article-title":"Exploring sparsity in recurrent neural networks","volume-title":"Proc. ICLR","author":"Narang"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00572"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10273172\/09690593.pdf?arnumber=9690593","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,13]],"date-time":"2024-01-13T22:46:14Z","timestamp":1705185974000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9690593\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10]]},"references-count":61,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2022.3141665","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10]]}}}