{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,18]],"date-time":"2026-04-18T03:47:25Z","timestamp":1776484045517,"version":"3.51.2"},"reference-count":48,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2024,3,5]],"date-time":"2024-03-05T00:00:00Z","timestamp":1709596800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,3,5]],"date-time":"2024-03-05T00:00:00Z","timestamp":1709596800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Mach Learn"],"published-print":{"date-parts":[[2024,5]]},"DOI":"10.1007\/s10994-024-06516-z","type":"journal-article","created":{"date-parts":[[2024,3,5]],"date-time":"2024-03-05T17:02:04Z","timestamp":1709658124000},"page":"2597-2618","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["Neural network relief: a pruning algorithm based on neural activity"],"prefix":"10.1007","volume":"113","author":[{"given":"Aleksandr","family":"Dekhovich","sequence":"first","affiliation":[]},{"given":"David M. J.","family":"Tax","sequence":"additional","affiliation":[]},{"given":"Marcel H. F.","family":"Sluiter","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6216-0355","authenticated-orcid":false,"given":"Miguel A.","family":"Bessa","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,3,5]]},"reference":[{"key":"6516_CR1","first-page":"3177","volume":"30","author":"A Aghasi","year":"2017","unstructured":"Aghasi, A., Abdi, A., Nguyen, N., & Romberg, J. (2017). Net-trim: Convex pruning of deep neural networks with performance guarantee. Advances in Neural Information Processing Systems, 30, 3177\u20133186.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6516_CR2","unstructured":"Ahmad, S., & Scheinkman, L. (2019). How can we be so dense? The benefits of using highly sparse representations. arXiv preprint arXiv:1903.11257"},{"key":"6516_CR3","unstructured":"Ancona, M., \u00d6ztireli, C., & Gross, M. (2020). Shapley value as principled metric for structured network pruning. arXiv preprint arXiv:2006.01795"},{"issue":"1","key":"6516_CR4","doi-asserted-by":"publisher","first-page":"183","DOI":"10.1137\/080716542","volume":"2","author":"A Beck","year":"2009","unstructured":"Beck, A., & Teboulle, M. (2009). A fast iterative shrinkage-thresholding algorithm for linear inverse problems. SIAM Journal on Imaging Sciences, 2(1), 183\u2013202.","journal-title":"SIAM Journal on Imaging Sciences"},{"key":"6516_CR5","unstructured":"Dong, X., Chen, S., & Pan, S. (2017). Learning to prune deep neural networks via layer-wise optimal brain surgeon. In Advances in neural information processing systems (pp. 4857\u20134867)."},{"key":"6516_CR6","unstructured":"Frankle, J., & Carbin, M. (2019). The lottery ticket hypothesis: Finding sparse, trainable neural networks. In International conference on learning representations (ICLR)"},{"key":"6516_CR7","doi-asserted-by":"crossref","unstructured":"Garg, Y., & Candan, K.S. (2020). isparse: Output informed sparsification of neural network. In Proceedings of the 2020 international conference on multimedia retrieval (pp. 180\u2013188).","DOI":"10.1145\/3372278.3390688"},{"issue":"9","key":"6516_CR8","doi-asserted-by":"publisher","first-page":"3161","DOI":"10.1007\/s10994-022-06193-w","volume":"111","author":"L Geng","year":"2022","unstructured":"Geng, L., & Niu, B. (2022). Pruning convolutional neural networks via filter similarity analysis. Machine Learning, 111(9), 3161\u20133180.","journal-title":"Machine Learning"},{"key":"6516_CR9","unstructured":"Guo, Y., Yao, A. & Chen, Y. (2016). Dynamic network surgery for efficient DNNs. In Advances in neural information processing systems (pp. 1379\u20131387)."},{"key":"6516_CR10","unstructured":"Han, S., Mao, H., & Dally, W.J. (2016). Deep compression: Compressing deep neural networks with pruning, trained quantization and Huffman coding. In International conference on learning representations (ICLR)"},{"key":"6516_CR11","unstructured":"Han, S., Pool, J., Tran, J., & Dally, W. (2015). Learning both weights and connections for efficient neural network. In Advances in neural information processing systems (pp. 1135\u20131143)."},{"key":"6516_CR12","unstructured":"Hassibi, B., & Stork, D.G. (1993). Second order derivatives for network pruning: Optimal brain surgeon. In Advances in neural information processing systems (pp. 164\u2013171)"},{"key":"6516_CR13","doi-asserted-by":"crossref","unstructured":"He, Y., Zhang, X., & Sun, J. (2017). Channel pruning for accelerating very deep neural networks. In Proceedings of the IEEE international conference on computer vision (pp. 1389\u20131397).","DOI":"10.1109\/ICCV.2017.155"},{"key":"6516_CR14","doi-asserted-by":"crossref","unstructured":"He, Y., Kang, G., Dong, X., Fu, Y., & Yang, Y. (2018). Soft filter pruning for accelerating deep convolutional neural networks. In International joint conference on artificial intelligence (IJCAI) (pp. 2234\u20132240).","DOI":"10.24963\/ijcai.2018\/309"},{"key":"6516_CR15","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770\u2013778).","DOI":"10.1109\/CVPR.2016.90"},{"key":"6516_CR16","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2015). Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. In: Proceedings of the IEEE international conference on computer vision (pp. 1026\u20131034).","DOI":"10.1109\/ICCV.2015.123"},{"key":"6516_CR17","unstructured":"Hu, H., Peng, R., Tai, Y.-W., & Tang, C.-K. (2016). Network trimming: A data-driven neuron pruning approach towards efficient deep architectures. arXiv preprint arXiv:1607.03250"},{"key":"6516_CR18","doi-asserted-by":"crossref","unstructured":"Huang, Z., & Wang, N. (2018). Data-driven sparse structure selection for deep neural networks. In Proceedings of the European conference on computer vision (ECCV) (pp. 304\u2013320).","DOI":"10.1007\/978-3-030-01270-0_19"},{"issue":"2","key":"6516_CR19","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1111\/j.1469-8137.1912.tb05611.x","volume":"11","author":"P Jaccard","year":"1912","unstructured":"Jaccard, P. (1912). The distribution of the flora in the alpine zone. 1. New Phytologist, 11(2), 37\u201350.","journal-title":"New Phytologist"},{"issue":"2","key":"6516_CR20","doi-asserted-by":"publisher","first-page":"239","DOI":"10.1109\/72.80236","volume":"1","author":"ED Karnin","year":"1990","unstructured":"Karnin, E. D. (1990). A simple procedure for pruning back-propagation trained neural networks. IEEE Transactions on Neural Networks, 1(2), 239\u2013242.","journal-title":"IEEE Transactions on Neural Networks"},{"key":"6516_CR21","unstructured":"Kingma, D.P. & Ba, J. (2015). Adam: A method for stochastic optimization. In 3rd international conference on learning representations, ICLR."},{"key":"6516_CR22","unstructured":"Krizhevsky. A. (2009). Learning multiple layers of features from tiny images. Master\u2019s thesis, Department of Computer Science, University of Toronto."},{"issue":"11","key":"6516_CR23","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y LeCun","year":"1998","unstructured":"LeCun, Y., Bottou, L., Bengio, Y., & Haffner, P. (1998). Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11), 2278\u20132324.","journal-title":"Proceedings of the IEEE"},{"key":"6516_CR24","unstructured":"LeCun, Y., Denker, J.S., & Solla, S.A. (1990). Optimal brain damage. In Advances in neural information processing systems (pp. 598\u2013605)."},{"key":"6516_CR25","doi-asserted-by":"crossref","unstructured":"Lebedev, V., & Lempitsky, V. (2016). Fast convnets using group-wise brain damage. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 2554\u20132564).","DOI":"10.1109\/CVPR.2016.280"},{"key":"6516_CR26","unstructured":"Lee, N., Ajanthan, T. & Torr, P.H. (2019). Snip: Single-shot network pruning based on connection sensitivity. In International conference on learning representations (ICLR)"},{"key":"6516_CR27","unstructured":"Li, H., Kadav, A., Durdanovic, I., Samet, H., & Graf, H.P. (2017). Pruning filters for efficient convnets. In international conference of learning representation (ICLR)."},{"key":"6516_CR28","doi-asserted-by":"crossref","unstructured":"Li, T., Wu, B., Yang, Y., Fan, Y., Zhang, Y., & Liu, W. (2019). Compressing convolutional neural networks via factorized convolutional filters. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 3977\u20133986).","DOI":"10.1109\/CVPR.2019.00410"},{"issue":"2","key":"6516_CR29","doi-asserted-by":"publisher","first-page":"685","DOI":"10.1007\/s10994-021-06049-9","volume":"111","author":"N Liao","year":"2022","unstructured":"Liao, N., Wang, S., Xiang, L., Ye, N., Shao, S., & Chu, P. (2022). Achieving adversarial robustness via sparsity. Machine Learning, 111(2), 685\u2013711.","journal-title":"Machine Learning"},{"key":"6516_CR30","doi-asserted-by":"crossref","unstructured":"Lin, M., Ji, R., Wang, Y., Zhang, Y., Zhang, B., Tian, Y., & Shao, L. (2020). Hrank: Filter pruning using high-rank feature map. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 1529\u20131538).","DOI":"10.1109\/CVPR42600.2020.00160"},{"key":"6516_CR31","unstructured":"Louizos, C., Ullrich, K., Welling, M. (2017). Bayesian compression for deep learning. In Advances in neural information processing systems (pp. 3288\u20133298)."},{"key":"6516_CR32","doi-asserted-by":"crossref","unstructured":"Luo, J.-H., Wu, J., & Lin, W. (2017). Thinet: A filter level pruning method for deep neural network compression. In Proceedings of the IEEE international conference on computer vision (pp. 5058\u20135066)","DOI":"10.1109\/ICCV.2017.541"},{"key":"6516_CR33","doi-asserted-by":"crossref","unstructured":"Mallya, A., & Lazebnik, S. (2018). Packnet: Adding multiple tasks to a single network by iterative pruning. In: Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 7765\u20137773).","DOI":"10.1109\/CVPR.2018.00810"},{"key":"6516_CR34","doi-asserted-by":"crossref","unstructured":"Mallya, A., Davis, D., & Lazebnik, S. (2018) Piggyback: Adapting a single network to multiple tasks by learning to mask weights. In Proceedings of the European conference on computer vision (ECCV) (pp. 67\u201382).","DOI":"10.1007\/978-3-030-01225-0_5"},{"key":"6516_CR35","doi-asserted-by":"crossref","unstructured":"Mehta, D., Kim, K.I., & Theobalt, C. (2019). On implicit filter level sparsity in convolutional neural networks. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 520\u2013528).","DOI":"10.1109\/CVPR.2019.00061"},{"key":"6516_CR36","doi-asserted-by":"crossref","unstructured":"Molchanov, P., Mallya, A., Tyree, S., Frosio, I., & Kautz, J. (2019). Importance estimation for neural network pruning. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 11264\u201311272).","DOI":"10.1109\/CVPR.2019.01152"},{"key":"6516_CR37","unstructured":"Molchanov, P., Tyree, S., Karras, T., Aila, T., & Kautz, J. (2016). Pruning convolutional neural networks for resource efficient inference. arXiv preprint arXiv:1611.06440"},{"key":"6516_CR38","unstructured":"Molchanov, D., Ashukha, A., & Vetrov, D. (2017). Variational dropout sparsifies deep neural networks. In International conference on machine learning (pp. 2498\u20132507). PMLR."},{"key":"6516_CR39","unstructured":"Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., Desmaison, A., K\u00f6pf, A., Yang, E., DeVito, Z., Raison, M., Tejani, A., Chilamkurthy, S., Steiner, B., Fang, L., Bai, J. & Chintala, S. (2019). Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703"},{"key":"6516_CR40","unstructured":"Renda, A., Frankle, J., & Carbin, M. (2020). Comparing rewinding and fine-tuning in neural network pruning. arXiv preprint arXiv:2003.02389"},{"key":"6516_CR41","unstructured":"Simonyan, K., & Zisserman, A. (2015). Very deep convolutional networks for large-scale image recognition. In International conference on learning representations (ICLR)."},{"key":"6516_CR42","first-page":"24604","volume":"34","author":"Y Sui","year":"2021","unstructured":"Sui, Y., Yin, M., Xie, Y., Phan, H., Aliari Zonouz, S., & Yuan, B. (2021). Chip: Channel independence-based pruning for compact neural networks. Advances in Neural Information Processing Systems, 34, 24604\u201324616.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6516_CR43","unstructured":"Ullrich, K., Meeds, E., & Welling, M. (2017). Soft weight-sharing for neural network compression. arXiv preprint arXiv:1702.04008"},{"key":"6516_CR44","doi-asserted-by":"crossref","unstructured":"Ye, S., Xu, K., Liu, S., Cheng, H., Lambrechts, J.-H., Zhang, H., Zhou, A., Ma, K., Wang, Y., & Lin, X. (2019). Adversarial robustness vs. model compression, or both?. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 111\u2013120).","DOI":"10.1109\/ICCV.2019.00020"},{"key":"6516_CR45","unstructured":"Ye, J., Lu, X., Lin, Z., & Wang, J.Z. (2018). Rethinking the smaller-norm-less-informative assumption in channel pruning of convolution layers. arXiv preprint arXiv:1802.00124"},{"key":"6516_CR46","doi-asserted-by":"crossref","unstructured":"Yu, R., Li, A., Chen, C.-F., Lai, J.-H., Morariu, V.I., Han, X., Gao, M., Lin, C.-Y., & Davis, L.S. (2018). Nisp: Pruning networks using neuron importance score propagation. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 9194\u20139203).","DOI":"10.1109\/CVPR.2018.00958"},{"key":"6516_CR47","unstructured":"Zagoruyko, S. (2015). 92.45 on cifar-10 in torch. URL: http:\/\/torch.ch\/blog\/2015\/07\/30\/cifar.html."},{"key":"6516_CR48","doi-asserted-by":"crossref","unstructured":"Zhao, C., Ni, B., Zhang, J., Zhao, Q., Zhang, W., & Tian, Q. (2019). Variational convolutional neural network pruning. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 2780\u20132789).","DOI":"10.1109\/CVPR.2019.00289"}],"container-title":["Machine Learning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-024-06516-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10994-024-06516-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-024-06516-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,27]],"date-time":"2025-11-27T18:04:43Z","timestamp":1764266683000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10994-024-06516-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,3,5]]},"references-count":48,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2024,5]]}},"alternative-id":["6516"],"URL":"https:\/\/doi.org\/10.1007\/s10994-024-06516-z","relation":{},"ISSN":["0885-6125","1573-0565"],"issn-type":[{"value":"0885-6125","type":"print"},{"value":"1573-0565","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,3,5]]},"assertion":[{"value":"19 December 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 November 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 January 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 March 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no conflicts of interest to declare.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}},{"value":"Not applicable","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Not applicable","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}]}}