{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T14:07:12Z","timestamp":1764079632356,"version":"3.40.3"},"publisher-location":"Cham","reference-count":39,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030687625"},{"type":"electronic","value":"9783030687632"}],"license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021]]},"DOI":"10.1007\/978-3-030-68763-2_50","type":"book-chapter","created":{"date-parts":[[2021,2,20]],"date-time":"2021-02-20T16:28:24Z","timestamp":1613838504000},"page":"662-676","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Learning Sparse Filters in Deep Convolutional Neural Networks with a $$l_1\/l_2$$ 
Pseudo-Norm"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0545-0345","authenticated-orcid":false,"given":"Anthony","family":"Berthelier","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3294-7867","authenticated-orcid":false,"given":"Yongzhe","family":"Yan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4854-5686","authenticated-orcid":false,"given":"Thierry","family":"Chateau","sequence":"additional","affiliation":[]},{"given":"Christophe","family":"Blanc","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0374-3814","authenticated-orcid":false,"given":"Stefan","family":"Duffner","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7997-9837","authenticated-orcid":false,"given":"Christophe","family":"Garcia","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,2,21]]},"reference":[{"unstructured":"Wen, W., Wu, C., Wang, Y., Chen, Y., Li, H.: Learning structured sparsity in deep neural networks. arXiv preprint arXiv:1608.03665 (2016)","key":"50_CR1"},{"doi-asserted-by":"crossref","unstructured":"Yu, R., et al.: NISP: pruning networks using neuron importance score propagation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 9194\u20139203 (2018)","key":"50_CR2","DOI":"10.1109\/CVPR.2018.00958"},{"doi-asserted-by":"crossref","unstructured":"Lin, S., et al.: Towards optimal structured CNN pruning via generative adversarial learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2790\u20132799 (2019)","key":"50_CR3","DOI":"10.1109\/CVPR.2019.00290"},{"key":"50_CR4","first-page":"1097","volume":"25","author":"A Krizhevsky","year":"2012","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. Adv. Neural Inf. Process. Syst. 
25, 1097\u20131105 (2012)","journal-title":"Adv. Neural Inf. Process. Syst."},{"issue":"7553","key":"50_CR5","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1038\/nature14539","volume":"521","author":"Y LeCun","year":"2015","unstructured":"LeCun, Y., Bengio, Y., Hinton, G.: Deep learning. Nature 521(7553), 436\u2013444 (2015)","journal-title":"Nature"},{"unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale Image recognition. arXiv preprint arXiv:1409.1556 (2015)","key":"50_CR6"},{"doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Going deeper with convolutions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1\u20139 (2015)","key":"50_CR7","DOI":"10.1109\/CVPR.2015.7298594"},{"doi-asserted-by":"crossref","unstructured":"He, K., Sun, J.: Convolutional neural networks at constrained time cost. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5353\u20135360 (2015)","key":"50_CR8","DOI":"10.1109\/CVPR.2015.7299173"},{"doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","key":"50_CR9","DOI":"10.1109\/CVPR.2016.90"},{"unstructured":"Li, H., Kadav, A., Durdanovic, I., Samet, H., Graf, H.P.: Pruning filters for efficient ConvNets. arXiv preprint arXiv:1608.08710 (2017)","key":"50_CR10"},{"doi-asserted-by":"crossref","unstructured":"Luo, J. H., Wu, J., Lin, W.: Thinet: a filter level pruning method for deep neural network compression. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 5058\u20135066 (2017)","key":"50_CR11","DOI":"10.1109\/ICCV.2017.541"},{"doi-asserted-by":"crossref","unstructured":"Jaderberg, M., Vedaldi, A., Zisserman, A.: Speeding up convolutional neural networks with low rank expansions. 
arXiv preprint arXiv:1405.3866 (2014)","key":"50_CR12","DOI":"10.5244\/C.28.88"},{"doi-asserted-by":"crossref","unstructured":"Bach, F., Jenatton, R., Mairal, J., Obozinski, G.: Optimization with sparsity-inducing penalties. arXiv preprint arXiv:1108.0775 (2012)","key":"50_CR13","DOI":"10.1561\/9781601985118"},{"issue":"4","key":"50_CR14","doi-asserted-by":"publisher","first-page":"1978","DOI":"10.1214\/09-AOS778","volume":"38","author":"J Huang","year":"2010","unstructured":"Huang, J., Zhang, T.: The benefit of group sparsity. Ann. Statist. 38(4), 1978\u20132004 (2010)","journal-title":"Ann. Statist."},{"issue":"3","key":"50_CR15","doi-asserted-by":"publisher","first-page":"349","DOI":"10.1198\/004017005000000139","volume":"47","author":"B Turlach","year":"2000","unstructured":"Turlach, B., Venables, W., Wright, S.: Simultaneous variable selection. Technometrics 47(3), 349\u2013363 (2000)","journal-title":"Technometrics"},{"issue":"1","key":"50_CR16","doi-asserted-by":"publisher","first-page":"49","DOI":"10.1111\/j.1467-9868.2005.00532.x","volume":"68","author":"M Yuan","year":"2006","unstructured":"Yuan, M., Lin, Y.: Model selection and estimation in regression with grouped variables. J. Roy. Stat. Soc. B (Stat. Methodol.) 68(1), 49\u201367 (2006)","journal-title":"J. Roy. Stat. Soc. B (Stat. Methodol.)"},{"unstructured":"Dauphin, Y.N., Bengio, Y.: Big neural networks waste capacity. arXiv preprint arXiv:1301.3583 (2013)","key":"50_CR17"},{"unstructured":"Ba, L.J., Caruana, R.: Do deep nets really need to be deep? arXiv preprint arXiv:1312.6184 (2014)","key":"50_CR18"},{"unstructured":"Gupta, S., Agrawal, A., Gopalakrishnan, K., Narayanan, P.: Deep learning with limited numerical precision. In: International Conference on Machine Learning, pp. 1737\u20131746 (2015)","key":"50_CR19"},{"unstructured":"Courbariaux, M., Bengio, Y., David, J.P.: Training deep neural networks with low precision multiplications. 
arXiv preprint arXiv:1412.7024 (2014)","key":"50_CR20"},{"unstructured":"Williamson, D.: Dynamically scaled fixed point arithmetic. In: IEEE Pacific Rim Conference on Communications, Computers and Signal Processing Conference Proceedings, pp. 315\u2013318 (1991)","key":"50_CR21"},{"unstructured":"Iandola, F.N., Han, S., Moskewicz, M.W., Ashraf, K., Dally, W.J., Keutzer, K.: SqueezeNet: alexnet-level accuracy with 50x fewer parameters and<0.5 mb model size. arXiv preprint arXiv:1602.07360 (2016)","key":"50_CR22"},{"unstructured":"Howard, A.G., et al.: Mobilenets: efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861 (2017)","key":"50_CR23"},{"doi-asserted-by":"crossref","unstructured":"Miikkulainen, R., et al.: Evolving deep neural networks. In: Artificial Intelligence in the Age of Neural Networks and Brain Computing, pp. 293\u2013312 (2017)","key":"50_CR24","DOI":"10.1016\/B978-0-12-815480-9.00015-3"},{"doi-asserted-by":"crossref","unstructured":"Tan, M., Chen, B., Pang, R., Vasudevan, V., Le, Q.V.: MnasNet: platform-aware neural architecture search for mobile. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2820\u20132828 (2019)","key":"50_CR25","DOI":"10.1109\/CVPR.2019.00293"},{"doi-asserted-by":"crossref","unstructured":"He, Y., Lin, J., Liu, Z., Wang, H., Li, L.J., Han, S.: AMC: autoML for model compression and acceleration on mobile devices. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 784\u2013800 (2018)","key":"50_CR26","DOI":"10.1007\/978-3-030-01234-2_48"},{"unstructured":"Han, S., Mao, H., Dally, W. J.: Deep compression: compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149 (2016)","key":"50_CR27"},{"unstructured":"Choi, Y., El-Khamy, M., Lee, J.: Towards the limit of network quantization. 
arXiv preprint arXiv:1612.01543 (2017)","key":"50_CR28"},{"unstructured":"Han, S., Pool, J., Tran, J., Dally, W.J.: Learning both weights and connections for efficient neural network. arXiv preprint arXiv:1506.02626 (2015)","key":"50_CR29"},{"issue":"3","key":"50_CR30","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3005348","volume":"13","author":"S Anwar","year":"2017","unstructured":"Anwar, S., Hwang, K., Sung, W.: Structured pruning of deep convolutional neural networks. ACM J. Emerg. Technol. Comput. Syst. 13(3), 1\u201318 (2017)","journal-title":"ACM J. Emerg. Technol. Comput. Syst."},{"unstructured":"Molchanov, P., Tyree, S., Karras, T., Aila, T., Kautz, J.: Pruning convolutional neural networks for resource efficient transfer learning. arXiv preprint arXiv:1611.06440, 3 (2017)","key":"50_CR31"},{"doi-asserted-by":"crossref","unstructured":"He, Y., Zhang, X., Sun, J.: Channel pruning for accelerating very deep neural networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1389\u20131397 (2017)","key":"50_CR32","DOI":"10.1109\/ICCV.2017.155"},{"doi-asserted-by":"crossref","unstructured":"Liu, Z., Li, J., Shen, Z., Huang, G., Yan, S., Zhang, C.: Learning efficient convolutional networks through network slimming. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2736\u20132744 (2017)","key":"50_CR33","DOI":"10.1109\/ICCV.2017.298"},{"unstructured":"Zhuang, Z., et al.: Discrimination-aware channel pruning for deep neural networks. arXiv preprint arXiv:1810.11809 (2018)","key":"50_CR34"},{"doi-asserted-by":"crossref","unstructured":"He, Y., Liu, P., Wang, Z., Hu, Z., Yang, Y.: Filter pruning via geometric median for deep convolutional neural networks acceleration. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
4340\u20134349 (2019)","key":"50_CR35","DOI":"10.1109\/CVPR.2019.00447"},{"unstructured":"Liu, B., Wang, M., Foroosh, H., Tappen, M., Pensky, M.: Sparse convolutional neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 806\u2013814 (2015)","key":"50_CR36"},{"issue":"1","key":"50_CR37","doi-asserted-by":"publisher","first-page":"49","DOI":"10.1111\/j.1467-9868.2005.00532.x","volume":"68","author":"M Yuan","year":"2006","unstructured":"Yuan, M., Lin, Y.: Model selection and estimation in regression with grouped variables. J. Roy. Stat. Soc. B (Stat. Methodol.) 68(1), 49\u201367 (2006)","journal-title":"J. Roy. Stat. Soc. B (Stat. Methodol.)"},{"unstructured":"Liu, J., Ye, J.: Efficient l1\/lq norm regularization. arXiv preprint arXiv:1009.4766 (2010)","key":"50_CR38"},{"issue":"11","key":"50_CR39","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y LeCun","year":"1998","unstructured":"LeCun, Y., Bottou, L., Bengio, Y., Haffner, P.: Gradient-based learning applied to document recognition. Proc. IEEE 86(11), 2278\u20132324 (1998)","journal-title":"Proc. IEEE"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition. 
ICPR International Workshops and Challenges"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-68763-2_50","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,2,20]],"date-time":"2021-02-20T17:41:53Z","timestamp":1613842913000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-68763-2_50"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"ISBN":["9783030687625","9783030687632"],"references-count":39,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-68763-2_50","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2021]]},"assertion":[{"value":"21 February 2021","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"10 January 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 January 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ICPR2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"http:\/\/www.icpr2020.it\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}