{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,7]],"date-time":"2025-08-07T09:20:04Z","timestamp":1754558404961,"version":"3.28.0"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,8,6]],"date-time":"2023-08-06T00:00:00Z","timestamp":1691280000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,8,6]],"date-time":"2023-08-06T00:00:00Z","timestamp":1691280000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,8,6]]},"DOI":"10.1109\/mwscas57524.2023.10405945","type":"proceedings-article","created":{"date-parts":[[2024,1,31]],"date-time":"2024-01-31T18:29:15Z","timestamp":1706725755000},"page":"1020-1024","source":"Crossref","is-referenced-by-count":1,"title":["Structured Pruning in Deep Neural Networks with Trainable Probability Masks"],"prefix":"10.1109","author":[{"given":"F.","family":"Martinini","sequence":"first","affiliation":[{"name":"DEI"}]},{"given":"A.","family":"Enttsel","sequence":"additional","affiliation":[{"name":"DEI"}]},{"given":"A.","family":"Marchioni","sequence":"additional","affiliation":[{"name":"DEI"}]},{"given":"M.","family":"Mangia","sequence":"additional","affiliation":[{"name":"DEI"}]},{"given":"R.","family":"Rovatti","sequence":"additional","affiliation":[{"name":"DEI"}]},{"given":"G.","family":"Setti","sequence":"additional","affiliation":[{"name":"Politecnico di Torino,DET,Italy"}]}],"member":"263","reference":[{"key":"ref1","first-page":"362","article-title":"A convergence theory for deep learning via over-parameterization","volume-title":"36th International Conference on Machine Learning, ICML 2019","volume":"2019","author":"Allen-Zhu","year":"2019"},{"key":"ref2","first-page":"5914","article-title":"Train Large, then compress: Rethinking model size for efficient training and inference of transformers","volume-title":"37th International Conference on Machine Learning, ICML 2020","volume":"PartF16814","author":"Li","year":"2020"},{"key":"ref3","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Advances in Neural Information Processing Systems","volume":"33","author":"Brown","year":"2020"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-05318-5_3"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CRV50864.2020.00037"},{"journal-title":"Sparsegpt: Massive language models can be accurately pruned in one-shot","year":"2023","author":"Frantar","key":"ref6"},{"key":"ref7","article-title":"Pruning filters for efficient convnets","volume-title":"5th International Conference on Learning Representations, ICLR 2017 - Conference Track Proceedings","author":"Li","year":"2017"},{"key":"ref8","first-page":"4858","article-title":"Learning to prune deep neural networks via layer-wise optimal brain surgeon","volume":"2017","author":"Dong","year":"2017","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.1993.713929"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2007.08.026"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevA.39.6600"},{"key":"ref12","article-title":"Deep Compression: Compressing Deep Neural Networks with Pruning, Trained Quantization and Huffman Coding","volume-title":"4th International Conference on Learning Representations, ICLR 2016 - Conference Track Proceedings","author":"Han","year":"2015"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2015.2494536"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-20351-1_61"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/BioCAS49922.2021.9644958"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICME.2018.8486540"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"journal-title":"The State of Sparsity in Deep Neural Networks","year":"2019","author":"Gale","key":"ref19"},{"key":"ref20","article-title":"Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms","volume":"abs\/1708.0","author":"Xiao","year":"2017","journal-title":"CoRR"},{"key":"ref21","article-title":"Very deep convolutional networks for large-scale image recognition","volume-title":"3rd International Conference on Learning Representations, ICLR 2015 - Conference Track Proceedings","author":"Simonyan","year":"2015"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.5555\/2999134.2999257"}],"event":{"name":"2023 IEEE 66th International Midwest Symposium on Circuits and Systems (MWSCAS)","start":{"date-parts":[[2023,8,6]]},"location":"Tempe, AZ, USA","end":{"date-parts":[[2023,8,9]]}},"container-title":["2023 IEEE 66th International Midwest Symposium on Circuits and Systems (MWSCAS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10405424\/10405847\/10405945.pdf?arnumber=10405945","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,2]],"date-time":"2024-02-02T00:11:28Z","timestamp":1706832688000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10405945\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,6]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/mwscas57524.2023.10405945","relation":{},"subject":[],"published":{"date-parts":[[2023,8,6]]}}}