{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,9]],"date-time":"2026-02-09T23:49:07Z","timestamp":1770680947414,"version":"3.49.0"},"reference-count":69,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2023,3,1]],"date-time":"2023-03-01T00:00:00Z","timestamp":1677628800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,3,1]],"date-time":"2023-03-01T00:00:00Z","timestamp":1677628800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,3,1]],"date-time":"2023-03-01T00:00:00Z","timestamp":1677628800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"French National Association for Research and Technology","award":["C20\/1396"],"award-info":[{"award-number":["C20\/1396"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2023,3,1]]},"DOI":"10.1109\/tpami.2022.3179616","type":"journal-article","created":{"date-parts":[[2022,6,2]],"date-time":"2022-06-02T19:42:09Z","timestamp":1654198929000},"page":"3664-3676","source":"Crossref","is-referenced-by-count":17,"title":["RED++ : Data-Free Pruning of Deep Neural Networks via Input Splitting and Output Merging"],"prefix":"10.1109","volume":"45","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4318-612X","authenticated-orcid":false,"given":"Edouard","family":"Yvinec","sequence":"first","affiliation":[{"name":"Sorbonne Universit&#x00E9;, CNRS, Institut des Syst&#x00E8;mes Intelligents et de Robotique, ISIR, Paris, France"}]},{"given":"Arnaud","family":"Dapogny","sequence":"additional","affiliation":[{"name":"Datakalab, Paris, France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0627-5844","authenticated-orcid":false,"given":"Matthieu","family":"Cord","sequence":"additional","affiliation":[{"name":"Sorbonne Universit&#x00E9;, CNRS, Institut des Syst&#x00E8;mes Intelligents et de Robotique, ISIR, Paris, France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7802-3673","authenticated-orcid":false,"given":"Kevin","family":"Bailly","sequence":"additional","affiliation":[{"name":"Sorbonne Universit&#x00E9;, CNRS, Institut des Syst&#x00E8;mes Intelligents et de Robotique, ISIR, Paris, France"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.5802\/jtnb.432"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1137\/S0040585X97975447"},{"key":"ref3","first-page":"1","article-title":"Deep rewiring: Training very sparse deep networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bellec"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-68763-2_50"},{"key":"ref5","article-title":"Structured convolutions for efficient neural network design","author":"Bhalgat","year":"2020"},{"key":"ref6","article-title":"What is the state of neural network pruning?","author":"Blalock","year":"2020"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2699184"},{"key":"ref8","article-title":"Escoin: Efficient sparse convolutional neural network inference on GPUs","author":"Chen","year":"2018"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/JSSC.2016.2616357"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/msp.2017.2765695"},{"key":"ref11","article-title":"cudnn: Efficient primitives for deep learning","author":"Chetlur","year":"2014"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00508"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1803.03635"},{"key":"ref15","article-title":"The state of sparsity in deep neural networks","author":"Gale","year":"2019"},{"key":"ref16","first-page":"783","article-title":"OpenVINO deep learning workbench: Comprehensive analysis and tuning of neural networks inference","volume-title":"Proc. IEEE Int. Conf. Comput. Vis. Workshops","author":"Gorbachev"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01204"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/SC41405.2020.00020"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00158"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/309"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/icpr56361.2022.9956237"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.109886"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.5555\/3045118.3045167"},{"key":"ref26","first-page":"585","article-title":"Neuron merging: Compensating for pruned neurons","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Kim"},{"key":"ref27","article-title":"Quantizing deep convolutional networks for efficient inference: A whitepaper","author":"Krishnamoorthi","year":"2018"},{"key":"ref28","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref29","first-page":"5533","article-title":"Inducing and exploiting activation sparsity for fast inference on deep neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kurtz"},{"key":"ref30","first-page":"1","article-title":"A signal propagation perspective for pruning neural networks at initialization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lee"},{"key":"ref31","first-page":"1","article-title":"Pruning filters for efficient convnets","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Li"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1049\/cje.2016.01.023"},{"key":"ref33","first-page":"1","article-title":"Provable filter pruning for efficient neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Liebenwein"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00160"},{"key":"ref35","first-page":"1","article-title":"Dynamic model pruning with feedback","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lin"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2021.3066410"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/AICAS51828.2021.9458578"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1186\/s13640-017-0167-4"},{"key":"ref39","first-page":"1","article-title":"Rethinking the value of network pruning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Liu"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.1982.1056489"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.541"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5954"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00369"},{"key":"ref44","first-page":"4486","article-title":"Same, same but different: Recovering neural network quantization error through weight factorization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Meller"},{"key":"ref45","first-page":"17629","article-title":"Pruning filter in filter","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Meng"},{"key":"ref46","article-title":"Accelerating sparse deep neural networks","author":"Mishra","year":"2021"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00141"},{"key":"ref48","first-page":"1","article-title":"Lookahead: A far-sighted alternative of magnitude-based pruning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Park"},{"key":"ref49","first-page":"1","article-title":"Comparing rewinding and fine-tuning in neural network pruning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Renda"},{"key":"ref50","article-title":"Big Data ethics","volume":"49","author":"Richards","year":"2014","journal-title":"Wake Forest L. Rev."},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.5244\/C.29.31"},{"key":"ref53","first-page":"1","article-title":"And the bit goes down: Revisiting the quantization of neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Stock"},{"key":"ref54","first-page":"6105","article-title":"EfficientNet: Rethinking model scaling for convolutional neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Tan"},{"key":"ref55","first-page":"6377","article-title":"Pruning neural networks without any data by iteratively conserving synaptic flow","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Tanaka"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00498"},{"key":"ref57","first-page":"10936","article-title":"Scop: Scientific control for reliable neural network pruning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Tang"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00010"},{"key":"ref59","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Touvron"},{"key":"ref60","article-title":"Efficient inference with tensorrt","volume":"1","author":"Vanholder","year":"2016","journal-title":"Proc. GPU Technol. Conf."},{"key":"ref61","first-page":"1058","article-title":"Regularization of neural networks using dropconnect","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wan"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00881"},{"key":"ref63","article-title":"Accelerate your cnn from three dimensions: A comprehensive pruning framework","author":"Wang","year":"2020"},{"key":"ref64","first-page":"10820","article-title":"Good subnetworks provably exist: Pruning via greedy forward selection","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ye"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00874"},{"key":"ref66","first-page":"20863","article-title":"RED : Looking for redundancies for data-free structured compression of deep neural networks","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yvinec"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.5244\/C.30.87"},{"key":"ref68","first-page":"7543","article-title":"Improving neural network quantization without retraining using outlier channel splitting","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhao"},{"key":"ref69","first-page":"9865","article-title":"Neuron-level structured pruning using polarization regularizer","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zhuang"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/10036240\/09786782.pdf?arnumber=9786782","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,18]],"date-time":"2024-07-18T06:10:05Z","timestamp":1721283005000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9786782\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,3,1]]},"references-count":69,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2022.3179616","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,3,1]]}}}