{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T15:08:43Z","timestamp":1730214523474,"version":"3.28.0"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,11,29]],"date-time":"2020-11-29T00:00:00Z","timestamp":1606608000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,11,29]],"date-time":"2020-11-29T00:00:00Z","timestamp":1606608000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,11,29]],"date-time":"2020-11-29T00:00:00Z","timestamp":1606608000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,11,29]]},"DOI":"10.1109\/dicta51227.2020.9363347","type":"proceedings-article","created":{"date-parts":[[2021,3,1]],"date-time":"2021-03-01T18:19:27Z","timestamp":1614622767000},"page":"1-6","source":"Crossref","is-referenced-by-count":3,"title":["Max-Variance Convolutional Neural Network Model Compression"],"prefix":"10.1109","author":[{"given":"Tanya","family":"Boone-Sifuentes","sequence":"first","affiliation":[]},{"given":"Antonio","family":"Robles-Kelly","sequence":"additional","affiliation":[]},{"given":"Asef","family":"Nazari","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","article-title":"A novel channel pruning method for deep neural network compression","author":"hu","year":"2018","journal-title":"CoRR"},{"key":"ref32","first-page":"6105","article-title":"Efficientnet: Rethinking model scaling for convolutional neural networks","volume":"97","author":"tan","year":"0","journal-title":"International Conference on Machine Learning ser 
Proceedings of Machine Learning Research"},{"key":"ref31","article-title":"Fractional max-pooling","author":"graham","year":"2014","journal-title":"CoRR"},{"key":"ref30","article-title":"Fast and accurate deep network learning by exponential linear units (elus)","author":"clevert","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.5244\/C.29.41"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref34","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref10","first-page":"177","article-title":"Comparing biases for minimal network construction with back-propagation","author":"hanson","year":"1989","journal-title":"Advances in neural information processing systems"},{"key":"ref11","first-page":"164","article-title":"Second order derivatives for network pruning: Optimal brain surgeon","author":"hassibi","year":"1993","journal-title":"Advances in neural information processing systems"},{"key":"ref12","article-title":"Gradient descent provably optimizes over-parameterized neural networks","author":"du","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref13","first-page":"2148","article-title":"Pre-dicting parameters in deep learning","author":"denil","year":"2013","journal-title":"Advances in neural information processing systems"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.5244\/C.29.31"},{"key":"ref15","article-title":"Deep compression: Compressing deep neural network with pruning, trained quantization and huffman coding","author":"han","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref16","first-page":"1135","article-title":"Learning both weights and connections for efficient neural 
network","author":"han","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref17","first-page":"598","article-title":"Optimal brain damage","author":"lecun","year":"1990","journal-title":"Advances in neural information processing systems"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_40"},{"key":"ref19","first-page":"2285","article-title":"Compressing neural networks with the hashing trick","author":"chen","year":"0","journal-title":"International Conference Machine Learning"},{"key":"ref28","article-title":"N2N learning: Network to network compression via policy gradient reinforcement learning","author":"ashok","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.220"},{"key":"ref27","article-title":"Pruning at a glance: Global neural pruning for model compression","author":"salama","year":"2019","journal-title":"CoRR"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10593-2_13"},{"key":"ref6","first-page":"2181","article-title":"Runtime neural pruning","author":"lin","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref29","article-title":"Data-driven compression of convolutional neural networks","author":"pahwa","year":"2019","journal-title":"CoRR"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2765695"},{"key":"ref8","article-title":"Distilling the knowledge in a neural network","author":"hinton","year":"0","journal-title":"Advances in Neural Information Processing Systems Deep Learning Workshop"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2873305"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638963"},{"key":"ref9","first-page":"5977","article-title":"Deep roots: Improving cnn efficiency with hierarchical filter 
groups","author":"ioannou","year":"2016","journal-title":"Computer Vision and Pattern Recognition"},{"key":"ref1","article-title":"Imagenet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Advances in neural information processing systems"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3299874.3319492"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/480"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1561\/2200000016"},{"key":"ref24","first-page":"249","article-title":"Understanding the difficulty of training deep feedforward neural networks","volume":"9","author":"glorot","year":"0","journal-title":"International Conference on Artificial Intelligence and Statistics ser JMLR Proceedings"},{"key":"ref23","article-title":"Learning in modular systems","author":"bradley","year":"2009","journal-title":"Carnegie Mellon University"},{"key":"ref26","article-title":"Pruning convolutional neural networks for resource efficient transfer learning","author":"molchanov","year":"2016","journal-title":"CoRR"},{"key":"ref25","article-title":"Pruning filters for efficient convnets","author":"li","year":"2016","journal-title":"CoRR"}],"event":{"name":"2020 Digital Image Computing: Techniques and Applications (DICTA)","start":{"date-parts":[[2020,11,29]]},"location":"Melbourne, Australia","end":{"date-parts":[[2020,12,2]]}},"container-title":["2020 Digital Image Computing: Techniques and Applications 
(DICTA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9363348\/9363346\/09363347.pdf?arnumber=9363347","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,27]],"date-time":"2022-06-27T11:54:45Z","timestamp":1656330885000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9363347\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,11,29]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/dicta51227.2020.9363347","relation":{},"subject":[],"published":{"date-parts":[[2020,11,29]]}}}