{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T01:11:56Z","timestamp":1775092316947,"version":"3.50.1"},"reference-count":27,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2018,12,1]],"date-time":"2018-12-01T00:00:00Z","timestamp":1543622400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"}],"funder":[{"DOI":"10.13039\/100001395","name":"Wisconsin Alumni Research Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100001395","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Emerg. Sel. Topics Circuits Syst."],"published-print":{"date-parts":[[2018,12]]},"DOI":"10.1109\/jetcas.2018.2833383","type":"journal-article","created":{"date-parts":[[2018,5,4]],"date-time":"2018-05-04T18:55:07Z","timestamp":1525460107000},"page":"836-848","source":"Crossref","is-referenced-by-count":19,"title":["Exploring Energy and Accuracy Tradeoff in Structure Simplification of Trained Deep Neural Networks"],"prefix":"10.1109","volume":"8","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4094-3017","authenticated-orcid":false,"given":"Boyu","family":"Zhang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5213-2556","authenticated-orcid":false,"given":"Azadeh","family":"Davoodi","sequence":"additional","affiliation":[]},{"given":"Yu Hen","family":"Hu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"1474","article-title":"Understanding the impact of precision quantization on the accuracy and energy of neural networks","author":"hashemi","year":"2016","journal-title":"Proc Eur Conf Exhib Design Autom Test"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/0893-6080(89)90020-8"},{"key":"ref13","author":"horwitz","year":"0","journal-title":"Energy table for 45nm process"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1991.150536"},{"key":"ref15","author":"jaderberg","year":"2014","journal-title":"Speeding up convolutional neural networks with low rank expansions"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2654889"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3079856.3080246"},{"key":"ref18","article-title":"Learning multiple layers of features from tiny images","author":"krizhevsky","year":"2009"},{"key":"ref19","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref4","first-page":"2148","article-title":"Predicting parameters in deep learning","author":"denil","year":"2013","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref27","author":"yang","year":"2016","journal-title":"Designing Energy-Efficient Convolutional Neural Networks using Energy-Aware Pruning"},{"key":"ref3","first-page":"598","article-title":"Optimal brain damage","volume":"2","author":"le cun","year":"1990","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ASPDAC.2017.7858303"},{"key":"ref5","first-page":"1269","article-title":"Exploiting linear structure within convolutional networks for efficient evaluation","author":"denton","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref8","author":"golub","year":"1989","journal-title":"Matrix Computations"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/BF01436075"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3007787.3001177"},{"key":"ref9","first-page":"1135","article-title":"Learning both weights and connections for efficient neural network","author":"han","year":"2015","journal-title":"Proc Annu Conf Neural Inf Process Syst"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1137\/1.9781611973068.105"},{"key":"ref20","first-page":"950","article-title":"A simple weight decay can improve generalization","volume":"4","author":"krogh","year":"1995","journal-title":"Advances in neural information processing systems"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.1991.155331"},{"key":"ref24","first-page":"17","article-title":"Variation-tolerant architectures for convolutional neural networks in the near threshold voltage regime","author":"lin","year":"2016","journal-title":"Proc IEEE Int Workshop Signal Process Syst"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ASPDAC.2017.7858306"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/IEMBS.1989.96576"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1992.4.4.473"}],"container-title":["IEEE Journal on Emerging and Selected Topics in Circuits and Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5503868\/8571018\/08354792.pdf?arnumber=8354792","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,27]],"date-time":"2022-01-27T02:17:47Z","timestamp":1643249867000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8354792\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,12]]},"references-count":27,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/jetcas.2018.2833383","relation":{},"ISSN":["2156-3357","2156-3365"],"issn-type":[{"value":"2156-3357","type":"print"},{"value":"2156-3365","type":"electronic"}],"subject":[],"published":{"date-parts":[[2018,12]]}}}