{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T18:17:36Z","timestamp":1771697856202,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":21,"publisher":"ACM","license":[{"start":{"date-parts":[[2017,6,18]],"date-time":"2017-06-18T00:00:00Z","timestamp":1497744000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2017,6,18]]},"DOI":"10.1145\/3061639.3062259","type":"proceedings-article","created":{"date-parts":[[2017,6,13]],"date-time":"2017-06-13T12:18:42Z","timestamp":1497356322000},"page":"1-6","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":49,"title":["Hardware-Software Codesign of Accurate, Multiplier-free Deep Neural Networks"],"prefix":"10.1145","author":[{"given":"Hokchhay","family":"Tann","sequence":"first","affiliation":[{"name":"School of Engineering, Brown University, Providence RI"}]},{"given":"Soheil","family":"Hashemi","sequence":"additional","affiliation":[{"name":"School of Engineering, Brown University, Providence RI"}]},{"given":"R. Iris","family":"Bahar","sequence":"additional","affiliation":[{"name":"School of Engineering, Brown University, Providence RI"}]},{"given":"Sherief","family":"Reda","sequence":"additional","affiliation":[{"name":"School of Engineering, Brown University, Providence RI"}]}],"member":"320","published-online":{"date-parts":[[2017,6,18]]},"reference":[{"key":"e_1_3_2_1_1_1","first-page":"2654","volume-title":"NIPS","author":"Ba J.","year":"2014","unstructured":"J. Ba and R. Caruana . Do deep nets really need to be deep ? In NIPS , pages 2654 -- 2662 , 2014 . J. Ba and R. Caruana. Do deep nets really need to be deep? In NIPS, pages 2654--2662, 2014."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/1150402.1150464"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/2541940.2541967"},{"key":"e_1_3_2_1_4_1","volume-title":"Low precision arithmetic for deep learning. arXiv preprint arXiv:1412.7024","author":"Courbariaux M.","year":"2014","unstructured":"M. Courbariaux , Y. Bengio , and J.-P. David . Low precision arithmetic for deep learning. arXiv preprint arXiv:1412.7024 , 2014 . M. Courbariaux, Y. Bengio, and J.-P. David. Low precision arithmetic for deep learning. arXiv preprint arXiv:1412.7024, 2014."},{"key":"e_1_3_2_1_5_1","volume-title":"Feb.","author":"Courbariaux M.","year":"2016","unstructured":"M. Courbariaux , I. Hubara , D. Soudry , R. El-Yaniv , and Y. Bengio . Binarized Neural Networks: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1. ArXiv preprint arXiv:1602.02830 , Feb. 2016 . M. Courbariaux, I. Hubara, D. Soudry, R. El-Yaniv, and Y. Bengio. Binarized Neural Networks: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1. ArXiv preprint arXiv:1602.02830, Feb. 2016."},{"key":"e_1_3_2_1_6_1","volume-title":"Deep learning with limited numerical precision. CoRR, abs\/1502.02551","author":"Gupta S.","year":"2015","unstructured":"S. Gupta , A. Agrawal , K. Gopalakrishnan , and P. Narayanan . Deep learning with limited numerical precision. CoRR, abs\/1502.02551 , 2015 . S. Gupta, A. Agrawal, K. Gopalakrishnan, and P. Narayanan. 
Deep learning with limited numerical precision. CoRR, abs\/1502.02551, 2015."},{"key":"e_1_3_2_1_7_1","volume-title":"Ristretto: Hardware-oriented approximation of convolutional neural networks. CoRR, abs\/1605.06402","author":"Gysel P.","year":"2016","unstructured":"P. Gysel . Ristretto: Hardware-oriented approximation of convolutional neural networks. CoRR, abs\/1605.06402 , 2016 . P. Gysel. Ristretto: Hardware-oriented approximation of convolutional neural networks. CoRR, abs\/1605.06402, 2016."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.5555\/3130379.3130725"},{"key":"e_1_3_2_1_9_1","volume-title":"Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531","author":"Hinton G.","year":"2015","unstructured":"G. Hinton , O. Vinyals , and J. Dean . Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 , 2015 . G. Hinton, O. Vinyals, and J. Dean. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531, 2015."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/SiPS.2014.6986082"},{"key":"e_1_3_2_1_11_1","volume-title":"Caffe: Convolutional architecture for fast feature embedding. arXiv preprint arXiv:1408.5093","author":"Jia Y.","year":"2014","unstructured":"Y. Jia , E. Shelhamer , J. Donahue , S. Karayev , J. Long , R. Girshick , S. Guadarrama , and T. Darrell . Caffe: Convolutional architecture for fast feature embedding. arXiv preprint arXiv:1408.5093 , 2014 . Y. Jia, E. Shelhamer, J. Donahue, S. Karayev, J. Long, R. Girshick, S. Guadarrama, and T. Darrell. Caffe: Convolutional architecture for fast feature embedding. arXiv preprint arXiv:1408.5093, 2014."},{"key":"e_1_3_2_1_12_1","volume-title":"Learning multiple layers of features from tiny images. Technical report","author":"Krizhevsky A.","year":"2009","unstructured":"A. Krizhevsky and G. Hinton . Learning multiple layers of features from tiny images. Technical report , University of Toronto , Apr. 2009 . A. Krizhevsky and G. Hinton. Learning multiple layers of features from tiny images. Technical report, University of Toronto, Apr. 2009."},{"key":"e_1_3_2_1_13_1","volume-title":"Proc. NIPS","author":"Krizhevsky A.","year":"2012","unstructured":"A. Krizhevsky , I. Sutskever , and G. E. Hinton . Imagenet classification with deep convolutional neural networks . In Proc. NIPS , 2012 . A. Krizhevsky, I. Sutskever, and G. E. Hinton. Imagenet classification with deep convolutional neural networks. In Proc. NIPS, 2012."},{"key":"e_1_3_2_1_14_1","volume-title":"Fitnets: Hints for thin deep nets. CoRR, abs\/1412.6550","author":"Romero A.","year":"2014","unstructured":"A. Romero , N. Ballas , S. E. Kahou , A. Chassang , C. Gatta , and Y. Bengio . Fitnets: Hints for thin deep nets. CoRR, abs\/1412.6550 , 2014 . A. Romero, N. Ballas, S. E. Kahou, A. Chassang, C. Gatta, and Y. Bengio. Fitnets: Hints for thin deep nets. CoRR, abs\/1412.6550, 2014."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/ASAP.2009.25"},{"key":"e_1_3_2_1_17_1","volume-title":"Multiplier-less artificial neurons exploiting error resiliency for energy-efficient neural computing. CoRR, abs\/1602.08557","author":"Sarwar S. S.","year":"2016","unstructured":"S. S. Sarwar , S. Venkataramani , A. Raghunathan , and K. Roy . Multiplier-less artificial neurons exploiting error resiliency for energy-efficient neural computing. CoRR, abs\/1602.08557 , 2016 . S. S. 
Sarwar, S. Venkataramani, A. Raghunathan, and K. Roy. Multiplier-less artificial neurons exploiting error resiliency for energy-efficient neural computing. CoRR, abs\/1602.08557, 2016."},{"key":"e_1_3_2_1_18_1","first-page":"963","volume-title":"Proc. NIPS","author":"Soudry D.","year":"2014","unstructured":"D. Soudry , I. Hubara , and R. Meir . Expectation backpropagation: Parameter-free training of multilayer neural networks with continuous or discrete weights . In Proc. NIPS , pages 963 -- 971 , 2014 . D. Soudry, I. Hubara, and R. Meir. Expectation backpropagation: Parameter-free training of multilayer neural networks with continuous or discrete weights. In Proc. NIPS, pages 963--971, 2014."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/78.229903"},{"key":"e_1_3_2_1_20_1","volume-title":"Runtime configurable deep neural networks for energy-accuracy trade-off. CoRR, abs\/1607.05418","author":"Tann H.","year":"2016","unstructured":"H. Tann , S. Hashemi , R. I. Bahar , and S. Reda . Runtime configurable deep neural networks for energy-accuracy trade-off. CoRR, abs\/1607.05418 , 2016 . H. Tann, S. Hashemi, R. I. Bahar, and S. Reda. Runtime configurable deep neural networks for energy-accuracy trade-off. CoRR, abs\/1607.05418, 2016."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/2684746.2689060"}],"event":{"name":"DAC '17: The 54th Annual Design Automation Conference 2017","location":"Austin TX USA","acronym":"DAC '17","sponsor":["EDAC Electronic Design Automation Consortium","SIGDA ACM Special Interest Group on Design Automation","IEEE-CEDA","SIGBED ACM Special Interest Group on Embedded Systems"]},"container-title":["Proceedings of the 54th Annual Design Automation Conference 2017"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3061639.3062259","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3061639.3062259","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T03:36:35Z","timestamp":1750217795000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3061639.3062259"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,6,18]]},"references-count":21,"alternative-id":["10.1145\/3061639.3062259","10.1145\/3061639"],"URL":"https:\/\/doi.org\/10.1145\/3061639.3062259","relation":{},"subject":[],"published":{"date-parts":[[2017,6,18]]},"assertion":[{"value":"2017-06-18","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
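
The object above is a Crossref REST API work record: the work's metadata (title, authors, DOI, venue, references) sits inside the "message" envelope. As a minimal sketch of how such a record can be retrieved and read, the following Python snippet, assuming network access to the public api.crossref.org endpoint, fetches the same DOI and prints a few of the fields that appear above. The script itself is illustrative and not part of the record.

# Minimal sketch: fetch this Crossref work record and print selected fields.
# Assumes the public endpoint https://api.crossref.org/works/{DOI} is reachable.
import json
import urllib.request

DOI = "10.1145/3061639.3062259"
URL = f"https://api.crossref.org/works/{DOI}"

# Crossref asks polite clients to identify themselves; the contact address
# below is a placeholder, not taken from the record.
req = urllib.request.Request(URL, headers={"User-Agent": "example-script (mailto:you@example.org)"})
with urllib.request.urlopen(req) as resp:
    record = json.load(resp)

# The work metadata is wrapped under the "message" key, as in the record above.
work = record["message"]

title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))
venue = work["container-title"][0]
cited_by = work.get("is-referenced-by-count", 0)
n_refs = work.get("references-count", len(work.get("reference", [])))

print(f'{authors}. "{title}". {venue}.')
print(f"DOI: {work['DOI']}  |  cited by: {cited_by}  |  references: {n_refs}")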