{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:31:16Z","timestamp":1750221076833,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":29,"publisher":"ACM","license":[{"start":{"date-parts":[[2018,11,5]],"date-time":"2018-11-05T00:00:00Z","timestamp":1541376000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2018,11,5]]},"DOI":"10.1145\/3240765.3240799","type":"proceedings-article","created":{"date-parts":[[2018,11,6]],"date-time":"2018-11-06T13:36:57Z","timestamp":1541511417000},"page":"1-8","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":10,"title":["DIMA"],"prefix":"10.1145","author":[{"given":"Shaahin","family":"Angizi","sequence":"first","affiliation":[{"name":"University of Central Florida"}]},{"given":"Zhezhi","family":"He","sequence":"additional","affiliation":[{"name":"University of Central Florida"}]},{"given":"Deliang","family":"Fan","sequence":"additional","affiliation":[{"name":"University of Central Florida"}]}],"member":"320","published-online":{"date-parts":[[2018,11,5]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/2744769.2744788"},{"key":"e_1_3_2_1_2_1","first-page":"236","author":"Andri R.","year":"2016","unstructured":"R. Andri et al., \"Yodann: An ultra-low power convolutional neural network accelerator based on binary weights,\" in ISVLSI. IEEE, 2016, pp. 236--241.","journal-title":"ISVLSI. IEEE"},{"key":"e_1_3_2_1_3_1","volume-title":"Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding,\" in ICLR'16","author":"Han S.","year":"2015","unstructured":"S. Han et al., \"Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding,\" in ICLR'16, 2015."},{"key":"e_1_3_2_1_4_1","volume-title":"Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients,\" arXiv preprint:1606.06160","author":"Zhou S.","year":"2016","unstructured":"S. Zhou et al., \"Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients,\" arXiv preprint:1606.06160, 2016."},{"key":"e_1_3_2_1_5_1","first-page":"525","volume-title":"Xnor-net: Imagenet classification using binary convolutional neural networks,\" in European Conference on Computer Vision","author":"Rastegari M.","year":"2016","unstructured":"M. Rastegari et al., \"Xnor-net: Imagenet classification using binary convolutional neural networks,\" in European Conference on Computer Vision. Springer, 2016, pp. 525--542."},{"key":"e_1_3_2_1_6_1","volume-title":"Convolutional neural networks with low-rank regularization,\" arXiv preprint arXiv:1511.06067","author":"Tai C.","year":"2015","unstructured":"C. Tai et al., \"Convolutional neural networks with low-rank regularization,\" arXiv preprint arXiv:1511.06067, 2015."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA.2016.13"},{"key":"e_1_3_2_1_8_1","first-page":"288","volume-title":"ACM","author":"Li S.","year":"2017","unstructured":"S. Li et al., \"Drisa: A dram-based reconfigurable in-situ accelerator,\" in Micro. ACM, 2017, pp. 288--301."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.5555\/3201607.3201631"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1145\/2897937.2898064"},{"key":"e_1_3_2_1_11_1","first-page":"481","volume-title":"2017 IEEE International Symposium on. IEEE","author":"Aga S.","year":"2017","unstructured":"S. Aga et al., \"Compute caches,\" in High Performance Computer Architecture (HPCA), 2017 IEEE International Symposium on. IEEE, 2017, pp. 481--492."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/1555815.1555758"},{"key":"e_1_3_2_1_13_1","volume-title":"Spin-transfer torque devices for logic and memory: Prospects and perspectives,\" IEEE TCAD","author":"Fong X.","year":"2016","unstructured":"X. Fong et al., \"Spin-transfer torque devices for logic and memory: Prospects and perspectives,\" IEEE TCAD, vol. 35, 2016."},{"key":"e_1_3_2_1_14_1","volume-title":"IEEE","author":"Chung S.-W.","year":"2016","unstructured":"S.-W. Chung et al., \"4gbit density stt-mram using perpendicular mtj realized with compact cell structure,\" in IEDM. IEEE, 2016."},{"key":"e_1_3_2_1_16_1","volume-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications,\" arXiv preprint:1704.04861","author":"Howard A. G.","year":"2017","unstructured":"A. G. Howard et al., \"Mobilenets: Efficient convolutional neural networks for mobile vision applications,\" arXiv preprint:1704.04861, 2017."},{"key":"e_1_3_2_1_17_1","volume-title":"Deep learning with depthwise separable convolutions,\" arXiv preprint:1610.02357","author":"Chollet F.","year":"2016","unstructured":"F. Chollet, \"Xception: Deep learning with depthwise separable convolutions,\" arXiv preprint:1610.02357, 2016."},{"key":"e_1_3_2_1_18_1","volume-title":"Binarized neural networks: Training deep neural networks with weights and activations constrained to +1 or-1,\" arXiv:1602.02830","author":"Matthieu C.","year":"2016","unstructured":"C. Matthieu et al., \"Binarized neural networks: Training deep neural networks with weights and activations constrained to +1 or-1,\" arXiv:1602.02830, 2016."},{"key":"e_1_3_2_1_19_1","first-page":"770","author":"He K.","year":"2016","unstructured":"K. He et al., \"Deep residual learning for image recognition,\" in Proceedings of the IEEE CVPR, 2016, pp. 770--778.","journal-title":"Proceedings of the IEEE CVPR"},{"key":"e_1_3_2_1_20_1","volume-title":"Estimating or propagating gradients through stochastic neurons for conditional computation,\" arXiv:1308.3432","author":"Bengio Y.","year":"2013","unstructured":"Y. Bengio et al., \"Estimating or propagating gradients through stochastic neurons for conditional computation,\" arXiv:1308.3432, 2013."},{"key":"e_1_3_2_1_21_1","volume-title":"Spin transfer torque devices utilizing the giant spin hall effect of tungsten,\" Applied Physics Letters","author":"Pai C.-F.","year":"2012","unstructured":"C.-F. Pai et al., \"Spin transfer torque devices utilizing the giant spin hall effect of tungsten,\" Applied Physics Letters, 2012."},{"key":"e_1_3_2_1_22_1","first-page":"45","author":"Angizi S.","year":"2017","unstructured":"S. Angizi et al., \"Rimpa: A new reconfigurable dual-mode in-memory processing architecture with spin hall effect-driven domain wall motion device,\" in ISVLSI. IEEE, 2017, pp. 45--50.","journal-title":"ISVLSI. IEEE"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.5555\/2969442.2969588"},{"key":"e_1_3_2_1_24_1","volume-title":"Reading digits in natural images with unsupervised feature learning,\" in NIPS workshop","author":"Netzer Y.","year":"2011","unstructured":"Y. Netzer et al., \"Reading digits in natural images with unsupervised feature learning,\" in NIPS workshop, vol. 2011, 2011, p. 5."},{"key":"e_1_3_2_1_25_1","first-page":"97","author":"He Z.","year":"2017","unstructured":"Z. He et al., \"High performance and energy-efficient in-memory computing architecture based on sot-mram,\" in NANOARCH. IEEE, 2017, pp. 97--102.","journal-title":"NANOARCH. IEEE"},{"key":"e_1_3_2_1_26_1","first-page":"51","author":"Fong X.","year":"2011","unstructured":"X. Fong, S. K. Gupta et al., \"Knack: A hybrid spin-charge mixed-mode simulator for evaluating different genres of spin-transfer torque mram bit-cells,\" in SISPAD. IEEE, 2011, pp. 51--54.","journal-title":"SISPAD. IEEE"},{"key":"e_1_3_2_1_27_1","unstructured":"(2011) Ncsu eda freepdk45. {Online}. Available: http:\/\/www.eda.ncsu.edu\/wiki\/FreePDK45:Contents"},{"key":"e_1_3_2_1_28_1","first-page":"15","volume-title":"Nvsim: A circuit-level performance, energy, and area model for emerging non-volatile memory,\" in Emerging Memory Technologies","author":"Dong X.","year":"2014","unstructured":"X. Dong et al., \"Nvsim: A circuit-level performance, energy, and area model for emerging non-volatile memory,\" in Emerging Memory Technologies. Springer, 2014, pp. 15--50."},{"key":"e_1_3_2_1_29_1","unstructured":"S. D. C. P. V.. Synopsys Inc."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.5555\/2492708.2492719"}],"event":{"name":"ICCAD '18: IEEE\/ACM INTERNATIONAL CONFERENCE ON COMPUTER-AIDED DESIGN","sponsor":["IEEE-EDS Electronic Devices Society","IEEE CAS","IEEE CEDA"],"location":"San Diego California","acronym":"ICCAD '18"},"container-title":["Proceedings of the International Conference on Computer-Aided Design"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3240765.3240799","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3240765.3240799","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T00:57:33Z","timestamp":1750208253000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3240765.3240799"}},"subtitle":["a &lt;u&gt;d&lt;\/u&gt;epthwise CNN &lt;u&gt;i&lt;\/u&gt;n-&lt;u&gt;m&lt;\/u&gt;emory &lt;u&gt;a&lt;\/u&gt;ccelerator"],"short-title":[],"issued":{"date-parts":[[2018,11,5]]},"references-count":29,"alternative-id":["10.1145\/3240765.3240799","10.1145\/3240765"],"URL":"https:\/\/doi.org\/10.1145\/3240765.3240799","relation":{},"subject":[],"published":{"date-parts":[[2018,11,5]]},"assertion":[{"value":"2018-11-05","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}