{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,7]],"date-time":"2026-01-07T18:44:43Z","timestamp":1767811483463,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":20,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,1,16]],"date-time":"2023-01-16T00:00:00Z","timestamp":1673827200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"RIE2020 Industry Alignment Fund – Industry Collaboration Projects (IAF-ICP) Funding Initiative","award":["I1801E0028"],"award-info":[{"award-number":["I1801E0028"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,1,16]]},"DOI":"10.1145\/3566097.3567856","type":"proceedings-article","created":{"date-parts":[[2023,1,31]],"date-time":"2023-01-31T18:40:49Z","timestamp":1675190449000},"page":"234-239","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["Crossbar-Aligned & Integer-Only Neural Network Compression for Efficient in-Memory Acceleration"],"prefix":"10.1145","author":[{"given":"Shuo","family":"Huai","sequence":"first","affiliation":[{"name":"Nanyang Technological University, Singapore"}]},{"given":"Di","family":"Liu","sequence":"additional","affiliation":[{"name":"Nanyang Technological University, Singapore"}]},{"given":"Xiangzhong","family":"Luo","sequence":"additional","affiliation":[{"name":"Nanyang Technological University, Singapore"}]},{"given":"Hui","family":"Chen","sequence":"additional","affiliation":[{"name":"Nanyang Technological University, Singapore"}]},{"given":"Weichen","family":"Liu","sequence":"additional","affiliation":[{"name":"Nanyang Technological University, Singapore"}]},{"given":"Ravi","family":"Subramaniam","sequence":"additional","affiliation":[{"name":"HP Inc."}]}],"member":"320","published-online":{"date-parts":[[2023,1,31]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"crossref","unstructured":"Pai-Yu Chen et al. 2018. NeuroSim: A circuit-level macro model for benchmarking neuro-inspired architectures in online learning. TCAD (2018).","DOI":"10.1109\/TCAD.2018.2789723"},{"key":"e_1_3_2_1_2_1","unstructured":"Chaoqun Chu et al. 2020. PIM-Prune: fine-grain DCNN pruning for crossbar-based process-in-memory architecture. In 2020 57th DAC. IEEE."},{"key":"e_1_3_2_1_3_1","volume-title":"International Conference on Machine Learning. PMLR, 1675--1685","author":"Simon","unstructured":"Simon Du et al. 2019. Gradient descent finds global minima of deep neural networks. In International Conference on Machine Learning. PMLR, 1675--1685."},{"key":"e_1_3_2_1_4_1","volume-title":"FPCAS: In-memory floating point computations for autonomous systems","author":"Ensan Sina Sayyah","year":"2019","unstructured":"Sina Sayyah Ensan et al. 2019. FPCAS: In-memory floating point computations for autonomous systems. In IJCNN. IEEE, 1--8."},{"key":"e_1_3_2_1_5_1","volume-title":"Why Momentum Really Works. Distill","author":"Goh Gabriel","year":"2017","unstructured":"Gabriel Goh. 2017. Why Momentum Really Works. Distill (2017)."},{"key":"e_1_3_2_1_6_1","unstructured":"Kaiming He et al. 2016. Deep Residual Learning for Image Recognition. In CVPR."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"crossref","unstructured":"Shuo Huai et al. 2021. ZeroBN: Learning Compact Neural Networks For Latency-Critical Edge Systems. In 2021 58th DAC. IEEE.","DOI":"10.1109\/DAC18074.2021.9586309"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"crossref","unstructured":"Benoit Jacob et al. 2018. Quantization and training of neural networks for efficient integer-arithmetic-only inference. In CVPR.","DOI":"10.1109\/CVPR.2018.00286"},{"key":"e_1_3_2_1_9_1","series-title":"Lecture Notes on the Status of IEEE 94720-1776","volume-title":"IEEE standard 754 for binary floating-point arithmetic","author":"Kahan William","year":"1996","unstructured":"William Kahan. 1996. IEEE standard 754 for binary floating-point arithmetic. Lecture Notes on the Status of IEEE 94720-1776 (1996), 11."},{"key":"e_1_3_2_1_10_1","unstructured":"Alex Krizhevsky et al. 2009. Learning multiple layers of features from tiny images. Citeseer."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"crossref","unstructured":"Ling Liang et al. 2018. Crossbar-aware neural network pruning. IEEE Access.","DOI":"10.1109\/ACCESS.2018.2874823"},{"key":"e_1_3_2_1_12_1","unstructured":"Chenchen Liu et al. 2020. Enabling efficient ReRAM-based neural network computing via crossbar structure adaptive optimization. In ISLPED."},{"key":"e_1_3_2_1_13_1","unstructured":"Zhuang Liu et al. 2017. Learning Efficient Convolutional Networks Through Network Slimming. In ICCV."},{"key":"e_1_3_2_1_14_1","unstructured":"Huizi Mao et al. 2017. Exploring the granularity of sparsity in convolutional neural networks. In CVPR. 13--20."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"crossref","unstructured":"Jian Meng et al. 2021. Structured Pruning of RRAM Crossbars for Efficient In-Memory Computing Acceleration of Deep Neural Networks. Trans. Circuits Syst. II Express Briefs (2021).","DOI":"10.1109\/TCSII.2021.3069011"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"crossref","unstructured":"Naveen Muralimanohar et al. 2009. CACTI 6.0: A tool to model large caches. HP laboratories 27 (2009) 28.","DOI":"10.1109\/MM.2008.2"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"crossref","unstructured":"Olga Russakovsky et al. 2015. ImageNet Large Scale Visual Recognition Challenge. IJCV (2015).","DOI":"10.1007\/s11263-015-0816-y"},{"key":"e_1_3_2_1_18_1","article-title":"ISAAC: A convolutional neural network accelerator with in-situ analog arithmetic in crossbars. In ACM SIGARCH","author":"Ali Shafiee","year":"2016","unstructured":"Ali Shafiee et al. 2016. ISAAC: A convolutional neural network accelerator with in-situ analog arithmetic in crossbars. In ACM SIGARCH Comput. Archit. News.","journal-title":"Comput. Archit. News."},{"key":"e_1_3_2_1_19_1","unstructured":"Karen Simonyan et al. 2014. Very Deep Convolutional Networks for Large-Scale Image Recognition. In CVPR."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"crossref","unstructured":"Shimeng Yu et al. 2016. Emerging memory technologies: Recent trends and prospects. IEEE Solid-State Circuits Magazine 8 (2016).","DOI":"10.1109\/MSSC.2016.2638049"}],"event":{"name":"ASPDAC '23: 28th Asia and South Pacific Design Automation Conference","location":"Tokyo Japan","acronym":"ASPDAC '23","sponsor":["SIGDA ACM Special Interest Group on Design Automation","IEEE CEDA","IEICE","IEEE CAS","IPSJ"]},"container-title":["Proceedings of the 28th Asia and South Pacific Design Automation Conference"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3566097.3567856","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3566097.3567856","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,7]],"date-time":"2026-01-07T17:33:42Z","timestamp":1767807222000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3566097.3567856"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,1,16]]},"references-count":20,"alternative-id":["10.1145\/3566097.3567856","10.1145\/3566097"],"URL":"https:\/\/doi.org\/10.1145\/3566097.3567856","relation":{},"subject":[],"published":{"date-parts":[[2023,1,16]]},"assertion":[{"value":"2023-01-31","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}