{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,22]],"date-time":"2026-01-22T02:24:16Z","timestamp":1769048656258,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":33,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,4,25]],"date-time":"2022-04-25T00:00:00Z","timestamp":1650844800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"SFI\/12\/RC\/2289_P2"},{"name":"SFI\/16\/RC\/3918"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,4,25]]},"DOI":"10.1145\/3477314.3507135","type":"proceedings-article","created":{"date-parts":[[2022,5,7]],"date-time":"2022-05-07T00:37:36Z","timestamp":1651883856000},"page":"246-254","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["ElastiQuant"],"prefix":"10.1145","author":[{"given":"Bharath","family":"Sudharsan","sequence":"first","affiliation":[{"name":"NUI Galway, Ireland"}]},{"given":"John G.","family":"Breslin","sequence":"additional","affiliation":[{"name":"NUI Galway, Ireland"}]},{"given":"Muhammad Intizar","family":"Ali","sequence":"additional","affiliation":[{"name":"Dublin City University, Ireland"}]},{"given":"Peter","family":"Corcoran","sequence":"additional","affiliation":[{"name":"NUI Galway, Ireland"}]},{"given":"Rajiv","family":"Ranjan","sequence":"additional","affiliation":[{"name":"Newcastle University, Newcastle upon Tyne, U.K"}]}],"member":"320","published-online":{"date-parts":[[2022,5,6]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"QSGD: Communication-efficient SGD via gradient quantization and encoding. Neural Information Processing Systems (NIPS).","author":"Alistarh Dan","year":"2017","unstructured":"Dan Alistarh, Demjan Grubic, Jerry Li, Ryota Tomioka, and Milan Vojnovic. 2017. QSGD: Communication-efficient SGD via gradient quantization and encoding. Neural Information Processing Systems (NIPS)."},{"key":"e_1_3_2_1_2_1","volume-title":"International Conference on Machine Learning (ICML).","author":"Bernstein Jeremy","year":"2018","unstructured":"Jeremy Bernstein, Yu-Xiang Wang, Kamyar Azizzadenesheli, and Animashree Anandkumar. 2018. signSGD: Compressed optimisation for non-convex problems. In International Conference on Machine Learning (ICML)."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11728"},{"key":"e_1_3_2_1_4_1","unstructured":"Wei Dai Eric P Xing et al. 2018. Toward understanding the impact of staleness in distributed machine learning. arXiv preprint."},{"key":"e_1_3_2_1_5_1","unstructured":"Christopher M De Sa Ce Zhang Kunle Olukotun and Christopher R\u00e9. 2015. Taming the wild: A unified analysis of hogwild-style algorithms. In Neural Information Processing Systems (NIPS)."},{"key":"e_1_3_2_1_6_1","unstructured":"Fartash Faghri Iman Tabrizian Ilia Markov Dan Alistarh Daniel Roy and Ali Ramezani-Kebrya. 2020. Adaptive Gradient Quantization for Data-Parallel SGD. In Neural information processing systems (NIPS)."},{"key":"e_1_3_2_1_7_1","volume-title":"Marco Canini, and Peter Richt\u00e1rik.","author":"Horvath Samuel","year":"2019","unstructured":"Samuel Horvath, Chen-Yu Ho, Ludovit Horvath, Atal Narayan Sahu, Marco Canini, and Peter Richt\u00e1rik. 2019. Natural compression for distributed deep learning. arXiv preprint."},{"key":"e_1_3_2_1_8_1","volume-title":"Loss-aware Weight Quantization of Deep Networks. In 6th International Conference on Learning Representations (ICLR).","author":"Hou Lu","unstructured":"Lu Hou and James T. Kwok. 2018. Loss-aware Weight Quantization of Deep Networks. In 6th International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"crossref","unstructured":"Arthur Jochems et al. 2016. Distributed learning: developing a predictive model based on data from multiple hospitals without data leaving the hospital-a real life proof of concept. Radiotherapy and Oncology.","DOI":"10.1016\/S0167-8140(16)30111-6"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijrobp.2017.04.021"},{"key":"e_1_3_2_1_11_1","volume-title":"International Conference on Machine Learning (ICML).","author":"Karimireddy Sai Praneeth","year":"2019","unstructured":"Sai Praneeth Karimireddy, Quentin Rebjock, Sebastian Stich, and Martin Jaggi. 2019. Error feedback fixes signsgd and other gradient compression schemes. In International Conference on Machine Learning (ICML)."},{"key":"e_1_3_2_1_12_1","volume-title":"Lognet: Energy-efficient neural networks using logarithmic computation","author":"Lee Edward H","year":"2017","unstructured":"Edward H Lee, Daisuke Miyashita, Elaina Chai, Boris Murmann, and S Simon Wong. 2017. Lognet: Energy-efficient neural networks using logarithmic computation. In IEEE ICASSP."},{"key":"e_1_3_2_1_13_1","volume-title":"International Conference on Machine Learning (ICML).","author":"Lian Xiangru","year":"2018","unstructured":"Xiangru Lian, Wei Zhang, Ce Zhang, and Ji Liu. 2018. ADPSGD Asynchronous decentralized parallel stochastic gradient descent. In International Conference on Machine Learning (ICML)."},{"key":"e_1_3_2_1_14_1","unstructured":"Yujun Lin Song Han Huizi Mao Yu Wang and William J Dally. 2017. Deep gradient compression: Reducing the communication bandwidth for distributed training. arXiv preprint."},{"key":"e_1_3_2_1_15_1","article-title":"NUQSGD: Provably Communication-efficient Data-parallel SGD via Nonuniform Quantization","author":"Ramezani-Kebrya Ali","year":"2021","unstructured":"Ali Ramezani-Kebrya, Fartash Faghri, Ilya Markov, Vitalii Aksenov, Dan Alistarh, and Daniel M Roy. 2021. NUQSGD: Provably Communication-efficient Data-parallel SGD via Nonuniform Quantization. Journal of Machine Learning Research.","journal-title":"Journal of Machine Learning Research."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8852172"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"crossref","unstructured":"Frank Seide Hao Fu Jasha Droppo Gang Li and Dong Yu. 2014. 1-bit stochastic gradient descent and its application to data-parallel distributed training of speech dnns. In Interspeech.","DOI":"10.21437\/Interspeech.2014-274"},{"key":"e_1_3_2_1_18_1","volume-title":"Scalable distributed DNN training using commodity GPU cloud computing","author":"Strom Nikko","unstructured":"Nikko Strom. 2015. Scalable distributed DNN training using commodity GPU cloud computing. In International Speech Communication Association (ISCA)."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/SWC50871.2021.00023"},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1109\/SWC50871.2021.00024"},{"key":"e_1_3_2_1_21_1","volume-title":"ML-MCU: A Framework to Train ML Classifiers on MCU-based IoT Edge Devices","author":"Sudharsan Bharath","unstructured":"Bharath Sudharsan, John G Breslin, and Muhammad Intizar Ali. 2021. ML-MCU: A Framework to Train ML Classifiers on MCU-based IoT Edge Devices. In IEEE Internet of Things Journal."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-86517-7_2"},{"key":"e_1_3_2_1_23_1","volume-title":"Karan Mitra, Schahram Dustdar, Prem Prakash, and Rajiv Ranjan.","author":"Sudharsan Bharath","year":"2021","unstructured":"Bharath Sudharsan, Omer Rana, Pankesh Patel, John Breslin, Muhammad Intizar Ali, Karan Mitra, Schahram Dustdar, Prem Prakash, and Rajiv Ranjan. 2021. Towards Distributed, Global, Deep Learning using IoT Devices. IEEE Internet Computing."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3485730.3492885"},{"key":"e_1_3_2_1_25_1","unstructured":"Jun Sun Tianyi Chen Georgios B Giannakis and Zaiyue Yang. 2019. Communication-efficient distributed learning via lazily aggregated quantized gradients. In Neural Information Processing Systems (NIPS)."},{"key":"e_1_3_2_1_26_1","unstructured":"Hanlin Tang Shaoduo Gan Ce Zhang Tong Zhang and Ji Liu. 2018. Communication Compression for Decentralized Training. In Neural information processing systems (NIPS)."},{"key":"e_1_3_2_1_27_1","unstructured":"Hanlin Tang Xiangru Lian Ming Yan Ce Zhang and Ji Liu. 2018. D2: Decentralized Training over Decentralized Data. arXiv preprint."},{"key":"e_1_3_2_1_28_1","volume-title":"Analog Communications","author":"Vasudevan Kasturi","unstructured":"Kasturi Vasudevan. 2021. Pulse Code Modulation. In Analog Communications. Springer."},{"key":"e_1_3_2_1_29_1","volume-title":"Sai Praneeth Karinireddy, and Martin Jaggi","author":"Vogels Thijs","year":"2019","unstructured":"Thijs Vogels, Sai Praneeth Karinireddy, and Martin Jaggi. 2019. PowerSGD: Practical low-rank gradient compression for distributed optimization. Neural Information Processing Systems (NIPS)."},{"key":"e_1_3_2_1_30_1","volume-title":"ATOMO: Communication-efficient Learning via Atomic Sparsification. In Neural Information Processing Systems (NIPS).","author":"Wang Hongyi","year":"2018","unstructured":"Hongyi Wang, Scott Sievert, Shengchao Liu, Zachary Charles, Dimitris Papailiopoulos, and Stephen Wright. 2018. ATOMO: Communication-efficient Learning via Atomic Sparsification. In Neural Information Processing Systems (NIPS)."},{"key":"e_1_3_2_1_31_1","unstructured":"Jianyu Wang and Gauri Joshi. 2019. Adaptive Communication Strategies for Best Error-Runtime Trade-offs in Communication-Efficient Distributed SGD. In Neural Information Processing Systems (NIPS)."},{"key":"e_1_3_2_1_32_1","volume-title":"Terngrad: Ternary gradients to reduce communication in distributed deep learning. In Neural Information Processing Systems (NIPS).","author":"Wen Wei","year":"2017","unstructured":"Wei Wen, Cong Xu, Feng Yan, Chunpeng Wu, Yandan Wang, Yiran Chen, and Hai Li. 2017. Terngrad: Ternary gradients to reduce communication in distributed deep learning. In Neural Information Processing Systems (NIPS)."},{"key":"e_1_3_2_1_33_1","volume-title":"Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients. arXiv preprint.","author":"Zhou Shuchang","year":"2016","unstructured":"Shuchang Zhou, Yuheng Zou, et al. 2016. Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients. arXiv preprint."}],"event":{"name":"SAC '22: The 37th ACM\/SIGAPP Symposium on Applied Computing","location":"Virtual Event","acronym":"SAC '22","sponsor":["SIGAPP ACM Special Interest Group on Applied Computing"]},"container-title":["Proceedings of the 37th ACM\/SIGAPP Symposium on Applied Computing"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3477314.3507135","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3477314.3507135","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:31:29Z","timestamp":1750188689000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3477314.3507135"}},"subtitle":["elastic quantization strategy for communication efficient distributed machine learning in IoT"],"short-title":[],"issued":{"date-parts":[[2022,4,25]]},"references-count":33,"alternative-id":["10.1145\/3477314.3507135","10.1145\/3477314"],"URL":"https:\/\/doi.org\/10.1145\/3477314.3507135","relation":{},"subject":[],"published":{"date-parts":[[2022,4,25]]},"assertion":[{"value":"2022-05-06","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}