{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T20:38:48Z","timestamp":1771706328007,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":65,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,11,20]],"date-time":"2024-11-20T00:00:00Z","timestamp":1732060800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,11,20]]},"DOI":"10.1145\/3698038.3698553","type":"proceedings-article","created":{"date-parts":[[2024,11,14]],"date-time":"2024-11-14T06:32:43Z","timestamp":1731565963000},"page":"1012-1031","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":5,"title":["Inshrinkerator: Compressing Deep Learning Training Checkpoints via Dynamic Quantization"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2286-577X","authenticated-orcid":false,"given":"Amey","family":"Agrawal","sequence":"first","affiliation":[{"name":"Georgia Institute of Technology"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-6630-7063","authenticated-orcid":false,"given":"Sameer","family":"Reddy","sequence":"additional","affiliation":[{"name":"Cisco Inc. and Georgia Institute of Technology"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8985-3709","authenticated-orcid":false,"given":"Satwik","family":"Bhattamishra","sequence":"additional","affiliation":[{"name":"University of Oxford"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-7451-2939","authenticated-orcid":false,"given":"Venkata Prabhakara Sarath","family":"Nookala","sequence":"additional","affiliation":[{"name":"Meta Inc. and Georgia Institute of Technology"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-4276-7102","authenticated-orcid":false,"given":"Vidushi","family":"Vashishth","sequence":"additional","affiliation":[{"name":"Google Inc. and Georgia Institute of Technology"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0326-2877","authenticated-orcid":false,"given":"Kexin","family":"Rong","sequence":"additional","affiliation":[{"name":"Georgia Institute of Technology"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-7862-1477","authenticated-orcid":false,"given":"Alexey","family":"Tumanov","sequence":"additional","affiliation":[{"name":"Georgia Institute of Technology"}]}],"member":"320","published-online":{"date-parts":[[2024,11,20]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"2022. OPT 175B Training Logbook. https:\/\/github.com\/facebookresearch\/metaseq\/blob\/main\/projects\/OPT\/chronicles\/OPT175B_Logbook.pdf"},{"key":"e_1_3_2_1_2_1","unstructured":"2023. bert-base-uncased. Hugging Face. https:\/\/huggingface.co\/bert-base-uncased"},{"key":"e_1_3_2_1_3_1","unstructured":"2023. Hugging Face. https:\/\/huggingface.co."},{"key":"e_1_3_2_1_4_1","unstructured":"2023. MosaicML BERT. https:\/\/www.mosaicml.com\/blog\/mosaicbert"},{"key":"e_1_3_2_1_5_1","volume-title":"Clusterability: A theoretical study. In Artificial intelligence and statistics. PMLR, 1--8.","author":"Ackerman Margareta","year":"2009","unstructured":"Margareta Ackerman and Shai Ben-David. 2009. Clusterability: A theoretical study. In Artificial intelligence and statistics. PMLR, 1--8."},{"key":"e_1_3_2_1_6_1","volume-title":"Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms. 
1027--1035","author":"Arthur David","year":"2007","unstructured":"David Arthur and Sergei Vassilvitskii. 2007. K-means++ the advantages of careful seeding. In Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms. 1027--1035."},{"key":"e_1_3_2_1_7_1","volume-title":"Post training 4-bit quantization of convolutional networks for rapid-deployment. Advances in Neural Information Processing Systems 32","author":"Banner Ron","year":"2019","unstructured":"Ron Banner, Yury Nahshan, and Daniel Soudry. 2019. Post training 4-bit quantization of convolutional networks for rapid-deployment. Advances in Neural Information Processing Systems 32 (2019)."},{"key":"e_1_3_2_1_8_1","volume-title":"International Conference on Machine Learning. PMLR, 280--288","author":"Ben-David Shai","year":"2014","unstructured":"Shai Ben-David and Nika Haghtalab. 2014. Clustering in the presence of background noise. In International Conference on Machine Learning. PMLR, 280--288."},{"key":"e_1_3_2_1_9_1","volume-title":"Shivanshu Purohit, USVSN Sai Prashanth, Edward Raff, et al.","author":"Biderman Stella","year":"2023","unstructured":"Stella Biderman, Hailey Schoelkopf, Quentin Anthony, Herbie Bradley, Kyle O'Brien, Eric Hallahan, Mohammad Aflah Khan, Shivanshu Purohit, USVSN Sai Prashanth, Edward Raff, et al. 2023. Pythia: A suite for analyzing large language models across training and scaling. arXiv preprint arXiv:2304.01373 (2023)."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01318"},{"key":"e_1_3_2_1_11_1","volume-title":"On efficient constructions of checkpoints. arXiv preprint arXiv:2009.13003","author":"Chen Yu","year":"2020","unstructured":"Yu Chen, Zhenming Liu, Bin Ren, and Xin Jin. 2020. On efficient constructions of checkpoints. arXiv preprint arXiv:2009.13003 (2020)."},{"key":"e_1_3_2_1_12_1","volume-title":"Towards the limit of network quantization. arXiv preprint arXiv:1612.01543","author":"Choi Yoojin","year":"2016","unstructured":"Yoojin Choi, Mostafa El-Khamy, and Jungwon Lee. 2016. Towards the limit of network quantization. arXiv preprint arXiv:1612.01543 (2016)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00363"},{"key":"e_1_3_2_1_14_1","volume-title":"Binaryconnect: Training deep neural networks with binary weights during propagations. Advances in neural information processing systems 28","author":"Courbariaux Matthieu","year":"2015","unstructured":"Matthieu Courbariaux, Yoshua Bengio, and Jean-Pierre David. 2015. Binaryconnect: Training deep neural networks with binary weights during propagations. Advances in neural information processing systems 28 (2015)."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3400302.3415679"},{"key":"e_1_3_2_1_16_1","volume-title":"Gregory Rogez, and Puneet K Dokania.","author":"Jorge Pau De","year":"2020","unstructured":"Pau De Jorge, Amartya Sanyal, Harkirat S Behl, Philip HS Torr, Gregory Rogez, and Puneet K Dokania. 2020. Progressive skeletonization: Trimming more fat from a network at initialization. arXiv preprint arXiv:2006.09081 (2020)."},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"e_1_3_2_1_18_1","volume-title":"int8 (): 8-bit matrix multiplication for transformers at scale. arXiv preprint arXiv:2208.07339","author":"Dettmers Tim","year":"2022","unstructured":"Tim Dettmers, Mike Lewis, Younes Belkada, and Luke Zettlemoyer. 2022. Llm. 
int8 (): 8-bit matrix multiplication for transformers at scale. arXiv preprint arXiv:2208.07339 (2022)."},{"key":"e_1_3_2_1_19_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_20_1","volume-title":"Lin (Eds.)","volume":"33","author":"Dong Zhen","year":"2020","unstructured":"Zhen Dong, Zhewei Yao, Daiyaan Arfeen, Amir Gholami, Michael W Mahoney, and Kurt Keutzer. 2020. HAWQ-V2: Hessian Aware trace-Weighted Quantization of Neural Networks. In Advances in Neural Information Processing Systems, H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (Eds.), Vol. 33. Curran Associates, Inc., 18518--18529."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00038"},{"key":"e_1_3_2_1_22_1","unstructured":"Alexey Dosovitskiy Lucas Beyer Alexander Kolesnikov Dirk Weissenborn Xiaohua Zhai Thomas Unterthiner Mostafa Dehghani Matthias Minderer Georg Heigold Sylvain Gelly et al. 2020. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)."},{"key":"e_1_3_2_1_23_1","volume-title":"19th USENIX Symposium on Networked Systems Design and Implementation (NSDI 22)","author":"Eisenman Assaf","year":"2022","unstructured":"Assaf Eisenman, Kiran Kumar Matam, Steven Ingram, Dheevatsa Mudigere, Raghuraman Krishnamoorthi, Krishnakumar Nair, Misha Smelyanskiy, and Murali Annavaram. 2022. Check-N-Run: a checkpointing system for training deep learning recommendation models. In 19th USENIX Symposium on Networked Systems Design and Implementation (NSDI 22). 929--943."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1109\/72.963775"},{"key":"e_1_3_2_1_25_1","volume-title":"Adaptive gradient quantization for data-parallel sgd. Advances in neural information processing systems 33","author":"Faghri Fartash","year":"2020","unstructured":"Fartash Faghri, Iman Tabrizian, Ilia Markov, Dan Alistarh, Daniel M Roy, and Ali Ramezani-Kebrya. 2020. Adaptive gradient quantization for data-parallel sgd. Advances in neural information processing systems 33 (2020), 3174--3185."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58536-5_5"},{"key":"e_1_3_2_1_27_1","volume-title":"The lottery ticket hypothesis: Finding sparse, trainable neural networks. arXiv preprint arXiv:1803.03635","author":"Frankle Jonathan","year":"2018","unstructured":"Jonathan Frankle and Michael Carbin. 2018. The lottery ticket hypothesis: Finding sparse, trainable neural networks. arXiv preprint arXiv:1803.03635 (2018)."},{"key":"e_1_3_2_1_28_1","unstructured":"Aaron Gokaslan and Vanya Cohen. 2019. OpenWebText Corpus. http:\/\/Skylion007.github.io\/OpenWebTextCorpus."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.1966.1053907"},{"key":"e_1_3_2_1_30_1","volume-title":"Post-training 4-bit quantization on embedding tables. arXiv preprint arXiv:1911.02079","author":"Guan Hui","year":"2019","unstructured":"Hui Guan, Andrey Malevich, Jiyan Yang, Jongsoo Park, and Hector Yuen. 2019. Post-training 4-bit quantization on embedding tables. 
arXiv preprint arXiv:1911.02079 (2019)."},{"key":"e_1_3_2_1_31_1","volume-title":"Hardware-oriented approximation of convolutional neural networks. arXiv preprint arXiv:1604.03168","author":"Gysel Philipp","year":"2016","unstructured":"Philipp Gysel, Mohammad Motamedi, and Soheil Ghiasi. 2016. Hardware-oriented approximation of convolutional neural networks. arXiv preprint arXiv:1604.03168 (2016)."},{"key":"e_1_3_2_1_32_1","volume-title":"Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149","author":"Han Song","year":"2015","unstructured":"Song Han, Huizi Mao, and William J Dally. 2015. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149 (2015)."},{"key":"e_1_3_2_1_33_1","volume-title":"Learning both weights and connections for efficient neural network. Advances in neural information processing systems 28","author":"Han Song","year":"2015","unstructured":"Song Han, Jeff Pool, John Tran, and William Dally. 2015. Learning both weights and connections for efficient neural network. Advances in neural information processing systems 28 (2015)."},{"key":"e_1_3_2_1_34_1","volume-title":"Deep residual learning for image recognition. arXiv","author":"He Kaiming","year":"2015","unstructured":"Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2015. Deep residual learning for image recognition. arXiv 2015. arXiv preprint arXiv:1512.03385 14 (2015)."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01246-5_46"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00140"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/3404397.3404408"},{"key":"e_1_3_2_1_38_1","volume-title":"Garnett (Eds.)","volume":"29","author":"Hubara Itay","year":"2016","unstructured":"Itay Hubara, Matthieu Courbariaux, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. 2016. Binarized Neural Networks. In Advances in Neural Information Processing Systems, D. Lee, M. Sugiyama, U. Luxburg, I. Guyon, and R. Garnett (Eds.), Vol. 29. Curran Associates, Inc. https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2016\/file\/d8330f857a17c53d217014ee776bfd50-Paper.pdf"},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1109\/JRPROC.1952.273898"},{"key":"e_1_3_2_1_40_1","volume-title":"USENIX Annual Technical Conference. 947--960","author":"Jeon Myeongjae","year":"2019","unstructured":"Myeongjae Jeon, Shivaram Venkataraman, Amar Phanishayee, Junjie Qian, Wencong Xiao, and Fan Yang. 2019. Analysis of Large-Scale Multi-Tenant GPU Clusters for DNN Training Workloads.. In USENIX Annual Technical Conference. 947--960."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2022.3230840"},{"key":"e_1_3_2_1_42_1","volume-title":"NeurIPS 2020 Workshop: Deep Learning through Information Geometry.","author":"Kadambi Prad","year":"2020","unstructured":"Prad Kadambi, Karthikeyan Natesan Ramamurthy, and Visar Berisha. 2020. Comparing fisher information regularization with distillation for dnn quantization. In NeurIPS 2020 Workshop: Deep Learning through Information Geometry."},{"key":"e_1_3_2_1_43_1","unstructured":"nangpt Karpathy. [n.d.]. Karpathy\/nanogpt: The simplest fastest repository for training\/finetuning medium-sized gpts. https:\/\/github.com\/karpathy\/nanoGPT"},{"key":"e_1_3_2_1_44_1","volume-title":"Optimal brain damage. 
Advances in neural information processing systems 2","author":"LeCun Yann","year":"1989","unstructured":"Yann LeCun, John Denker, and Sara Solla. 1989. Optimal brain damage. Advances in neural information processing systems 2 (1989)."},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"crossref","unstructured":"Guiying Li Chao Qian Chunhui Jiang Xiaofen Lu and Ke Tang. 2018. Optimization based layer-wise magnitude-based pruning for DNN compression.. In IJCAI. 2383--2389.","DOI":"10.24963\/ijcai.2018\/330"},{"key":"e_1_3_2_1_46_1","volume-title":"Neural networks with few multiplications. arXiv preprint arXiv:1510.03009","author":"Lin Zhouhan","year":"2015","unstructured":"Zhouhan Lin, Matthieu Courbariaux, Roland Memisevic, and Yoshua Bengio. 2015. Neural networks with few multiplications. arXiv preprint arXiv:1510.03009 (2015)."},{"key":"e_1_3_2_1_47_1","unstructured":"TorchVision maintainers and contributors. 2016. TorchVision:PyTorch's Computer Vision library."},{"key":"e_1_3_2_1_48_1","volume-title":"DDSketch: A fast and fully-mergeable quantile sketch with relative-error guarantees. arXiv preprint arXiv:1908.10693","author":"Masson Charles","year":"2019","unstructured":"Charles Masson, Jee E Rim, and Homin K Lee. 2019. DDSketch: A fast and fully-mergeable quantile sketch with relative-error guarantees. arXiv preprint arXiv:1908.10693 (2019)."},{"key":"e_1_3_2_1_49_1","first-page":"203","article-title":"CheckFreq: Frequent, Fine-Grained DNN Checkpointing","volume":"21","author":"Mohan Jayashree","year":"2021","unstructured":"Jayashree Mohan, Amar Phanishayee, and Vijay Chidambaram. 2021. CheckFreq: Frequent, Fine-Grained DNN Checkpointing.. In FAST, Vol. 21. 203--216.","journal-title":"FAST"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1109\/CCGrid49817.2020.00-76"},{"key":"e_1_3_2_1_51_1","unstructured":"Ryosuke Okuta Yuya Unno Daisuke Nishino Shohei Hido and Crissman Loomis. 2017. CuPy: A NumPy-Compatible Library for NVIDIA GPU Calculations. In Proceedings of Workshop on Machine Learning Systems (LearningSys) in The Thirty-first Annual Conference on Neural Information Processing Systems (NIPS). http:\/\/learningsys.org\/nips17\/assets\/papers\/paper_16.pdf"},{"key":"e_1_3_2_1_52_1","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei Ilya Sutskever et al. 2019. Language models are unsupervised multitask learners. OpenAI blog 1 8 (2019) 9."},{"key":"e_1_3_2_1_53_1","volume-title":"Liu","author":"Raffel Colin","year":"2019","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2019. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. arXiv e-prints (2019). arXiv:1910.10683"},{"key":"e_1_3_2_1_54_1","volume-title":"Machine Learning in Python: Main developments and technology trends in data science, machine learning, and artificial intelligence. arXiv preprint arXiv:2002.04803","author":"Raschka Sebastian","year":"2020","unstructured":"Sebastian Raschka, Joshua Patterson, and Corey Nolet. 2020. Machine Learning in Python: Main developments and technology trends in data science, machine learning, and artificial intelligence. arXiv preprint arXiv:2002.04803 (2020)."},{"key":"e_1_3_2_1_55_1","volume-title":"Amsterdam, The Netherlands","author":"Rastegari Mohammad","year":"2016","unstructured":"Mohammad Rastegari, Vicente Ordonez, Joseph Redmon, and Ali Farhadi. 2016. 
Xnor-net: Imagenet classification using binary convolutional neural networks. In Computer Vision-ECCV 2016:14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part IV. Springer, 525--542."},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01193"},{"key":"e_1_3_2_1_57_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6409"},{"key":"e_1_3_2_1_58_1","volume-title":"Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556","author":"Simonyan Karen","year":"2014","unstructured":"Karen Simonyan and Andrew Zisserman. 2014. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)."},{"key":"e_1_3_2_1_59_1","volume-title":"Degree-quant: Quantization-aware training for graph neural networks. arXiv preprint arXiv:2008.05000","author":"Tailor Shyam A","year":"2020","unstructured":"Shyam A Tailor, Javier Fernandez-Marques, and Nicholas D Lane. 2020. Degree-quant: Quantization-aware training for graph neural networks. arXiv preprint arXiv:2008.05000 (2020)."},{"key":"e_1_3_2_1_60_1","volume-title":"Hashimoto","author":"Taori Rohan","year":"2023","unstructured":"Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. 2023. Stanford Alpaca: An Instruction-following LLaMA model. https:\/\/github.com\/tatsu-lab\/stanford_alpaca."},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3084527"},{"key":"e_1_3_2_1_62_1","volume-title":"GLUE: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461","author":"Wang Alex","year":"2018","unstructured":"Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2018. GLUE: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461 (2018)."},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO50266.2020.00071"},{"key":"e_1_3_2_1_64_1","volume-title":"Xi Victoria Lin, et al","author":"Zhang Susan","year":"2022","unstructured":"Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen, Christopher Dewan, Mona Diab, Xian Li, Xi Victoria Lin, et al. 2022. Opt: Open pre-trained transformer language models. 
arXiv preprint arXiv:2205.01068 (2022)."},{"key":"e_1_3_2_1_65_1","doi-asserted-by":"publisher","DOI":"10.1109\/SMC53654.2022.9945569"}],"event":{"name":"SoCC '24: ACM Symposium on Cloud Computing","location":"Redmond WA USA","acronym":"SoCC '24","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGOPS ACM Special Interest Group on Operating Systems"]},"container-title":["Proceedings of the ACM Symposium on Cloud Computing"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3698038.3698553","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3698038.3698553","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T19:00:35Z","timestamp":1755889235000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3698038.3698553"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,20]]},"references-count":65,"alternative-id":["10.1145\/3698038.3698553","10.1145\/3698038"],"URL":"https:\/\/doi.org\/10.1145\/3698038.3698553","relation":{},"subject":[],"published":{"date-parts":[[2024,11,20]]},"assertion":[{"value":"2024-11-20","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
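
The record above follows the Crossref REST API "work" message format. As a minimal illustration only, the same metadata can be fetched from the public api.crossref.org endpoint and reduced to a one-line citation; this sketch assumes network access and the requests package, and the field accesses mirror the structure shown above (a robust client would handle missing fields and HTTP errors).

import requests

# Fetch the Crossref work record for the DOI shown in the record above.
doi = "10.1145/3698038.3698553"
resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # Crossref wraps the record in a "message" object

# Assemble a one-line citation from the same fields present in the record above.
authors = ", ".join(f"{a['given']} {a['family']}" for a in work.get("author", []))
title = work["title"][0]
venue = work["container-title"][0]
pages = work.get("page", "")
print(f"{authors}. {title}. In {venue}, pp. {pages}. https://doi.org/{doi}")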