{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:29:41Z","timestamp":1775579381031,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":65,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,5,30]],"date-time":"2024-05-30T00:00:00Z","timestamp":1717027200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"NSF grants","award":["1829524, 1817077, 2011212"],"award-info":[{"award-number":["1829524, 1817077, 2011212"]}]},{"name":"the PRISM center in JUMP 2.0"},{"name":"an SRC program sponsored by DARPA"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,5,30]]},"DOI":"10.1145\/3650200.3656631","type":"proceedings-article","created":{"date-parts":[[2024,6,3]],"date-time":"2024-06-03T14:11:54Z","timestamp":1717423914000},"page":"498-510","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":4,"title":["Fasor: A Fast Tensor Program Optimization Framework for Efficient DNN Deployment"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6338-3289","authenticated-orcid":false,"given":"Hanxian","family":"Huang","sequence":"first","affiliation":[{"name":"University of California San Diego, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1950-2468","authenticated-orcid":false,"given":"Xin","family":"Chen","sequence":"additional","affiliation":[{"name":"Intel, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8766-0946","authenticated-orcid":false,"given":"Jishen","family":"Zhao","sequence":"additional","affiliation":[{"name":"University of California San Diego, United States of America"}]}],"member":"320","published-online":{"date-parts":[[2024,6,3]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"[n. d.]. CuBLAS: Basic Linear Algebra on NVIDIA GPUs. https:\/\/developer.nvidia.com\/cublas."},{"key":"e_1_3_2_1_2_1","unstructured":"[n. d.]. Intel PMU profiling tools. https:\/\/github.com\/andikleen\/pmu-tools."},{"key":"e_1_3_2_1_3_1","volume-title":"d.]. Intel","unstructured":"[n. d.]. Intel(R) Math Kernel Library for Deep Neural Networks (Intel(R) MKL-DNN). https:\/\/github.com\/rsdubtso\/mkl-dnn."},{"key":"e_1_3_2_1_4_1","unstructured":"[n. d.]. The Performance Application Programming Interface (PAPI). https:\/\/tvm.apache.org\/docs\/how_to\/profile\/papi.html."},{"key":"e_1_3_2_1_5_1","volume-title":"TensorFlow: A System for Large-Scale Machine Learning. In 12th USENIX symposium on operating systems design and implementation (OSDI 16)","author":"Abadi Mart\u00edn","year":"2016","unstructured":"Mart\u00edn Abadi, Paul Barham, Jianmin Chen, Zhifeng Chen, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Geoffrey Irving, Michael Isard, 2016. TensorFlow: A System for Large-Scale Machine Learning. In 12th USENIX symposium on operating systems design and implementation (OSDI 16). 265\u2013283."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3306346.3322967"},{"key":"e_1_3_2_1_7_1","volume-title":"Chameleon: Adaptive Code Optimization for Expedited Deep Neural Network Compilation. In 8th International Conference on Learning Representations, ICLR 2020","author":"Ahn Byung\u00a0Hoon","year":"2020","unstructured":"Byung\u00a0Hoon Ahn, Prannoy Pilligundla, Amir Yazdanbakhsh, and Hadi Esmaeilzadeh. 2020. 
Chameleon: Adaptive Code Optimization for Expedited Deep Neural Network Compilation. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net. https:\/\/openreview.net\/forum?id=rygG4AVFvH"},{"key":"e_1_3_2_1_8_1","first-page":"181","article-title":"A deep learning based cost model for automatic code optimization","volume":"3","author":"Baghdadi Riyadh","year":"2021","unstructured":"Riyadh Baghdadi, Massinissa Merouani, Mohamed-Hicham Leghettas, Kamel Abdous, Taha Arbaoui, Karima Benatchba, 2021. A deep learning based cost model for automatic code optimization. Proceedings of Machine Learning and Systems 3 (2021), 181\u2013193.","journal-title":"Proceedings of Machine Learning and Systems"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.5555\/3314872.3314896"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1063\/1.2159147"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3582016.3582061"},{"key":"e_1_3_2_1_12_1","volume-title":"The Eleventh International Conference on Learning Representations.","author":"Bi Jun","year":"2022","unstructured":"Jun Bi, Xiaqing Li, Qi Guo, Rui Zhang, Yuanbo Wen, Xing Hu, Zidong Du, Xinkai Song, Yifan Hao, and Yunji Chen. 2022. BALTO: fast tensor program optimization with diversity-based active learning. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_2_1_13_1","volume-title":"Proxylessnas: Direct neural architecture search on target task and hardware. arXiv preprint arXiv:1812.00332","author":"Cai Han","year":"2018","unstructured":"Han Cai, Ligeng Zhu, and Song Han. 2018. Proxylessnas: Direct neural architecture search on target task and hardware. arXiv preprint arXiv:1812.00332 (2018)."},{"key":"e_1_3_2_1_14_1","volume-title":"Mxnet: A flexible and efficient machine learning library for heterogeneous distributed systems. arXiv preprint arXiv:1512.01274","author":"Chen Tianqi","year":"2015","unstructured":"Tianqi Chen, Mu Li, Yutian Li, Min Lin, Naiyan Wang, Minjie Wang, Tianjun Xiao, Bing Xu, Chiyuan Zhang, and Zheng Zhang. 2015. Mxnet: A flexible and efficient machine learning library for heterogeneous distributed systems. arXiv preprint arXiv:1512.01274 (2015)."},{"key":"e_1_3_2_1_15_1","volume-title":"TVM: An Automated End-to-End Optimizing Compiler for Deep Learning. In 13th USENIX Symposium on Operating Systems Design and Implementation (OSDI 18)","author":"Chen Tianqi","year":"2018","unstructured":"Tianqi Chen, Thierry Moreau, Ziheng Jiang, Lianmin Zheng, Eddie Yan, Haichen Shen, Meghan Cowan, Leyuan Wang, Yuwei Hu, Luis Ceze, Carlos Guestrin, and Arvind Krishnamurthy. 2018. TVM: An Automated End-to-End Optimizing Compiler for Deep Learning. In 13th USENIX Symposium on Operating Systems Design and Implementation (OSDI 18). USENIX Association, Carlsbad, CA, 578\u2013594. https:\/\/www.usenix.org\/conference\/osdi18\/presentation\/chen"},{"key":"e_1_3_2_1_16_1","volume-title":"Learning to optimize tensor programs. Advances in Neural Information Processing Systems 31","author":"Chen Tianqi","year":"2018","unstructured":"Tianqi Chen, Lianmin Zheng, Eddie Yan, Ziheng Jiang, Thierry Moreau, Luis Ceze, Carlos Guestrin, and Arvind Krishnamurthy. 2018. Learning to optimize tensor programs. Advances in Neural Information Processing Systems 31 (2018)."},{"key":"e_1_3_2_1_17_1","volume-title":"cudnn: Efficient primitives for deep learning. 
arXiv preprint arXiv:1410.0759","author":"Chetlur Sharan","year":"2014","unstructured":"Sharan Chetlur, Cliff Woolley, Philippe Vandermersch, Jonathan Cohen, John Tran, Bryan Catanzaro, and Evan Shelhamer. 2014. cudnn: Efficient primitives for deep learning. arXiv preprint arXiv:1410.0759 (2014)."},{"key":"e_1_3_2_1_18_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3559009.3569682"},{"key":"e_1_3_2_1_20_1","volume-title":"Proceedings of the thirteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 249\u2013256","author":"Glorot Xavier","year":"2010","unstructured":"Xavier Glorot and Yoshua Bengio. 2010. Understanding the difficulty of training deep feedforward neural networks. In Proceedings of the thirteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 249\u2013256."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/3368826.3377928"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00300"},{"key":"e_1_3_2_1_24_1","volume-title":"Proc. Workshop ML Syst. NeurIPS. 1\u20136.","author":"Kaufman Samuel","year":"2019","unstructured":"Samuel Kaufman, Phitchaya\u00a0Mangpo Phothilimthana, and Mike Burrows. 2019. Learned TPU cost model for XLA tensor programs. In Proc. Workshop ML Syst. NeurIPS. 1\u20136."},{"key":"e_1_3_2_1_25_1","volume-title":"Adam: A Method for Stochastic Optimization. In International Conference on Learning Representations (ICLR)","author":"Kingma Diederik","year":"2015","unstructured":"Diederik Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. In International Conference on Learning Representations (ICLR). San Diega, CA, USA."},{"key":"e_1_3_2_1_26_1","volume-title":"Proceedings of the 25th International Conference on Neural Information Processing Systems -","volume":"1","author":"Krizhevsky Alex","year":"2012","unstructured":"Alex Krizhevsky, Ilya Sutskever, and Geoffrey\u00a0E. Hinton. 2012. ImageNet Classification with Deep Convolutional Neural Networks. In Proceedings of the 25th International Conference on Neural Information Processing Systems - Volume 1 (Lake Tahoe, Nevada) (NIPS\u201912). Curran Associates Inc., Red Hook, NY, USA, 1097\u20131105."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.435"},{"key":"e_1_3_2_1_28_1","volume-title":"Rapid neural architecture search by learning to generate graphs from datasets. arXiv preprint arXiv:2107.00860","author":"Lee Hayeon","year":"2021","unstructured":"Hayeon Lee, Eunyoung Hyung, and Sung\u00a0Ju Hwang. 2021. Rapid neural architecture search by learning to generate graphs from datasets. arXiv preprint arXiv:2107.00860 (2021)."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3476994"},{"key":"e_1_3_2_1_30_1","volume-title":"Roberta: A robustly optimized bert pretraining approach. 
arXiv preprint arXiv:1907.11692","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)."},{"key":"e_1_3_2_1_31_1","volume-title":"Fast training of convolutional networks through ffts. arXiv preprint arXiv:1312.5851","author":"Mathieu Michael","year":"2013","unstructured":"Michael Mathieu, Mikael Henaff, and Yann LeCun. 2013. Fast training of convolutional networks through ffts. arXiv preprint arXiv:1312.5851 (2013)."},{"key":"e_1_3_2_1_32_1","volume-title":"Massively distributed SGD: ImageNet\/ResNet-50 training in a flash. arXiv preprint arXiv:1811.05233","author":"Mikami Hiroaki","year":"2018","unstructured":"Hiroaki Mikami, Hisahiro Suganuma, Yoshiki Tanaka, Yuichi Kageyama, 2018. Massively distributed SGD: ImageNet\/ResNet-50 training in a flash. arXiv preprint arXiv:1811.05233 (2018)."},{"key":"e_1_3_2_1_33_1","volume-title":"Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602","author":"Mnih Volodymyr","year":"2013","unstructured":"Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Alex Graves, Ioannis Antonoglou, Daan Wierstra, and Martin Riedmiller. 2013. Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602 (2013)."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1145\/3229762.3229766"},{"key":"e_1_3_2_1_35_1","volume-title":"Spearman correlation coefficients, differences between. Encyclopedia of statistical sciences 12","author":"Myers Leann","year":"2004","unstructured":"Leann Myers and Maria\u00a0J Sirois. 2004. Spearman correlation coefficients, differences between. Encyclopedia of statistical sciences 12 (2004)."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1093\/biomet\/78.3.691"},{"key":"e_1_3_2_1_37_1","unstructured":"Nvidia.2023. An Order-of-Magnitude Leap for Accelerated Computing.https:\/\/www.nvidia.com\/en-us\/data-center\/h100\/"},{"key":"e_1_3_2_1_38_1","volume-title":"Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems 32","author":"Paszke Adam","year":"2019","unstructured":"Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, 2019. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems 32 (2019)."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/2499370.2462176"},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1145\/3497776.3517774"},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"e_1_3_2_1_42_1","volume-title":"Julian Schrittwieser, Ioannis Antonoglou","author":"Silver David","year":"2016","unstructured":"David Silver, Aja Huang, Chris\u00a0J Maddison, Arthur Guez, Laurent Sifre, George Van Den\u00a0Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, 2016. Mastering the game of Go with deep neural networks and tree search. nature 529, 7587 (2016), 484\u2013489."},{"key":"e_1_3_2_1_43_1","volume-title":"Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556","author":"Simonyan Karen","year":"2014","unstructured":"Karen Simonyan and Andrew Zisserman. 2014. 
Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)."},{"key":"e_1_3_2_1_44_1","volume-title":"Optimizing network performance for distributed dnn training on gpu clusters: Imagenet\/alexnet training in 1.5 minutes. arXiv preprint arXiv:1902.06855","author":"Sun Peng","year":"2019","unstructured":"Peng Sun, Wansen Feng, Ruobing Han, Shengen Yan, and Yonggang Wen. 2019. Optimizing network performance for distributed dnn training on gpu clusters: Imagenet\/alexnet training in 1.5 minutes. arXiv preprint arXiv:1902.06855 (2019)."},{"key":"e_1_3_2_1_45_1","volume-title":"STC-NAS: Fast Neural Architecture Search with Source-Target Consistency. Neurocomputing","author":"Sun Zihao","year":"2021","unstructured":"Zihao Sun, Yu Hu, Longxing Yang, Shun Lu, Jilin Mei, Yinhe Han, and Xiaowei Li. 2021. STC-NAS: Fast Neural Architecture Search with Source-Target Consistency. Neurocomputing (2021)."},{"key":"e_1_3_2_1_46_1","volume-title":"Mobilebert: a compact task-agnostic bert for resource-limited devices. arXiv preprint arXiv:2004.02984","author":"Sun Zhiqing","year":"2020","unstructured":"Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. 2020. Mobilebert: a compact task-agnostic bert for resource-limited devices. arXiv preprint arXiv:2004.02984 (2020)."},{"key":"e_1_3_2_1_47_1","unstructured":"Akihiro Tabuchi Akihiko Kasagi Masafumi Yamazaki Takumi Honda Masahiro Miwa Takashi Shiraishi Motohiro Kosaki Naoto Fukumoto Tsuguchika Tabaru Atsushi Ike [n. d.]. Extremely Accelerated Deep Learning: ResNet-50 Training in 70.4 Seconds. ([n. d.])."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00293"},{"key":"e_1_3_2_1_49_1","volume-title":"International conference on machine learning. PMLR, 6105\u20136114","author":"Tan Mingxing","year":"2019","unstructured":"Mingxing Tan and Quoc Le. 2019. Efficientnet: Rethinking model scaling for convolutional neural networks. In International conference on machine learning. PMLR, 6105\u20136114."},{"key":"e_1_3_2_1_50_1","volume-title":"Tensor comprehensions: Framework-agnostic high-performance machine learning abstractions. arXiv preprint arXiv:1802.04730","author":"Vasilache Nicolas","year":"2018","unstructured":"Nicolas Vasilache, Oleksandr Zinenko, Theodoros Theodoridis, Priya Goyal, Zachary DeVito, William\u00a0S Moses, Sven Verdoolaege, Andrew Adams, and Albert Cohen. 2018. Tensor comprehensions: Framework-agnostic high-performance machine learning abstractions. arXiv preprint arXiv:1802.04730 (2018)."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1145\/2400682.2400713"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.3390\/app14020513"},{"key":"e_1_3_2_1_53_1","volume-title":"High-Performance Computing on the Intel\u00ae Xeon Phi\u2122","author":"Wang Endong","unstructured":"Endong Wang, Qing Zhang, Bo Shen, Guangyong Zhang, Xiaowei Lu, Qing Wu, and Yajuan Wang. 2014. Intel math kernel library. In High-Performance Computing on the Intel\u00ae Xeon Phi\u2122. Springer, 167\u2013188."},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/1498765.1498785"},{"key":"e_1_3_2_1_55_1","volume-title":"Huggingface\u2019s transformers: State-of-the-art natural language processing. 
arXiv preprint arXiv:1910.03771","author":"Wolf Thomas","year":"2019","unstructured":"Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtowicz, 2019. Huggingface\u2019s transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771 (2019)."},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00137"},{"key":"e_1_3_2_1_57_1","volume-title":"Yet another accelerated sgd: Resnet-50 training on imagenet in 74.7 seconds. arXiv preprint arXiv:1903.12650","author":"Yamazaki Masafumi","year":"2019","unstructured":"Masafumi Yamazaki, Akihiko Kasagi, Akihiro Tabuchi, Takumi Honda, Masahiro Miwa, Naoto Fukumoto, Tsuguchika Tabaru, Atsushi Ike, and Kohta Nakashima. 2019. Yet another accelerated sgd: Resnet-50 training on imagenet in 74.7 seconds. arXiv preprint arXiv:1903.12650 (2019)."},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.1109\/ISPASS.2014.6844459"},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.1145\/3472883.3486973"},{"key":"e_1_3_2_1_60_1","doi-asserted-by":"publisher","DOI":"10.1145\/3575693.3575737"},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.chemosphere.2018.12.128"},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.1145\/3572864.3580330"},{"key":"e_1_3_2_1_63_1","volume-title":"14th USENIX Symposium on Operating Systems Design and Implementation (OSDI 20)","author":"Zheng Lianmin","year":"2020","unstructured":"Lianmin Zheng, Chengfan Jia, Minmin Sun, Zhao Wu, Cody\u00a0Hao Yu, Ameer Haj-Ali, Yida Wang, Jun Yang, Danyang Zhuo, Koushik Sen, 2020. Ansor: Generating { High-Performance} Tensor Programs for Deep Learning. In 14th USENIX Symposium on Operating Systems Design and Implementation (OSDI 20). 863\u2013879."},{"key":"e_1_3_2_1_64_1","volume-title":"TenSet: A Large-scale Program Performance Dataset for Learned Tensor Compilers. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1).","author":"Zheng Lianmin","year":"2021","unstructured":"Lianmin Zheng, Ruochen Liu, Junru Shao, Tianqi Chen, Joseph\u00a0E Gonzalez, Ion Stoica, and Ameer\u00a0Haj Ali. 2021. TenSet: A Large-scale Program Performance Dataset for Learned Tensor Compilers. 
In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1)."},{"key":"e_1_3_2_1_65_1","doi-asserted-by":"publisher","DOI":"10.1145\/3373376.3378508"}],"event":{"name":"ICS '24: 2024 International Conference on Supercomputing","location":"Kyoto Japan","acronym":"ICS '24","sponsor":["SIGARCH ACM Special Interest Group on Computer Architecture"]},"container-title":["Proceedings of the 38th ACM International Conference on Supercomputing"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3650200.3656631","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3650200.3656631","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T15:25:24Z","timestamp":1755876324000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3650200.3656631"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,30]]},"references-count":65,"alternative-id":["10.1145\/3650200.3656631","10.1145\/3650200"],"URL":"https:\/\/doi.org\/10.1145\/3650200.3656631","relation":{},"subject":[],"published":{"date-parts":[[2024,5,30]]},"assertion":[{"value":"2024-06-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
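
Note: as a minimal illustrative sketch (not part of the record itself), the Crossref work message above could be consumed as follows, assuming the JSON has been saved to a local file named fasor_crossref.json (a hypothetical filename); the field names used are only those visible in the record.

import json

# Load the Crossref work record (hypothetical local filename).
with open("fasor_crossref.json") as f:
    record = json.load(f)

work = record["message"]

# "title" is a list of strings; "DOI" is a plain string.
title = work["title"][0]
doi = work["DOI"]

# Authors are objects with "given" and "family" name fields.
authors = [f'{a["given"]} {a["family"]}' for a in work.get("author", [])]

# "published" carries date-parts as a nested list, e.g. [[2024, 5, 30]].
year, month, day = work["published"]["date-parts"][0]

print(f"{title} ({year}), DOI: {doi}")
print("Authors:", ", ".join(authors))
print("References cited:", work.get("references-count"))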