{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T14:34:07Z","timestamp":1774449247368,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":55,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,4,22]],"date-time":"2024-04-22T00:00:00Z","timestamp":1713744000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"National Science Foundation of China (NSFC)","award":["62272122, 62302123, 62002240 and 62302126"],"award-info":[{"award-number":["62272122, 62302123, 62002240 and 62302126"]}]},{"name":"Hong Kong RGC GRF","award":["16209120, 16200221 and 16207922"],"award-info":[{"award-number":["16209120, 16200221 and 16207922"]}]},{"name":"Shenzhen Science and Technology Program","award":["RCBS20221008093125065"],"award-info":[{"award-number":["RCBS20221008093125065"]}]},{"name":"School-Enterprise Cooperation Project of Shenzhen Technology University","award":["20211061040025 and 20211064010040"],"award-info":[{"award-number":["20211061040025 and 20211064010040"]}]},{"name":"Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies","award":["2022B1212010005"],"award-info":[{"award-number":["2022B1212010005"]}]},{"name":"Hong Kong RIF","award":["R6021-20"],"award-info":[{"award-number":["R6021-20"]}]},{"name":"Hong Kong RGC CRF","award":["C2004-21G, C7004-22G and C1029-22G"],"award-info":[{"award-number":["C2004-21G, C7004-22G and C1029-22G"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,4,22]]},"DOI":"10.1145\/3627703.3650083","type":"proceedings-article","created":{"date-parts":[[2024,4,18]],"date-time":"2024-04-18T06:28:28Z","timestamp":1713421708000},"page":"236-249","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":19,"title":["ScheMoE: An Extensible Mixture-of-Experts Distributed Training System with Tasks Scheduling"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1418-5160","authenticated-orcid":false,"given":"Shaohuai","family":"Shi","sequence":"first","affiliation":[{"name":"Harbin Institute of Technology, Shenzhen"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1172-9935","authenticated-orcid":false,"given":"Xinglin","family":"Pan","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology (Guangzhou)"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2986-967X","authenticated-orcid":false,"given":"Qiang","family":"Wang","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology, Shenzhen"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0103-4670","authenticated-orcid":false,"given":"Chengjian","family":"Liu","sequence":"additional","affiliation":[{"name":"Shenzhen Technology University"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0432-5510","authenticated-orcid":false,"given":"Xiaozhe","family":"Ren","sequence":"additional","affiliation":[{"name":"Huawei Central Research Institute, Huawei Technologies"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6708-3942","authenticated-orcid":false,"given":"Zhongzhe","family":"Hu","sequence":"additional","affiliation":[{"name":"Huawei Central Research Institute, Huawei 
Technologies"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-1544-956X","authenticated-orcid":false,"given":"Yu","family":"Yang","sequence":"additional","affiliation":[{"name":"Huawei Central Research Institute, Huawei Technologies"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2955-750X","authenticated-orcid":false,"given":"Bo","family":"Li","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9745-4372","authenticated-orcid":false,"given":"Xiaowen","family":"Chu","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology (Guangzhou) and The Hong Kong University of Science and Technology"}]}],"member":"320","published-online":{"date-parts":[[2024,4,22]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"2022-07-13. Doubling all2all Performance with NVIDIA Collective Communication Library 2.12. https:\/\/developer.nvidia.com\/blog\/doubling-all2all-performance-with-nvidia-collective-communication-library-2-12\/ (2022-07-13)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/SC41404.2022.00051"},{"key":"e_1_3_2_1_3_1","volume-title":"Preemptive All-reduce Scheduling for Expediting Distributed DNN Training. In IEEE INFOCOM 2020-IEEE Conference on Computer Communications. IEEE, 626--635","author":"Bao Yixin","year":"2020","unstructured":"Yixin Bao, Yanghua Peng, Yangrui Chen, and Chuan Wu. 2020. Preemptive All-reduce Scheduling for Expediting Distributed DNN Training. In IEEE INFOCOM 2020-IEEE Conference on Computer Communications. IEEE, 626--635."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.26599\/BDMA.2020.9020004"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/W14-3302"},{"key":"e_1_3_2_1_6_1","unstructured":"Tom Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared D Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020) 1877--1901."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/181014.181756"},{"key":"e_1_3_2_1_8_1","unstructured":"Chang Chen Min Li Zhihua Wu Dianhai Yu and Chao Yang. 2022. TA-MoE: Topology-Aware Large Scale Mixture-of-Expert Training. In Advances in Neural Information Processing Systems."},{"key":"e_1_3_2_1_9_1","volume-title":"Proceedings of Machine Learning and Systems","author":"Chowdhery Aakanksha","year":"2022","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. 2022. PaLM: Scaling language modeling with pathways. In Proceedings of Machine Learning and Systems 2022."},{"key":"e_1_3_2_1_10_1","unstructured":"Jeffrey Dean Greg Corrado Rajat Monga Kai Chen Matthieu Devin Mark Mao Marc'aurelio Ranzato Andrew Senior Paul Tucker Ke Yang et al. 2012. Large scale distributed deep networks. In Advances in neural information processing systems. 1223--1231."},{"key":"e_1_3_2_1_11_1","volume-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","volume":"1","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. 
In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1. 4171--4186."},{"key":"e_1_3_2_1_12_1","first-page":"27434","article-title":"AC-GC: Lossy activation compression with guaranteed convergence","volume":"34","author":"David Evans R","year":"2021","unstructured":"R David Evans and Tor Aamodt. 2021. AC-GC: Lossy activation compression with guaranteed convergence. Advances in Neural Information Processing Systems 34 (2021), 27434--27448.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_13_1","first-page":"1","article-title":"Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity","volume":"23","author":"Fedus William","year":"2022","unstructured":"William Fedus, Barret Zoph, and Noam Shazeer. 2022. Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity. Journal of Machine Learning Research 23, 120 (2022), 1--39.","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3503221.3508418"},{"key":"e_1_3_2_1_15_1","volume-title":"Gpipe: Efficient training of giant neural networks using pipeline parallelism. Advances in neural information processing systems 32","author":"Huang Yanping","year":"2019","unstructured":"Yanping Huang, Youlong Cheng, Ankur Bapna, Orhan Firat, Dehao Chen, Mia Chen, HyoukJoong Lee, Jiquan Ngiam, Quoc V Le, Yonghui Wu, et al. 2019. Gpipe: Efficient training of giant neural networks using pipeline parallelism. Advances in neural information processing systems 32 (2019)."},{"key":"e_1_3_2_1_16_1","volume-title":"Tutel: Adaptive Mixture-of-Experts at Scale. arXiv preprint arXiv:2206.03382","author":"Hwang Changho","year":"2022","unstructured":"Changho Hwang, Wei Cui, Yifan Xiong, Ziyue Yang, Ze Liu, Han Hu, Zilong Wang, Rafael Salas, Jithin Jose, Prabhat Ram, et al. 2022. Tutel: Adaptive Mixture-of-Experts at Scale. arXiv preprint arXiv:2206.03382 (2022)."},{"key":"e_1_3_2_1_17_1","volume-title":"Adaptive mixtures of local experts. Neural computation 3, 1","author":"Jacobs Robert A","year":"1991","unstructured":"Robert A Jacobs, Michael I Jordan, Steven J Nowlan, and Geoffrey E Hinton. 1991. Adaptive mixtures of local experts. Neural computation 3, 1 (1991), 79--87."},{"key":"e_1_3_2_1_18_1","volume-title":"GShard: Scaling Giant Models with Conditional Computation and Automatic Sharding. In International Conference on Learning Representations.","author":"Lepikhin Dmitry","year":"2021","unstructured":"Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan Firat, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. 2021. GShard: Scaling Giant Models with Conditional Computation and Automatic Sharding. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_19_1","volume-title":"International Conference on Machine Learning. PMLR, 6265--6274","author":"Lewis Mike","year":"2021","unstructured":"Mike Lewis, Shruti Bhosale, Tim Dettmers, Naman Goyal, and Luke Zettlemoyer. 2021. BASE layers: Simplifying training of large, sparse models. In International Conference on Machine Learning. PMLR, 6265--6274."},{"key":"e_1_3_2_1_20_1","volume-title":"2023 USENIX Annual Technical Conference (USENIX ATC 23)","author":"Li Jiamin","year":"2023","unstructured":"Jiamin Li, Yimin Jiang, Yibo Zhu, Cong Wang, and Hong Xu. 2023. 
Accelerating distributed {MoE} training and inference with lina. In 2023 USENIX Annual Technical Conference (USENIX ATC 23). 945--959."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2014.2346458"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3603269.3604869"},{"key":"e_1_3_2_1_23_1","volume-title":"Gating Dropout: Communication-efficient Regularization for Sparsely Activated Transformers. In International Conference on Machine Learning. PMLR, 13782--13792","author":"Liu Rui","year":"2022","unstructured":"Rui Liu, Young Jin Kim, Alexandre Muzio, and Hany Hassan. 2022. Gating Dropout: Communication-efficient Regularization for Sparsely Activated Transformers. In International Conference on Machine Learning. PMLR, 13782--13792."},{"key":"e_1_3_2_1_24_1","volume-title":"International Conference on Learning Representations.","author":"Liu Zirui","year":"2021","unstructured":"Zirui Liu, Kaixiong Zhou, Fan Yang, Li Li, Rui Chen, and Xia Hu. 2021. EXACT: Scalable graph neural networks training via extreme activation compression. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_25_1","volume-title":"Efficient Pipeline Planning for Expedited Distributed DNN Training. In IEEE INFOCOM 2022-IEEE Conference on Computer Communications.","author":"Luo Ziyue","year":"2022","unstructured":"Ziyue Luo, Xiaodong Yi, Guoping Long, Shiqing Fan, Chuan Wu, Jun Yang, and Wei Lin. 2022. Efficient Pipeline Planning for Expedited Distributed DNN Training. In IEEE INFOCOM 2022-IEEE Conference on Computer Communications."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1145\/3503221.3508417"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.14778\/3570690.3570697"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359646"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3458817.3476209"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/3588964"},{"key":"e_1_3_2_1_31_1","volume-title":"HetuMoE: An Efficient Trillion-scale Mixture-of-Expert Distributed Training System. arXiv preprint arXiv:2203.14685","author":"Nie Xiaonan","year":"2022","unstructured":"Xiaonan Nie, Pinxue Zhao, Xupeng Miao, and Bin Cui. 2022. HetuMoE: An Efficient Trillion-scale Mixture-of-Expert Distributed Training System. arXiv preprint arXiv:2203.14685 (2022)."},{"key":"e_1_3_2_1_32_1","volume-title":"International conference on machine learning. PMLR, 1310--1318","author":"Pascanu Razvan","year":"2013","unstructured":"Razvan Pascanu, Tomas Mikolov, and Yoshua Bengio. 2013. On the difficulty of training recurrent neural networks. In International conference on machine learning. PMLR, 1310--1318."},{"key":"e_1_3_2_1_33_1","volume-title":"Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems 32","author":"Paszke Adam","year":"2019","unstructured":"Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. 2019. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems 32 (2019)."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359642"},{"key":"e_1_3_2_1_35_1","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei Ilya Sutskever et al. 2019. Language models are unsupervised multitask learners. 
OpenAI blog 1 8 (2019) 9."},{"key":"e_1_3_2_1_36_1","volume-title":"International Conference on Machine Learning. PMLR","author":"Rajbhandari Samyam","year":"2022","unstructured":"Samyam Rajbhandari, Conglong Li, Zhewei Yao, Minjia Zhang, Reza Yazdani Aminabadi, Ammar Ahmad Awan, Jeff Rasley, and Yuxiong He. 2022. DeepSpeed-MoE: Advancing mixture-of-experts inference and training to power next-generation ai scale. In International Conference on Machine Learning. PMLR, 18332--18346."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3406703"},{"key":"e_1_3_2_1_38_1","first-page":"8583","article-title":"Scaling vision with sparse mixture of experts","volume":"34","author":"Riquelme Carlos","year":"2021","unstructured":"Carlos Riquelme, Joan Puigcerver, Basil Mustafa, Maxim Neumann, Rodolphe Jenatton, Andr\u00e9 Susano Pinto, Daniel Keysers, and Neil Houlsby. 2021. Scaling vision with sparse mixture of experts. Advances in Neural Information Processing Systems 34 (2021), 8583--8595.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_39_1","volume-title":"International Conference on Learning Representations.","author":"Shazeer Noam","year":"2017","unstructured":"Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. 2017. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_40_1","volume-title":"SE-MoE: A Scalable and Efficient Mixture-of-Experts Distributed Training and Inference System. arXiv preprint arXiv:2205.10034","author":"Shen Liang","year":"2022","unstructured":"Liang Shen, Zhihua Wu, WeiBao Gong, Hongxiang Hao, Yangfan Bai, HuaChao Wu, Xinxuan Wu, Haoyi Xiong, Dianhai Yu, and Yanjun Ma. 2022. SE-MoE: A Scalable and Efficient Mixture-of-Experts Distributed Training and Inference System. arXiv preprint arXiv:2205.10034 (2022)."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2019.8737367"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM42981.2021.9488803"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM53939.2023.10228874"},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1145\/3577193.3593704"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/3575693.3575712"},{"key":"e_1_3_2_1_46_1","volume-title":"International Conference on Learning Representations.","author":"Stephen Merity","year":"2017","unstructured":"Merity Stephen, Xiong Caiming, Bradbury James, and Richard Socher. 2017. Pointer sentinel mixture models. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_47_1","volume-title":"Attention is all you need. Advances in neural information processing systems 30","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in neural information processing systems 30 (2017)."},{"key":"e_1_3_2_1_48_1","volume-title":"Proceedings of Machine Learning and Systems 5","author":"Wang Zhuang","year":"2023","unstructured":"Zhuang Wang, Xinyu Wu, Zhaozhuo Xu, and TS Ng. 2023. Cupcake: A Compression Scheduler for Scalable Communication-Efficient Distributed Training. 
Proceedings of Machine Learning and Systems 5 (2023)."},{"key":"e_1_3_2_1_49_1","volume-title":"2023 USENIX Annual Technical Conference (USENIX ATC 23)","author":"Zhai Mingshu","year":"2023","unstructured":"Mingshu Zhai, Jiaao He, Zixuan Ma, Zan Zong, Runqing Zhang, and Jidong Zhai. 2023. SmartMoE: Efficiently Training Sparsely-Activated Models through Combining Offline and Online Parallelization. In 2023 USENIX Annual Technical Conference (USENIX ATC 23). 961--975."},{"key":"e_1_3_2_1_50_1","volume-title":"DeAR: Accelerating Distributed Deep Learning with Fine-Grained All-Reduce Pipelining. In 2023 IEEE 43rd International Conference on Distributed Computing Systems (ICDCS). IEEE, 142--153","author":"Zhang Lin","year":"2023","unstructured":"Lin Zhang, Shaohuai Shi, Xiaowen Chu, Wei Wang, Bo Li, and Chengjian Liu. 2023. DeAR: Accelerating Distributed Deep Learning with Fine-Grained All-Reduce Pipelining. In 2023 IEEE 43rd International Conference on Distributed Computing Systems (ICDCS). IEEE, 142--153."},{"key":"e_1_3_2_1_51_1","volume-title":"Alpa: Automating Inter-and Intra-Operator Parallelism for Distributed Deep Learning. In 16th USENIX Symposium on Operating Systems Design and Implementation.","author":"Zheng Lianmin","year":"2022","unstructured":"Lianmin Zheng, Zhuohan Li, Hao Zhang, Yonghao Zhuang, Zhifeng Chen, Yanping Huang, Yida Wang, Yuanzhong Xu, Danyang Zhuo, Joseph E Gonzalez, et al. 2022. Alpa: Automating Inter-and Intra-Operator Parallelism for Distributed Deep Learning. In 16th USENIX Symposium on Operating Systems Design and Implementation."},{"key":"e_1_3_2_1_52_1","volume-title":"Accelerating MPI All-to-All Communication with Online Compression on Modern GPU Clusters. In International Conference on High Performance Computing. Springer, 3--25","author":"Zhou Qinghua","year":"2022","unstructured":"Qinghua Zhou, Pouya Kousha, Quentin Anthony, Kawthar Shafie Khorassani, Aamir Shafi, Hari Subramoni, and Dhabaleswar K Panda. 2022. Accelerating MPI All-to-All Communication with Online Compression on Modern GPU Clusters. In International Conference on High Performance Computing. Springer, 3--25."},{"key":"e_1_3_2_1_53_1","first-page":"7103","article-title":"Mixture-of-experts with expert choice routing","volume":"35","author":"Zhou Yanqi","year":"2022","unstructured":"Yanqi Zhou, Tao Lei, Hanxiao Liu, Nan Du, Yanping Huang, Vincent Zhao, Andrew M Dai, Quoc V Le, James Laudon, et al. 2022. Mixture-of-experts with expert choice routing. Advances in Neural Information Processing Systems 35 (2022), 7103--7114.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.11"},{"key":"e_1_3_2_1_55_1","volume-title":"Taming Sparsely Activated Transformer with Stochastic Experts. In International Conference on Learning Representations.","author":"Zuo Simiao","year":"2022","unstructured":"Simiao Zuo, Xiaodong Liu, Jian Jiao, Young Jin Kim, Hany Hassan, Ruofei Zhang, Jianfeng Gao, and Tuo Zhao. 2022. Taming Sparsely Activated Transformer with Stochastic Experts. 
In International Conference on Learning Representations."}],"event":{"name":"EuroSys '24: Nineteenth European Conference on Computer Systems","location":"Athens Greece","acronym":"EuroSys '24","sponsor":["SIGOPS ACM Special Interest Group on Operating Systems"]},"container-title":["Proceedings of the Nineteenth European Conference on Computer Systems"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3627703.3650083","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3627703.3650083","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T01:08:42Z","timestamp":1755824922000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3627703.3650083"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,22]]},"references-count":55,"alternative-id":["10.1145\/3627703.3650083","10.1145\/3627703"],"URL":"https:\/\/doi.org\/10.1145\/3627703.3650083","relation":{},"subject":[],"published":{"date-parts":[[2024,4,22]]},"assertion":[{"value":"2024-04-22","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
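The record above is a work object as returned by the public Crossref REST API (note `"source":"Crossref"`, `"message-type":"work"`). As a brief illustration only, the sketch below shows one way such a record could be fetched and condensed into a citation line; it is not part of the record itself. It assumes the third-party `requests` package is available, and it uses only fields that appear above (`message`, `title`, `author`, `container-title`, `page`, `DOI`).

```python
# Minimal sketch (assumption: `requests` is installed): fetch the Crossref
# work record shown above and print a one-line citation from it.
import requests

DOI = "10.1145/3627703.3650083"  # DOI taken from the record above


def fetch_work(doi: str) -> dict:
    """Retrieve a work record from the Crossref REST API and return its 'message' object."""
    resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
    resp.raise_for_status()
    return resp.json()["message"]


def summarize(work: dict) -> str:
    """Build a short citation string from the fields present in the record above."""
    authors = ", ".join(
        f"{a.get('given', '')} {a.get('family', '')}".strip()
        for a in work.get("author", [])
    )
    title = work["title"][0]
    venue = work.get("container-title", [""])[0]
    pages = work.get("page", "")
    return f"{authors}. {title}. {venue}, pp. {pages}. https://doi.org/{work['DOI']}"


if __name__ == "__main__":
    print(summarize(fetch_work(DOI)))
```

Run as-is, this would print the ScheMoE entry (authors, title, the EuroSys '24 proceedings as the container title, pages 236-249, and the DOI URL), matching the metadata in the JSON above.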