{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,28]],"date-time":"2026-04-28T01:25:05Z","timestamp":1777339505681,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":53,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,3,30]],"date-time":"2025-03-30T00:00:00Z","timestamp":1743292800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"NSFC","award":["62272122,62302123"],"award-info":[{"award-number":["62272122,62302123"]}]},{"name":"Guangzhou Municipal Joint Funding Project with Universities and Enterprises","award":["2024A03J0616"],"award-info":[{"award-number":["2024A03J0616"]}]},{"name":"Guangdong Provincial Key Laboratory of Novel Security Intelligence Technologies","award":["2022B1212010005"],"award-info":[{"award-number":["2022B1212010005"]}]},{"name":"Shenzhen Science and Technology Program","award":["KJZD20230923115113026,KJZD20230923114213027"],"award-info":[{"award-number":["KJZD20230923115113026,KJZD20230923114213027"]}]},{"name":"RGC RIF","award":["R6021-20"],"award-info":[{"award-number":["R6021-20"]}]},{"name":"Hong Kong RIF","award":["R6021-20"],"award-info":[{"award-number":["R6021-20"]}]},{"name":"RGC GRF","award":["16200221,16207922,16207423"],"award-info":[{"award-number":["16200221,16207922,16207423"]}]},{"name":"RGC TRS","award":["T43-513\/23N-2"],"award-info":[{"award-number":["T43-513\/23N-2"]}]},{"name":"Hong Kong CRF","award":["C2004-21G,C7004-22G,C1029-22G,C6015-23G"],"award-info":[{"award-number":["C2004-21G,C7004-22G,C1029-22G,C6015-23G"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,3,30]]},"DOI":"10.1145\/3669940.3707272","type":"proceedings-article","created":{"date-parts":[[2025,2,6]],"date-time":"2025-02-06T12:28:01Z","timestamp":1738844881000},"page":"524-539","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":8,"title":["FSMoE: A Flexible and Scalable Training System for Sparse Mixture-of-Experts Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1172-9935","authenticated-orcid":false,"given":"Xinglin","family":"Pan","sequence":"first","affiliation":[{"name":"The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5147-0844","authenticated-orcid":false,"given":"Wenxiang","family":"Lin","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology, Shenzhen, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8493-4705","authenticated-orcid":false,"given":"Lin","family":"Zhang","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology, Hong Kong SAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1418-5160","authenticated-orcid":false,"given":"Shaohuai","family":"Shi","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology, Shenzhen, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8769-9974","authenticated-orcid":false,"given":"Zhenheng","family":"Tang","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology, Hong Kong SAR, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-5580-189X","authenticated-orcid":false,"given":"Rui","family":"Wang","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2955-750X","authenticated-orcid":false,"given":"Bo","family":"Li","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology, Hong Kong SAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9745-4372","authenticated-orcid":false,"given":"Xiaowen","family":"Chu","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China, &amp; Hong Kong University of Science and Technology, Hong Kong SAR, China"}]}],"member":"320","published-online":{"date-parts":[[2025,3,30]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Doubling all2all performance with nvidia collective communication library 2.12. https:\/\/developer.nvidia.com\/blog\/doubling-all2all- performance-with-nvidia-collective-communication-library-2-12\/. Ac- cessed: 2022-07--13."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/SC41404.2022.00051"},{"key":"e_1_3_2_1_3_1","volume-title":"Language models are few-shot learners. Advances in neural information processing systems, 33:1877--1901","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877--1901, 2020."},{"key":"e_1_3_2_1_4_1","volume-title":"FLUX: fast software-based communication overlap on gpus through kernel fusion. CoRR, abs\/2406.06858","author":"Chang Li-Wen","year":"2024","unstructured":"Li-Wen Chang, Wenlei Bao, Qi Hou, Chengquan Jiang, Ningxin Zheng, Yinmin Zhong, Xuanrun Zhang, Zuquan Song, Ziheng Jiang, Haibin Lin, Xin Jin, and Xin Liu. FLUX: fast software-based communication overlap on gpus through kernel fusion. CoRR, abs\/2406.06858, 2024."},{"key":"e_1_3_2_1_5_1","first-page":"178","volume-title":"Proceedings of the 29th ACM International Conference on Architectural Support for Programming Languages and Operating Systems","volume":"3","author":"Chen Chang","year":"2024","unstructured":"Chang Chen, Xiuhong Li, Qianchao Zhu, Jiangfei Duan, Peng Sun, Xingcheng Zhang, and Chao Yang. Centauri: Enabling efficient sched- uling for communication-computation overlap in large model train- ing via communication partitioning. In Proceedings of the 29th ACM International Conference on Architectural Support for Programming Languages and Operating Systems, Volume 3, pages 178--191, 2024."},{"key":"e_1_3_2_1_6_1","first-page":"34600","article-title":"On the representation collapse of sparse mixture of experts","volume":"35","author":"Chi Zewen","year":"2022","unstructured":"Zewen Chi, Li Dong, Shaohan Huang, Damai Dai, Shuming Ma, Barun Patra, Saksham Singhal, Payal Bajaj, Xia Song, Xian-Ling Mao, et al. On the representation collapse of sparse mixture of experts. 
Advances in Neural Information Processing Systems, 35:34600--34613, 2022.","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"240","key":"e_1_3_2_1_7_1","first-page":"1","article-title":"Palm: Scaling language modeling with pathways","volume":"24","author":"Chowdhery Aakanksha","year":"2023","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. Journal of Machine Learning Research, 24(240):1--113, 2023.","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.489"},{"key":"e_1_3_2_1_9_1","volume-title":"Large scale distributed deep networks. Advances in neural information processing systems, 25","author":"Dean Jeffrey","year":"2012","unstructured":"Jeffrey Dean, Greg Corrado, Rajat Monga, Kai Chen, Matthieu Devin, Mark Mao, Marc'aurelio Ranzato, Andrew Senior, Paul Tucker, Ke Yang, et al. Large scale distributed deep networks. Advances in neural information processing systems, 25, 2012."},{"key":"e_1_3_2_1_10_1","volume-title":"Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model","author":"AI.","year":"2024","unstructured":"DeepSeek-AI. Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model, 2024."},{"key":"e_1_3_2_1_11_1","volume-title":"Corey Lynch, Aakanksha Chowdhery, Brian Ichter, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, et al. Palm-e: An embodied multimodal language model. arXiv preprint arXiv:2303.03378","author":"Driess Danny","year":"2023","unstructured":"Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Brian Ichter, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, et al. Palm-e: An embodied multimodal language model. arXiv preprint arXiv:2303.03378, 2023."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.5555\/3586589.3586709"},{"key":"e_1_3_2_1_13_1","volume-title":"Fastmoe: A fast mixture-of-expert training system. arXiv preprint arXiv:2103.13262","author":"He Jiaao","year":"2021","unstructured":"Jiaao He, Jiezhong Qiu, Aohan Zeng, Zhilin Yang, Jidong Zhai, and Jie Tang. Fastmoe: A fast mixture-of-expert training system. arXiv preprint arXiv:2103.13262, 2021."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3503221.3508418"},{"key":"e_1_3_2_1_15_1","volume-title":"et al. Gpipe: Efficient training of giant neural networks using pipeline parallelism. Advances in neural information processing systems, 32","author":"Huang Yanping","year":"2019","unstructured":"Yanping Huang, Youlong Cheng, Ankur Bapna, Orhan Firat, Dehao Chen, Mia Chen, HyoukJoong Lee, Jiquan Ngiam, Quoc V Le, Yonghui Wu, et al. Gpipe: Efficient training of giant neural networks using pipeline parallelism. Advances in neural information processing systems, 32, 2019."},{"key":"e_1_3_2_1_16_1","volume-title":"Experts weights averaging: A new general training scheme for vision transformers. arXiv preprint arXiv:2308.06093","author":"Huang Yongqi","year":"2023","unstructured":"Yongqi Huang, Peng Ye, Xiaoshui Huang, Sheng Li, Tao Chen, and Wanli Ouyang. Experts weights averaging: A new general training scheme for vision transformers. 
arXiv preprint arXiv:2308.06093, 2023."},{"key":"e_1_3_2_1_17_1","first-page":"5","article-title":"Tutel: Adaptive mixture-of-experts at scale","author":"Hwang Changho","year":"2023","unstructured":"Changho Hwang, Wei Cui, Yifan Xiong, Ziyue Yang, Ze Liu, Han Hu, Zilong Wang, Rafael Salas, Jithin Jose, Prabhat Ram, et al. Tutel: Adaptive mixture-of-experts at scale. Proceedings of Machine Learning and Systems, 5, 2023.","journal-title":"Proceedings of Machine Learning and Systems"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/3503222.3507778"},{"key":"e_1_3_2_1_19_1","volume-title":"Proc. of Workshop on Systems for ML and Open Source Software, collocated with NeurIPS 2018","author":"Jia Xianyan","year":"2018","unstructured":"Xianyan Jia, Shutao Song, Shaohuai Shi, Wei He, Yangzihao Wang, Haidong Rong, Feihu Zhou, Liqiang Xie, Zhenyu Guo, Yuanzhou Yang, Liwei Yu, Tiegang Chen, Guangxiao Hu, and Xiaowen Chu. Highly scalable deep learning training system with mixed-precision: Training ImageNet in four minutes. In Proc. of Workshop on Systems for ML and Open Source Software, collocated with NeurIPS 2018, 2018."},{"key":"e_1_3_2_1_20_1","volume-title":"Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088","author":"Jiang Albert Q","year":"2024","unstructured":"Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv preprint arXiv:2401.04088, 2024."},{"key":"e_1_3_2_1_21_1","first-page":"74","article-title":"Accelerating mixture-of-experts training by overlapping weight gradient computation and all-to-all communication","volume":"6","author":"Jiang Chenyu","year":"2024","unstructured":"Chenyu Jiang, Ye Tian, Zhen Jia, Chuan Wu, Yida Wang, and Shuai Zheng. Lancet: Accelerating mixture-of-experts training by overlapping weight gradient computation and all-to-all communication. Proceedings of Machine Learning and Systems, 6:74--86, 2024.","journal-title":"Proceedings of Machine Learning and Systems"},{"key":"e_1_3_2_1_22_1","volume-title":"International Conference on Learning Representations","author":"Lepikhin Dmitry","year":"2020","unstructured":"Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan Firat, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. Gshard: Scaling giant models with conditional computation and automatic sharding. In International Conference on Learning Representations, 2020."},{"key":"e_1_3_2_1_23_1","first-page":"6265","volume-title":"International Conference on Machine Learning","author":"Lewis Mike","year":"2021","unstructured":"Mike Lewis, Shruti Bhosale, Tim Dettmers, Naman Goyal, and Luke Zettlemoyer. BASE layers: Simplifying training of large, sparse models. In International Conference on Machine Learning, pages 6265--6274. PMLR, 2021."},{"key":"e_1_3_2_1_24_1","first-page":"945","volume-title":"USENIX Annual Technical Conference","author":"Li Jiamin","year":"2023","unstructured":"Jiamin Li, Yimin Jiang, Yibo Zhu, Cong Wang, and Hong Xu. Accelerating distributed MoE training and inference with Lina. 
In USENIX Annual Technical Conference, pages 945--959, 2023."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1145\/3603269.3604869"},{"key":"e_1_3_2_1_26_1","first-page":"13782","volume-title":"International Conference on Machine Learning","author":"Liu Rui","year":"2022","unstructured":"Rui Liu, Young Jin Kim, Alexandre Muzio, and Hany Hassan. Gating dropout: Communication-efficient regularization for sparsely activated transformers. In International Conference on Machine Learning, pages 13782--13792. PMLR, 2022."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1145\/3219819.3220007"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/3503221.3508417"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3458817.3476209"},{"key":"e_1_3_2_1_30_1","volume-title":"Flexmoe: Scaling large-scale sparse pre-trained model training via dynamic device placement. Proceedings of the ACM on Management of Data, 1(1):1--19","author":"Nie Xiaonan","year":"2023","unstructured":"Xiaonan Nie, Xupeng Miao, Zilong Wang, Zichao Yang, Jilong Xue, Lingxiao Ma, Gang Cao, and Bin Cui. Flexmoe: Scaling large-scale sparse pre-trained model training via dynamic device placement. Proceedings of the ACM on Management of Data, 1(1):1--19, 2023."},{"key":"e_1_3_2_1_31_1","volume-title":"Hetumoe: An efficient trillion-scale mixture-of-expert distributed training system. arXiv preprint arXiv:2203.14685","author":"Nie Xiaonan","year":"2022","unstructured":"Xiaonan Nie, Pinxue Zhao, Xupeng Miao, Tong Zhao, and Bin Cui. Hetumoe: An efficient trillion-scale mixture-of-expert distributed training system. arXiv preprint arXiv:2203.14685, 2022."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1007\/b98874"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM52122.2024.10621327"},{"key":"e_1_3_2_1_34_1","volume-title":"ACM","author":"Pati Suchita","year":"2024","unstructured":"Suchita Pati, Shaizeen Aga, Mahzabeen Islam, Nuwan Jayasena, and Matthew D. Sinclair. T3: transparent tracking & triggering for fine-grained overlap of compute & collectives. In ASPLOS (2), pages 1146--1164. ACM, 2024."},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-30504-7_8"},{"key":"e_1_3_2_1_36_1","volume-title":"From sparse to soft mixtures of experts. arXiv preprint arXiv:2308.00951","author":"Puigcerver Joan","year":"2023","unstructured":"Joan Puigcerver, Carlos Riquelme, Basil Mustafa, and Neil Houlsby. From sparse to soft mixtures of experts. arXiv preprint arXiv:2308.00951, 2023."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/SC41406.2024.00094"},{"issue":"8","key":"e_1_3_2_1_38_1","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford Alec","year":"2019","unstructured":"Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019.","journal-title":"OpenAI blog"},{"key":"e_1_3_2_1_39_1","first-page":"18332","volume-title":"International Conference on Machine Learning","author":"Rajbhandari Samyam","year":"2022","unstructured":"Samyam Rajbhandari, Conglong Li, Zhewei Yao, Minjia Zhang, Reza Yazdani Aminabadi, Ammar Ahmad Awan, Jeff Rasley, and Yuxiong He. Deepspeed-moe: Advancing mixture-of-experts inference and training to power next-generation ai scale. 
In International Conference on Machine Learning, pages 18332--18346. PMLR, 2022."},{"key":"e_1_3_2_1_40_1","volume-title":"International Conference on Learning Representations","author":"Shazeer Noam","year":"2016","unstructured":"Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. In International Conference on Learning Representations, 2016."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM42981.2021.9488803"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM53939.2023.10228874"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1145\/3627703.3650083"},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1145\/3577193.3593704"},{"key":"e_1_3_2_1_45_1","volume-title":"Stanford alpaca: An instruction-following llama model. https:\/\/github.com\/tatsu-lab\/stanford_alpaca","author":"Taori Rohan","year":"2023","unstructured":"Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li, Carlos Guestrin, Percy Liang, and Tatsunori B. Hashimoto. Stanford alpaca: An instruction-following llama model. https:\/\/github.com\/tatsu-lab\/stanford_alpaca, 2023."},{"key":"e_1_3_2_1_46_1","volume-title":"Attention is all you need. Advances in neural information processing systems, 30","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017."},{"key":"e_1_3_2_1_47_1","first-page":"93","volume-title":"ASPLOS","author":"Wang Shibo","year":"2023","unstructured":"Shibo Wang, Jinliang Wei, Amit Sabne, Andy Davis, Berkin Ilbeyi, Blake Hechtman, Dehao Chen, Karthik Srinivasa Murthy, Marcello Maggioni, Qiao Zhang, Sameer Kumar, Tongfei Guo, Yuanzhong Xu, and Zongwei Zhou. Overlap communication with dependent computation via decomposition in large deep learning models. In ASPLOS, pages 93--106. ACM, 2023."},{"key":"e_1_3_2_1_48_1","volume-title":"International Conference on Learning Representations","author":"You Yang","year":"2020","unstructured":"Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training BERT in 76 minutes. In International Conference on Learning Representations, 2020."},{"key":"e_1_3_2_1_49_1","volume-title":"Speechmoe: Scaling to large acoustic models with dynamic routing mixture of experts. arXiv preprint arXiv:2105.03036","author":"You Zhao","year":"2021","unstructured":"Zhao You, Shulin Feng, Dan Su, and Dong Yu. Speechmoe: Scaling to large acoustic models with dynamic routing mixture of experts. arXiv preprint arXiv:2105.03036, 2021."},{"key":"e_1_3_2_1_50_1","first-page":"961","volume-title":"USENIX Annual Technical Conference","author":"Zhai Mingshu","year":"2023","unstructured":"Mingshu Zhai, Jiaao He, Zixuan Ma, Zan Zong, Runqing Zhang, and Jidong Zhai. SmartMoE: Efficiently training Sparsely-Activated models through combining offline and online parallelization. In USENIX Annual Technical Conference, pages 961--975, 2023."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1145\/3600006.3613139"},{"key":"e_1_3_2_1_52_1","volume-title":"Andrew Dai, Zhifeng Chen, Quoc Le, and James Laudon. 
Mixture-of-experts with expert choice routing. arXiv preprint arXiv:2202.09368","author":"Zhou Yanqi","year":"2022","unstructured":"Yanqi Zhou, Tao Lei, Hanxiao Liu, Nan Du, Yanping Huang, Vincent Zhao, Andrew Dai, Zhifeng Chen, Quoc Le, and James Laudon. Mixture-of-experts with expert choice routing. arXiv preprint arXiv:2202.09368, 2022."},{"key":"e_1_3_2_1_53_1","volume-title":"International Conference on Learning Representations","author":"Zuo Simiao","year":"2021","unstructured":"Simiao Zuo, Xiaodong Liu, Jian Jiao, Young Jin Kim, Hany Hassan, Ruofei Zhang, Jianfeng Gao, and Tuo Zhao. Taming sparsely activated transformer with stochastic experts. In International Conference on Learning Representations, 2021."}],"event":{"name":"ASPLOS '25: 30th ACM International Conference on Architectural Support for Programming Languages and Operating Systems","location":"Rotterdam Netherlands","acronym":"ASPLOS '25","sponsor":["SIGPLAN ACM Special Interest Group on Programming Languages","SIGOPS ACM Special Interest Group on Operating Systems","SIGARCH ACM Special Interest Group on Computer Architecture"]},"container-title":["Proceedings of the 30th ACM International Conference on Architectural Support for Programming Languages and Operating Systems, Volume 1"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3669940.3707272","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3669940.3707272","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T14:47:58Z","timestamp":1755787678000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3669940.3707272"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3,30]]},"references-count":53,"alternative-id":["10.1145\/3669940.3707272","10.1145\/3669940"],"URL":"https:\/\/doi.org\/10.1145\/3669940.3707272","relation":{},"subject":[],"published":{"date-parts":[[2025,3,30]]},"assertion":[{"value":"2025-03-30","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}