{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T06:32:50Z","timestamp":1774074770580,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":83,"publisher":"ACM","funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2022YFB4500700"],"award-info":[{"award-number":["2022YFB4500700"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Scientific Research Innovation Capability Support Project for Young Faculty","award":["ZYGXQNJSKYCXNLZCXM-I1"],"award-info":[{"award-number":["ZYGXQNJSKYCXNLZCXM-I1"]}]},{"name":"Fundamental Research Funds for the Central Universities, Peking University"},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62172008"],"award-info":[{"award-number":["62172008"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62325201"],"award-info":[{"award-number":["62325201"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,9,8]]},"DOI":"10.1145\/3718958.3750506","type":"proceedings-article","created":{"date-parts":[[2025,8,27]],"date-time":"2025-08-27T16:54:11Z","timestamp":1756313651000},"page":"592-608","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["MegaScale-Infer: Efficient Mixture-of-Experts Model Serving with Disaggregated Expert 
Parallelism"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1389-7884","authenticated-orcid":false,"given":"Ruidong","family":"Zhu","sequence":"first","affiliation":[{"name":"School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-7732-4391","authenticated-orcid":false,"given":"Ziheng","family":"Jiang","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Seattle, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-1355-4995","authenticated-orcid":false,"given":"Chao","family":"Jin","sequence":"additional","affiliation":[{"name":"School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-2446-5264","authenticated-orcid":false,"given":"Peng","family":"Wu","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, San Jose, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5052-0077","authenticated-orcid":false,"given":"Cesar A.","family":"Stuardo","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Seattle, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-6446-700X","authenticated-orcid":false,"given":"Dongyang","family":"Wang","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-2279-4258","authenticated-orcid":false,"given":"Xinlei","family":"Zhang","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-1620-4110","authenticated-orcid":false,"given":"Huaping","family":"Zhou","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-4093-3187","authenticated-orcid":false,"given":"Haoran","family":"Wei","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Shanghai, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-3320-7050","authenticated-orcid":false,"given":"Yang","family":"Cheng","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-0140-184X","authenticated-orcid":false,"given":"Jianzhe","family":"Xiao","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Seattle, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-5068-6818","authenticated-orcid":false,"given":"Xinyi","family":"Zhang","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-7280-1304","authenticated-orcid":false,"given":"Lingjun","family":"Liu","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Seattle, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4879-5335","authenticated-orcid":false,"given":"Haibin","family":"Lin","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Seattle, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6515-6733","authenticated-orcid":false,"given":"Li-Wen","family":"Chang","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Seattle, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-3395-3624","authenticated-orcid":false,"given":"Jianxi","family":"Ye","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Seattle, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-8474-0018","authenticated-orcid":false,"given":"Xiao","family":"Yu","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, San Jose, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7908-8484","authenticated-orcid":false,"given":"Xuanzhe","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8741-5847","authenticated-orcid":false,"given":"Xin","family":"Jin","sequence":"additional","affiliation":[{"name":"School of Computer Science, Peking University, 
Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-8346-3323","authenticated-orcid":false,"given":"Xin","family":"Liu","sequence":"additional","affiliation":[{"name":"ByteDance Ltd, Seattle, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,8,27]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"2022. NCCL GDR Flush Operation. https:\/\/github.com\/NVIDIA\/nccl\/issues\/683. (2022)."},{"key":"e_1_3_2_1_2_1","unstructured":"2022. NVIDIA GPUDirect async. https:\/\/developer.nvidia.com\/blog\/improving-network-performance-of-hpc-systems-using-nvidia-magnum-io-nvshmem-and-gpudirect-async\/. (2022)."},{"key":"e_1_3_2_1_3_1","unstructured":"2025. ChatGPT. https:\/\/chatgpt.com\/. (2025)."},{"key":"e_1_3_2_1_4_1","unstructured":"2025. CUDA driver API: cuStreamWaitValue32. https:\/\/docs.nvidia.com\/cuda\/cuda-driver-api\/group__CUDA__MEMOP.html#group__CUDA__MEMOP_1g629856339de7bc6606047385addbb398. (2025)."},{"key":"e_1_3_2_1_5_1","unstructured":"2025. CUDA runtime API: cudaEventQuery. https:\/\/docs.nvidia.com\/cuda\/cuda-runtime-api\/group__CUDART__EVENT.html#group__CUDART__EVENT_1g2bf738909b4a059023537eaa29d8a5b7. (2025)."},{"key":"e_1_3_2_1_6_1","unstructured":"2025. Cursor. https:\/\/www.cursor.com\/. (2025)."},{"key":"e_1_3_2_1_7_1","unstructured":"2025. DeepEP: an efficient expert-parallel communication library. https:\/\/github.com\/deepseek-ai\/DeepEP. (2025)."},{"key":"e_1_3_2_1_8_1","unstructured":"2025. Gemini. https:\/\/gemini.google.com\/app. (2025)."},{"key":"e_1_3_2_1_9_1","unstructured":"2025. GitHub Copilot. https:\/\/github.com\/features\/copilot. (2025)."},{"key":"e_1_3_2_1_10_1","unstructured":"2025. NCCL Data Transfer Between GPU and Proxy. https:\/\/github.com\/NVIDIA\/nccl\/issues\/852. (2025)."},{"key":"e_1_3_2_1_11_1","unstructured":"2025. NCCL Group Operations. https:\/\/docs.nvidia.com\/deeplearning\/nccl\/user-guide\/docs\/usage\/groups.html. (2025)."},{"key":"e_1_3_2_1_12_1","unstructured":"2025. 
NCCL Optimized primitives for inter-GPU communication. https:\/\/github.com\/NVIDIA\/nccl\/. (2025)."},{"key":"e_1_3_2_1_13_1","unstructured":"2025. NCCL User-buffer Registration. https:\/\/docs.nvidia.com\/deeplearning\/nccl\/user-guide\/docs\/usage\/bufferreg.html. (2025)."},{"key":"e_1_3_2_1_14_1","unstructured":"2025. NVIDIA Available RDMA Operations. https:\/\/docs.nvidia.com\/networking\/display\/rdmaawareprogrammingv17\/available+communication+operations. (2025)."},{"key":"e_1_3_2_1_15_1","unstructured":"2025. NVIDIA GDR Copy. https:\/\/github.com\/NVIDIA\/gdrcopy. (2025)."},{"key":"e_1_3_2_1_16_1","unstructured":"2025. NVIDIA GPUDirect. https:\/\/developer.nvidia.com\/gpudirect. (2025)."},{"key":"e_1_3_2_1_17_1","unstructured":"2025. NVIDIA PTX. https:\/\/developer.nvidia.com\/blog\/understanding-ptx-the-assembly-language-of-cuda-gpu-computing\/. (2025)."},{"key":"e_1_3_2_1_18_1","unstructured":"2025. NVIDIA RDMA Key Concepts. https:\/\/docs.nvidia.com\/networking\/display\/rdmaawareprogrammingv17\/key+concepts. (2025)."},{"key":"e_1_3_2_1_19_1","unstructured":"2025. NVIDIA Streaming multiprocessors. https:\/\/docs.nvidia.com\/cuda\/ampere-tuning-guide\/index.html#streaming-multiprocessor. (2025)."},{"key":"e_1_3_2_1_20_1","unstructured":"2025. NVIDIA Triton Inference Server. https:\/\/developer.nvidia.com\/triton-inference-server. (2025)."},{"key":"e_1_3_2_1_21_1","unstructured":"2025. OFED Performance Tests. https:\/\/github.com\/linux-rdma\/perftest. (2025)."},{"key":"e_1_3_2_1_22_1","unstructured":"2025. Torch Custom C++ and CUDA Extensions. https:\/\/pytorch.org\/tutorials\/advanced\/cpp_extension.html. (2025)."},{"key":"e_1_3_2_1_23_1","unstructured":"Amey Agrawal Nitin Kedia Ashish Panwar Jayashree Mohan Nipun Kwatra Bhargav Gulavani Alexey Tumanov and Ramachandran Ramjee. 2024. Taming Throughput-Latency Tradeoff in LLM Inference with Sarathi-Serve. 
In USENIX OSDI."},{"key":"e_1_3_2_1_24_1","volume-title":"Gqa: Training generalized multi-query transformer models from multi-head checkpoints. arXiv preprint arXiv:2305.13245","author":"Ainslie Joshua","year":"2023","unstructured":"Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebr\u00f3n, and Sumit Sanghai. 2023. Gqa: Training generalized multi-query transformer models from multi-head checkpoints. arXiv preprint arXiv:2305.13245 (2023)."},{"key":"e_1_3_2_1_25_1","unstructured":"Anthropic. 2025. Introducing the next generation of Claude. https:\/\/www.anthropic.com\/news\/claude-3-family. (2025)."},{"key":"e_1_3_2_1_26_1","volume-title":"IEEE International Conference on Cluster Computing.","author":"Beckman Pete","year":"2006","unstructured":"Pete Beckman, Kamil Iskra, Kazutomo Yoshii, and Susan Coghlan. 2006. The influence of operating systems on the performance of collective operations at extreme scale. In IEEE International Conference on Cluster Computing."},{"key":"e_1_3_2_1_27_1","unstructured":"Zixian Cai Zhengyang Liu Saeed Maleki Madanlal Musuvathi Todd Mytkowicz Jacob Nelson and Olli Saarikivi. 2021. Synthesizing optimal collective algorithms. In ACM PPoPP."},{"key":"e_1_3_2_1_28_1","volume-title":"MoE-Lightning: High-Throughput MoE Inference on Memory-constrained GPUs. arXiv preprint arXiv:2411.11217","author":"Cao Shiyi","year":"2024","unstructured":"Shiyi Cao, Shu Liu, Tyler Griggs, Peter Schafhalter, Xiaoxuan Liu, Ying Sheng, Joseph E Gonzalez, Matei Zaharia, and Ion Stoica. 2024. MoE-Lightning: High-Throughput MoE Inference on Memory-constrained GPUs. arXiv preprint arXiv:2411.11217 (2024)."},{"key":"e_1_3_2_1_29_1","volume-title":"Flux: Fast software-based communication overlap on gpus through kernel fusion. 
arXiv preprint arXiv:2406.06858","author":"Chang Li-Wen","year":"2024","unstructured":"Li-Wen Chang, Wenlei Bao, Qi Hou, Chengquan Jiang, Ningxin Zheng, Yinmin Zhong, Xuanrun Zhang, Zuquan Song, Chengji Yao, Ziheng Jiang, Haibin Lin, Xin Jin, and Xin Liu. 2024. Flux: Fast software-based communication overlap on gpus through kernel fusion. arXiv preprint arXiv:2406.06858 (2024)."},{"key":"e_1_3_2_1_30_1","volume-title":"Centauri: Enabling Efficient Scheduling for Communication-Computation Overlap in Large Model Training via Communication Partitioning. In ACM ASPLOS.","author":"Chen Chang","year":"2024","unstructured":"Chang Chen, Xiuhong Li, Qianchao Zhu, Jiangfei Duan, Peng Sun, Xingcheng Zhang, and Chao Yang. 2024. Centauri: Enabling Efficient Scheduling for Communication-Computation Overlap in Large Model Training via Communication Partitioning. In ACM ASPLOS."},{"key":"e_1_3_2_1_31_1","volume-title":"Efficient and Economic Large Language Model Inference with Attention Offloading. arXiv preprint arXiv:2405.01814","author":"Chen Shaoyuan","year":"2024","unstructured":"Shaoyuan Chen, Yutong Lin, Mingxing Zhang, and Yongwei Wu. 2024. Efficient and Economic Large Language Model Inference with Attention Offloading. arXiv preprint arXiv:2405.01814 (2024)."},{"key":"e_1_3_2_1_32_1","unstructured":"Weihao Cui Zhenhua Han Lingji Ouyang Yichuan Wang Ningxin Zheng Lingxiao Ma Yuqing Yang Fan Yang Jilong Xue Lili Qiu Lidong Zhou Quan Chen Haisheng Tan and Minyi Guo. 2023. Optimizing dynamic neural networks with brainstorm. In USENIX OSDI."},{"key":"e_1_3_2_1_33_1","volume-title":"Flashattention: Fast and memory-efficient exact attention with io-awareness. Advances in Neural Information Processing Systems","author":"Dao Tri","year":"2022","unstructured":"Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher R\u00e9. 2022. Flashattention: Fast and memory-efficient exact attention with io-awareness. 
Advances in Neural Information Processing Systems (2022)."},{"key":"e_1_3_2_1_34_1","volume-title":"A Strong, Economical, and Efficient Mixture-of-Experts Language Model. arXiv preprint arXiv:2405.04434","author":"Aixin Liu AI","year":"2024","unstructured":"DeepSeek-AI, Aixin Liu, Bei Feng, Bin Wang, Bingxuan Wang, Bo Liu, Chenggang Zhao, Chengqi Dengr, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Hanwei Xu, Hao Yang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jin Chen, Jingyang Yuan, Junjie Qiu, Junxiao Song, Kai Dong, Kaige Gao, Kang Guan, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruizhe Pan, Runxin Xu, Ruyi Chen, S. S. Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Size Zheng, T. Wang, Tian Pei, Tian Yuan, Tianyu Sun, W. L. Xiao, Wangding Zeng, Wei An, Wen Liu, Wenfeng Liang, Wenjun Gao, Wentao Zhang, X. Q. Li, Xiangyue Jin, Xianzu Wang, Xiao Bi, Xiaodong Liu, Xiaohan Wang, Xiaojin Shen, Xiaokang Chen, Xiaosha Chen, Xiaotao Nie, Xiaowen Sun, Xiaoxiang Wang, Xin Liu, Xin Xie, Xingkai Yu, Xinnan Song, Xinyi Zhou, Xinyu Yang, Xuan Lu, Xuecheng Su, Y. Wu, Y. K. Li, Y. X. Wei, Y. X. Zhu, Yanhong Xu, Yanping Huang, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Li, Yaohui Wang, Yi Zheng, Yichao Zhang, Yiliang Xiong, Yilong Zhao, Ying He, Ying Tang, Yishi Piao, Yixin Dong, Yixuan Tan, Yiyuan Liu, Yongji Wang, Yongqiang Guo, Yuchen Zhu, Yuduan Wang, Yuheng Zou, Yukun Zha, Yunxian Ma, Yuting Yan, Yuxiang You, Yuxuan Liu, Z. Z. 
Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhen Huang, Zhen Zhang, Zhenda Xie, Zhewen Hao, Zhihong Shao, Zhiniu Wen, Zhipeng Xu, Zhongyu Zhang, Zhuoshu Li, Zihan Wang, Zihui Gu, Zilin Li, and Ziwei Xie. 2024. DeepSeek-V2: A Strong, Economical, and Efficient Mixture-of-Experts Language Model. arXiv preprint arXiv:2405.04434 (2024)."},{"key":"e_1_3_2_1_35_1","volume-title":"Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437","author":"Aixin Liu AI","year":"2024","unstructured":"DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jiawei Wang, Jin Chen, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Junxiao Song, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Litong Wang, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qiancheng Wang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, Runxin Xu, Ruoyu Zhang, Ruyi Chen, S. S. Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu, Shengfeng Ye, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Shuting Pan, T. Wang, Tao Yun, Tian Pei, Tianyu Sun, W. L. Xiao, Wangding Zeng, Wanjia Zhao, Wei An, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, X. Q. 
Li, Xiangyue Jin, Xianzu Wang, Xiao Bi, Xiaodong Liu, Xiaohan Wang, Xiaojin Shen, Xiaokang Chen, Xiaokang Zhang, Xiaosha Chen, Xiaotao Nie, Xiaowen Sun, Xiaoxiang Wang, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xingkai Yu, Xinnan Song, Xinxia Shan, Xinyi Zhou, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, Y. K. Li, Y. Q. Wang, Y. X. Wei, Y. X. Zhu, Yang Zhang, Yanhong Xu, Yanhong Xu, Yanping Huang, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Li, Yaohui Wang, Yi Yu, Yi Zheng, Yichao Zhang, Yifan Shi, Yiliang Xiong, Ying He, Ying Tang, Yishi Piao, Yisong Wang, Yixuan Tan, Yiyang Ma, Yiyuan Liu, Yongqiang Guo, Yu Wu, Yuan Ou, Yuchen Zhu, Yuduan Wang, Yue Gong, Yuheng Zou, Yujia He, Yukun Zha, Yunfan Xiong, Yunxian Ma, Yuting Yan, Yuxiang Luo, Yuxiang You, Yuxuan Liu, Yuyang Zhou, Z. F. Wu, Z. Z. Ren, Zehui Ren, Zhangli Sha, Zhe Fu, Zhean Xu, Zhen Huang, Zhen Zhang, Zhenda Xie, Zhengyan Zhang, Zhewen Hao, Zhibin Gou, Zhicheng Ma, Zhigang Yan, Zhihong Shao, Zhipeng Xu, Zhiyu Wu, Zhongyu Zhang, Zhuoshu Li, Zihui Gu, Zijia Zhu, Zijun Liu, Zilin Li, Ziwei Xie, Ziyang Song, Ziyi Gao, and Zizheng Pan. 2024. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437 (2024)."},{"key":"e_1_3_2_1_36_1","volume-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research","author":"Fedus William","year":"2022","unstructured":"William Fedus, Barret Zoph, and Noam Shazeer. 2022. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. Journal of Machine Learning Research (2022)."},{"key":"e_1_3_2_1_37_1","unstructured":"Yao Fu Leyang Xue Yeqi Huang Andrei-Octavian Brabete Dmitrii Ustiugov Yuvraj Patel and Luo Mai. 2024. ServerlessLLM: Low-Latency Serverless Inference for Large Language Models. In USENIX OSDI."},{"key":"e_1_3_2_1_38_1","unstructured":"Aaron Grattafiori Abhimanyu Dubey Abhinav Jauhri et al. 2024. The llama 3 herd of models. 
arXiv preprint arXiv:2407.21783 (2024)."},{"key":"e_1_3_2_1_39_1","volume-title":"USENIX Conference on File and Storage Technologies.","author":"Gunawi Haryadi S.","year":"2018","unstructured":"Haryadi S. Gunawi, Riza O. Suminto, Russell Sears, Casey Golliher, Swaminathan Sundararaman, Xing Lin, Tim Emami, Weiguang Sheng, Nematollah Bidokhti, Caitie McCaffrey, Gary Grider, Parks M. Fields, Kevin Harms, Robert B. Ross, Andree Jacobson, Robert Ricci, Kirk Webb, Peter Alvaro, H. Birali Runesha, Mingzhe Hao, and Huaicheng Li. 2018. Fail-Slow at Scale: Evidence of Hardware Performance Faults in Large Production Systems. In USENIX Conference on File and Storage Technologies."},{"key":"e_1_3_2_1_40_1","volume-title":"Mira: A program-behavior-guided far memory system. In ACM SOSP.","author":"Guo Zhiyuan","year":"2023","unstructured":"Zhiyuan Guo, Zijian He, and Yiying Zhang. 2023. Mira: A program-behavior-guided far memory system. In ACM SOSP."},{"key":"e_1_3_2_1_41_1","volume-title":"ACM\/SPEC International Conference on Performance Engineering.","author":"Hao Yueming","unstructured":"Yueming Hao, Nikhil Jain, Rob F. Van der Wijngaart, Nirmal R. Saxena, Yuanbo Fan, and Xu Liu. 2023. Drgpu: A top-down profiler for gpu applications. In ACM\/SPEC International Conference on Performance Engineering."},{"key":"e_1_3_2_1_42_1","volume-title":"FastDecode: High-Throughput GPU-Efficient LLM Serving using Heterogeneous Pipelines. arXiv preprint arXiv:2403.11421","author":"He Jiaao","year":"2024","unstructured":"Jiaao He and Jidong Zhai. 2024. FastDecode: High-Throughput GPU-Efficient LLM Serving using Heterogeneous Pipelines. arXiv preprint arXiv:2403.11421 (2024)."},{"key":"e_1_3_2_1_43_1","volume-title":"The International Conference for High Performance Computing, Networking, Storage, and Analysis.","author":"Hoefler Torsten","year":"2010","unstructured":"Torsten Hoefler, Timo Schneider, and Andrew Lumsdaine. 2010. 
Characterizing the influence of system noise on large-scale applications by simulation. In The International Conference for High Performance Computing, Networking, Storage, and Analysis."},{"key":"e_1_3_2_1_44_1","volume-title":"Inference without interference: Disaggregate llm inference for mixed downstream workloads. arXiv preprint arXiv:2401.11181","author":"Hu Cunchen","year":"2024","unstructured":"Cunchen Hu, Heyang Huang, Liangliang Xu, Xusheng Chen, Jiang Xu, Shuang Chen, Hao Feng, Chenxi Wang, Sa Wang, Yungang Bao, Ninghui Sun, and Yizhou Shan. 2024. Inference without interference: Disaggregate llm inference for mixed downstream workloads. arXiv preprint arXiv:2401.11181 (2024)."},{"key":"e_1_3_2_1_45_1","volume-title":"Dehao Chen, HyoukJoong Lee, Jiquan Ngiam, Quoc V. Le, Yonghui Wu, and Zhifeng Chen.","author":"Huang Yanping","year":"2019","unstructured":"Yanping Huang, Youlong Cheng, Ankur Bapna, Orhan Firat, Mia Xu Chen, Dehao Chen, HyoukJoong Lee, Jiquan Ngiam, Quoc V. Le, Yonghui Wu, and Zhifeng Chen. 2019. Gpipe: Efficient training of giant neural networks using pipeline parallelism. Neural Information Processing Systems (2019)."},{"key":"e_1_3_2_1_46_1","volume-title":"Conference on Machine Learning and Systems","author":"Hwang Changho","year":"2023","unstructured":"Changho Hwang, Wei Cui, Yifan Xiong, Ziyue Yang, Ze Liu, Han Hu, Zilong Wang, Rafael Salas, Jithin Jose, Prabhat Ram, Joe Chau, Peng Cheng, Fan Yang, Mao Yang, and Yongqiang Xiong. 2023. Tutel: Adaptive mixture-of-experts at scale. Conference on Machine Learning and Systems (2023)."},{"key":"e_1_3_2_1_47_1","volume-title":"Pre-gated moe: An algorithm-system co-design for fast and scalable mixture-of-expert inference","author":"Hwang Ranggi","unstructured":"Ranggi Hwang, Jianyu Wei, Shijie Cao, Changho Hwang, Xiaohu Tang, Ting Cao, and Mao Yang. 2024. Pre-gated moe: An algorithm-system co-design for fast and scalable mixture-of-expert inference. 
In ACM\/IEEE ISCA."},{"key":"e_1_3_2_1_48_1","volume-title":"Saeed Maleki, Youshan Miao, Madanlal Musuvathi, Todd Mytkowicz, and Olli Saarikivi.","author":"Jangda Abhinav","year":"2022","unstructured":"Abhinav Jangda, Jun Huang, Guodong Liu, Amir Hossein Nodehi Sabet, Saeed Maleki, Youshan Miao, Madanlal Musuvathi, Todd Mytkowicz, and Olli Saarikivi. 2022. Breaking the computation and communication abstraction barrier in distributed machine learning workloads. In ACM ASPLOS."},{"key":"e_1_3_2_1_49_1","unstructured":"Ziheng Jiang Haibin Lin Yinmin Zhong Qi Huang Yangrui Chen Zhi Zhang Yanghua Peng Xiang Li Cong Xie Shibiao Nong Yulu Jia Sun He Hongmin Chen Zhihao Bai Qi Hou Shipeng Yan Ding Zhou Yiyao Sheng Zhuo Jiang Haohan Xu Haoran Wei Zhang Zhang Pengfei Nie Leqi Zou Sida Zhao Liang Xiang Zherui Liu Zhe Li Xiaoying Jia Jianxi Ye Xin Jin and Xin Liu. 2024. MegaScale: Scaling large language model training to more than 10 000 GPUs. In USENIX NSDI."},{"key":"e_1_3_2_1_50_1","volume-title":"Scaling laws for neural language models. arXiv preprint arXiv:2001.08361","author":"Kaplan Jared","year":"2020","unstructured":"Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. 2020. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361 (2020)."},{"key":"e_1_3_2_1_51_1","volume-title":"Joseph Gonzalez, Hao Zhang, and Ion Stoica.","author":"Kwon Woosuk","year":"2023","unstructured":"Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with pagedattention. In ACM SOSP."},{"key":"e_1_3_2_1_52_1","volume-title":"Gshard: Scaling giant models with conditional computation and automatic sharding. 
arXiv preprint arXiv:2006.16668","author":"Lepikhin Dmitry","year":"2020","unstructured":"Dmitry Lepikhin, HyoukJoong Lee, Yuanzhong Xu, Dehao Chen, Orhan Firat, Yanping Huang, Maxim Krikun, Noam Shazeer, and Zhifeng Chen. 2020. Gshard: Scaling giant models with conditional computation and automatic sharding. arXiv preprint arXiv:2006.16668 (2020)."},{"key":"e_1_3_2_1_53_1","unstructured":"Jiamin Li Yimin Jiang Yibo Zhu Cong Wang and Hong Xu. 2023. Accelerating distributed MoE training and inference with lina. In USENIX ATC."},{"key":"e_1_3_2_1_54_1","volume-title":"Infinite-llm: Efficient llm service for long context with distattention and distributed kvcache. arXiv preprint arXiv:2401.02669","author":"Lin Bin","year":"2024","unstructured":"Bin Lin, Chen Zhang, Tao Peng, Hanyu Zhao, Wencong Xiao, Minmin Sun, Anmin Liu, Zhipeng Zhang, Lanbo Li, Xiafei Qiu, Shen Li, Zhigang Ji, Tao Xie, Yong Li, and Wei Lin. 2024. Infinite-llm: Efficient llm service for long context with distattention and distributed kvcache. arXiv preprint arXiv:2401.02669 (2024)."},{"key":"e_1_3_2_1_55_1","volume-title":"Jessie Hui Wang, and Yimin Jiang","author":"Liu Juncai","year":"2023","unstructured":"Juncai Liu, Jessie Hui Wang, and Yimin Jiang. 2023. Janus: A unified distributed training framework for sparse mixture-of-experts models. In ACM SIGCOMM."},{"key":"e_1_3_2_1_56_1","volume-title":"Liangyu Zhao, Vincent Liu, Miguel Castro, Srikanth Kandula, and Luke Marshall.","author":"Liu Xuting","year":"2024","unstructured":"Xuting Liu, Behnaz Arzani, Siva Kesava Reddy Kakarla, Liangyu Zhao, Vincent Liu, Miguel Castro, Srikanth Kandula, and Luke Marshall. 2024. Rethinking machine learning collective communication as a multi-commodity flow problem. In ACM SIGCOMM."},{"key":"e_1_3_2_1_57_1","unstructured":"NVIDIA. 2024. NVIDIA TensorRT-LLM. https:\/\/docs.nvidia.com\/tensorrt-llm\/index.html. (2024)."},{"key":"e_1_3_2_1_58_1","unstructured":"OpenAI. 2025. Introducing ChatGPT search. 
https:\/\/openai.com\/index\/introducing-chatgpt-search. (2025)."},{"key":"e_1_3_2_1_59_1","unstructured":"OpenAI Josh Achiam Steven Adler et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023)."},{"key":"e_1_3_2_1_60_1","volume-title":"Splitwise: Efficient generative llm inference using phase splitting","author":"Patel Pratyush","year":"2024","unstructured":"Pratyush Patel, Esha Choukse, Chaojie Zhang, Aashaka Shah, \u00cd\u00f1igo Goiri, Saeed Maleki, and Ricardo Bianchini. 2024. Splitwise: Efficient generative llm inference using phase splitting. In ACM\/IEEE ISCA."},{"key":"e_1_3_2_1_61_1","volume-title":"Mooncake: Kimi's KVCache-centric Architecture for LLM Serving. arXiv preprint arXiv:2407.00079","author":"Qin Ruoyu","year":"2024","unstructured":"Ruoyu Qin, Zheming Li, Weiran He, Mingxing Zhang, Yongwei Wu, Weimin Zheng, and Xinran Xu. 2024. Mooncake: Kimi's KVCache-centric Architecture for LLM Serving. arXiv preprint arXiv:2407.00079 (2024)."},{"key":"e_1_3_2_1_62_1","volume-title":"Ammar Ahmad Awan, Jeff Rasley, and Yuxiong He.","author":"Rajbhandari Samyam","year":"2022","unstructured":"Samyam Rajbhandari, Conglong Li, Zhewei Yao, Minjia Zhang, Reza Yazdani Aminabadi, Ammar Ahmad Awan, Jeff Rasley, and Yuxiong He. 2022. DeepSpeed-MoE: Advancing Mixture-of-Experts Inference and Training to Power Next-Generation AI Scale. arXiv preprint arXiv:2201.05596 (2022)."},{"key":"e_1_3_2_1_63_1","volume-title":"TACCL: Guiding Collective Algorithm Synthesis using Communication Sketches. In USENIX NSDI.","author":"Shah Aashaka","year":"2023","unstructured":"Aashaka Shah, Vijay Chidambaram, Meghan Cowan, Saeed Maleki, Madan Musuvathi, Todd Mytkowicz, Jacob Nelson, Olli Saarikivi, and Rachee Singh. 2023. TACCL: Guiding Collective Algorithm Synthesis using Communication Sketches. In USENIX NSDI."},{"key":"e_1_3_2_1_64_1","unstructured":"Yizhou Shan Yutong Huang Yilun Chen and Yiying Zhang. 2018. 
LegoOS: A disseminated distributed OS for hardware resource disaggregation. In USENIX OSDI."},{"key":"e_1_3_2_1_65_1","volume-title":"Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053","author":"Shoeybi Mohammad","year":"2019","unstructured":"Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. 2019. Megatron-lm: Training multi-billion parameter language models using model parallelism. arXiv preprint arXiv:1909.08053 (2019)."},{"key":"e_1_3_2_1_66_1","volume-title":"Accelerator-Rich Systems. In The International Conference for High Performance Computing, Networking, Storage, and Analysis.","author":"Sinha Prasoon","year":"2022","unstructured":"Prasoon Sinha, Akhil Guliani, Rutwik Jain, Brandon Tran, Matthew D. Sinclair, and Shivaram Venkataraman. 2022. Not All GPUs Are Created Equal: Characterizing Variability in Large-Scale, Accelerator-Rich Systems. In The International Conference for High Performance Computing, Networking, Storage, and Analysis."},{"key":"e_1_3_2_1_67_1","volume-title":"Fault-tolerant Generative LLM Serving. In International Conference on Machine Learning (ICML).","author":"Strati Foteini","year":"2024","unstructured":"Foteini Strati, Sara McAllister, Amar Phanishayee, Jakub Tarnawski, and Ana Klimovic. 2024. D\u00e9j\u00e0Vu: KV-cache Streaming for Fast, Fault-tolerant Generative LLM Serving. In International Conference on Machine Learning (ICML)."},{"key":"e_1_3_2_1_68_1","volume-title":"Llumnix: Dynamic Scheduling for Large Language Model Serving. In USENIX OSDI.","author":"Sun Biao","year":"2024","unstructured":"Biao Sun, Ziming Huang, Hanyu Zhao, Wencong Xiao, Xinyi Zhang, Yong Li, and Wei Lin. 2024. Llumnix: Dynamic Scheduling for Large Language Model Serving. In USENIX OSDI."},{"key":"e_1_3_2_1_69_1","unstructured":"Xiongchao Tang Jidong Zhai Xuehai Qian Bingsheng He Wei Xue and Wenguang Chen. 2018. 
Vsensor: leveraging fixed-workload snippets of programs for performance variance detection. In ACM PPoPP."},{"key":"e_1_3_2_1_70_1","unstructured":"Mistral AI team. 2024. Mixtral 8x22B. https:\/\/mistral.ai\/news\/mixtral-8x22b. (2024)."},{"key":"e_1_3_2_1_71_1","unstructured":"Mosaic Research Team. 2024. Introducing DBRX: A New State-of-the-Art Open LLM. https:\/\/www.databricks.com\/blog\/introducing-dbrx-new-state-art-open-llm. (2024)."},{"key":"e_1_3_2_1_72_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume Lample. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_73_1","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale Dan Bikel Lukas Blecher Cristian Canton Ferrer Moya Chen Guillem Cucurull David Esiobu Jude Fernandes Jeremy Fu Wenyin Fu Brian Fuller Cynthia Gao Vedanuj Goswami Naman Goyal Anthony Hartshorn Saghar Hosseini Rui Hou Hakan Inan Marcin Kardas Viktor Kerkez Madian Khabsa Isabel Kloumann Artem Korenev Punit Singh Koura Marie-Anne Lachaux Thibaut Lavril Jenya Lee Diana Liskovich Yinghai Lu Yuning Mao Xavier Martinet Todor Mihaylov Pushkar Mishra Igor Molybog Yixin Nie Andrew Poulton Jeremy Reizenstein Rashi Rungta Kalyan Saladi Alan Schelten Ruan Silva Eric Michael Smith Ranjan Subramanian Xiaoqing Ellen Tan Binh Tang Ross Taylor Adina Williams Jian Xiang Kuan Puxin Xu Zheng Yan Iliyan Zarov Yuchen Zhang Angela Fan Melanie Kambadur Sharan Narang Aurelien Rodriguez Robert Stojnic Sergey Edunov and Thomas Scialom. 2023. 
Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_1_74_1","doi-asserted-by":"publisher","DOI":"10.1145\/1498765.1498785"},{"key":"e_1_3_2_1_75_1","unstructured":"Bingyang Wu Shengyu Liu Yinmin Zhong Peng Sun Xuanzhe Liu and Xin Jin. 2024. LoongServe: Efficiently Serving Long-Context Large Language Models with Elastic Sequence Parallelism. In ACM SOSP."},{"key":"e_1_3_2_1_76_1","unstructured":"Bingyang Wu Ruidong Zhu Zili Zhang Peng Sun Xuanzhe Liu and Xin Jin. 2024. dLoRA: Dynamically Orchestrating Requests and Adapters for LoRA LLM Serving. In USENIX OSDI."},{"key":"e_1_3_2_1_77_1","volume-title":"Gspmd: general and scalable parallelization for ml computation graphs. arXiv preprint arXiv:2105.04663","author":"Xu Yuanzhong","year":"2021","unstructured":"Yuanzhong Xu, HyoukJoong Lee, Dehao Chen, Blake Hechtman, Yanping Huang, Rahul Joshi, Maxim Krikun, Dmitry Lepikhin, Andy Ly, Marcello Maggioni, Ruoming Pang, Noam Shazeer, Shibo Wang, Tao Wang, Yonghui Wu, and Zhifeng Chen. 2021. Gspmd: general and scalable parallelization for ml computation graphs. arXiv preprint arXiv:2105.04663 (2021)."},{"key":"e_1_3_2_1_78_1","volume-title":"GVARP: Detecting Performance Variance on Large-Scale Heterogeneous Systems. In The International Conference for High Performance Computing, Networking, Storage, and Analysis.","author":"You Xin","year":"2024","unstructured":"Xin You, Zhibo Xuan, Hailong Yang, Zhongzhi Luan, Yi Liu, and Depei Qian. 2024. GVARP: Detecting Performance Variance on Large-Scale Heterogeneous Systems. In The International Conference for High Performance Computing, Networking, Storage, and Analysis."},{"key":"e_1_3_2_1_79_1","volume-title":"Geon-Woo Kim, Soojeong Kim, and Byung-Gon Chun.","author":"Yu Gyeong-In","year":"2022","unstructured":"Gyeong-In Yu, Joo Seong Jeong, Geon-Woo Kim, Soojeong Kim, and Byung-Gon Chun. 2022. Orca: A distributed serving system for Transformer-Based generative models. 
In USENIX OSDI."},{"key":"e_1_3_2_1_80_1","unstructured":"Chenggang Zhao Chengqi Deng Chong Ruan Damai Dai Huazuo Gao Jiashi Li Liyue Zhang Panpan Huang Shangyan Zhou Shirong Ma Wenfeng Liang Ying He Yuqing Wang Yuxuan Liu and Y. X. Wei. 2025. Insights into DeepSeek-V3: Scaling Challenges and Reflections on Hardware for AI Architectures. arXiv preprint arXiv:2505.09343 (2025)."},{"key":"e_1_3_2_1_81_1","volume-title":"Shuaiwen Leon Song, and Wenguang Chen","author":"Zheng Liyan","year":"2022","unstructured":"Liyan Zheng, Jidong Zhai, Xiongchao Tang, Haojie Wang, Teng Yu, Yuyang Jin, Shuaiwen Leon Song, and Wenguang Chen. 2022. Vapro: performance variance detection and diagnosis for production-run parallel applications. In ACM PPoPP."},{"key":"e_1_3_2_1_82_1","unstructured":"Yinmin Zhong Shengyu Liu Junda Chen Jianbo Hu Yibo Zhu Xuanzhe Liu Xin Jin and Hao Zhang. 2024. DistServe: Disaggregating Prefill and Decoding for Goodput-optimized Large Language Model Serving. In USENIX OSDI."},{"key":"e_1_3_2_1_83_1","volume-title":"IEEE\/ACM International Symposium on Code Generation and Optimization.","author":"Zhou Keren","year":"2021","unstructured":"Keren Zhou, Xiaozhu Meng, Ryuichi Sai, and John Mellor-Crummey. 2021. Gpa: A gpu performance advisor based on instruction sampling. 
In IEEE\/ACM International Symposium on Code Generation and Optimization."}],"event":{"name":"SIGCOMM '25: ACM SIGCOMM 2025 Conference","location":"S\u00e3o Francisco Convent Coimbra Portugal","acronym":"SIGCOMM '25","sponsor":["SIGCOMM ACM Special Interest Group on Data Communication"]},"container-title":["Proceedings of the ACM SIGCOMM 2025 Conference"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3718958.3750506","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,27]],"date-time":"2025-08-27T16:57:01Z","timestamp":1756313821000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3718958.3750506"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,27]]},"references-count":83,"alternative-id":["10.1145\/3718958.3750506","10.1145\/3718958"],"URL":"https:\/\/doi.org\/10.1145\/3718958.3750506","relation":{},"subject":[],"published":{"date-parts":[[2025,8,27]]},"assertion":[{"value":"2025-08-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}