{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,25]],"date-time":"2026-04-25T08:45:24Z","timestamp":1777106724221,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":56,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,9,8]]},"DOI":"10.1145\/3718958.3754352","type":"proceedings-article","created":{"date-parts":[[2025,8,27]],"date-time":"2025-08-27T16:54:11Z","timestamp":1756313651000},"page":"963-978","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":4,"title":["ByteScale: Communication-Efficient Scaling of LLM Training with a 2048K Context Length on 16384 GPUs"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-3367-7486","authenticated-orcid":false,"given":"Hao","family":"Ge","sequence":"first","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3664-6615","authenticated-orcid":false,"given":"Junda","family":"Feng","sequence":"additional","affiliation":[{"name":"ByteDance Inc., Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-2143-7494","authenticated-orcid":false,"given":"Qi","family":"Huang","sequence":"additional","affiliation":[{"name":"ByteDance Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1658-0380","authenticated-orcid":false,"given":"Fangcheng","family":"Fu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6766-757X","authenticated-orcid":false,"given":"Xiaonan","family":"Nie","sequence":"additional","affiliation":[{"name":"ByteDance Inc., San Jose, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7423-1675","authenticated-orcid":false,"given":"Lei","family":"Zuo","sequence":"additional","affiliation":[{"name":"ByteDance Inc., Seattle, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4879-5335","authenticated-orcid":false,"given":"Haibin","family":"Lin","sequence":"additional","affiliation":[{"name":"ByteDance Inc., Seattle, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1681-4677","authenticated-orcid":false,"given":"Bin","family":"Cui","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-8346-3323","authenticated-orcid":false,"given":"Xin","family":"Liu","sequence":"additional","affiliation":[{"name":"ByteDance Inc., Seattle, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,8,27]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"TensorFlow: A System for Large-Scale Machine Learning. In 12th USENIX Symposium on Operating Systems Design and Implementation (OSDI","author":"Abadi Mart\u00edn","year":"2016","unstructured":"Mart\u00edn Abadi, Paul Barham, Jianmin Chen, Zhifeng Chen, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Geoffrey Irving, Michael Isard, Manjunath Kudlur, Josh Levenberg, Rajat Monga, Sherry Moore, Derek Gordon Murray, Benoit Steiner, Paul A. Tucker, Vijay Vasudevan, Pete Warden, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. 2016. TensorFlow: A System for Large-Scale Machine Learning. In 12th USENIX Symposium on Operating Systems Design and Implementation (OSDI 2016). 265\u2013283."},{"key":"e_1_3_2_1_2_1","volume-title":"LLM-Deliberation: Evaluating LLMs with Interactive Multi-Agent Negotiation Games. 
CoRR","author":"Abdelnabi Sahar","year":"2023","unstructured":"Sahar Abdelnabi, Amr Gomaa, Sarath Sivaprasad, Lea Sch\u00f6nherr, and Mario Fritz. 2023. LLM-Deliberation: Evaluating LLMs with Interactive Multi-Agent Negotiation Games. CoRR (2023)."},{"key":"e_1_3_2_1_3_1","volume-title":"Mistral: Tokenization. https:\/\/docs.mistral.ai\/guides\/tokenization\/.","author":"Mistral","year":"2024","unstructured":"Mistral AI. 2024. Mistral: Tokenization. https:\/\/docs.mistral.ai\/guides\/tokenization\/."},{"key":"e_1_3_2_1_4_1","unstructured":"Anthropic. 2024. Introducing the next generation of Claude. https:\/\/www.anthropic.com\/news\/claude-3-family."},{"key":"e_1_3_2_1_5_1","volume-title":"Longformer: The Long-Document Transformer. CoRR abs\/2004.05150","author":"Beltagy Iz","year":"2020","unstructured":"Iz Beltagy, Matthew E. Peters, and Arman Cohan. 2020. Longformer: The Long-Document Transformer. CoRR abs\/2004.05150 (2020)."},{"key":"e_1_3_2_1_6_1","volume-title":"Striped Attention: Faster Ring Attention for Causal Transformers. CoRR abs\/2311.09431","author":"Brandon William","year":"2023","unstructured":"William Brandon, Aniruddha Nrusimha, Kevin Qian, Zachary Ankner, Tian Jin, Zhiye Song, and Jonathan Ragan-Kelley. 2023. Striped Attention: Faster Ring Attention for Causal Transformers. CoRR abs\/2311.09431 (2023)."},{"key":"e_1_3_2_1_7_1","volume-title":"Annual Conference on Neural Information Processing Systems 2020 (NeurIPS","author":"Brown Tom B.","year":"2020","unstructured":"Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language Models are Few-Shot Learners. In Annual Conference on Neural Information Processing Systems 2020 (NeurIPS 2020)."},{"key":"e_1_3_2_1_8_1","unstructured":"Krzysztof Choromanski Valerii Likhosherstov David Dohan Xingyou Song Andreea Gane Tamas Sarlos Peter Hawkins Jared Davis Afroz Mohiuddin Lukasz Kaiser et al. 2020. Rethinking attention with performers. arXiv preprint arXiv:2009.14794 (2020)."},{"key":"e_1_3_2_1_9_1","article-title":"PaLM: Scaling Language Modeling with Pathways","volume":"24","author":"Chowdhery Aakanksha","year":"2023","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, Parker Schuh, Kensen Shi, Sasha Tsvyashchenko, Joshua Maynez, Abhishek Rao, Parker Barnes, Yi Tay, Noam Shazeer, Vinodkumar Prabhakaran, Emily Reif, Nan Du, Ben Hutchinson, Reiner Pope, James Bradbury, Jacob Austin, Michael Isard, Guy Gur-Ari, Pengcheng Yin, Toju Duke, Anselm Levskaya, Sanjay Ghemawat, Sunipa Dev, Henryk Michalewski, Xavier Garcia, Vedant Misra, Kevin Robinson, Liam Fedus, Denny Zhou, Daphne Ippolito, David Luan, Hyeontaek Lim, Barret Zoph, Alexander Spiridonov, Ryan Sepassi, David Dohan, Shivani Agrawal, Mark Omernick, Andrew M. 
Dai, Thanumalayan Sankaranarayana Pillai, Marie Pellat, Aitor Lewkowycz, Erica Moreira, Rewon Child, Oleksandr Polozov, Katherine Lee, Zongwei Zhou, Xuezhi Wang, Brennan Saeta, Mark Diaz, Orhan Firat, Michele Catasta, Jason Wei, Kathy Meier-Hellstern, Douglas Eck, Jeff Dean, Slav Petrov, and Noah Fiedel. 2023. PaLM: Scaling Language Modeling with Pathways. Journal of Machine Learning Research (JMLR) 24 (2023), 240:1\u2013240:113.","journal-title":"Journal of Machine Learning Research (JMLR)"},{"key":"e_1_3_2_1_10_1","volume-title":"FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning. CoRR abs\/2307.08691","author":"Dao Tri","year":"2023","unstructured":"Tri Dao. 2023. FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning. CoRR abs\/2307.08691 (2023)."},{"key":"e_1_3_2_1_11_1","volume-title":"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. In Annual Conference on Neural Information Processing Systems 2022 (NeurIPS","author":"Dao Tri","year":"2022","unstructured":"Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher R\u00e9. 2022. FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. In Annual Conference on Neural Information Processing Systems 2022 (NeurIPS 2022)."},{"key":"e_1_3_2_1_12_1","volume-title":"Large Scale Distributed Deep Networks. In 26th Annual Conference on Neural Information Processing Systems 2012 (NeurIPS","author":"Dean Jeffrey","year":"2022","unstructured":"Jeffrey Dean, Greg Corrado, Rajat Monga, Kai Chen, Matthieu Devin, Quoc V. Le, Mark Z. Mao, Marc'Aurelio Ranzato, Andrew W. Senior, Paul A. Tucker, Ke Yang, and Andrew Y. Ng. 2012. Large Scale Distributed Deep Networks. In 26th Annual Conference on Neural Information Processing Systems 2012 (NeurIPS 2022). 1232\u20131240."},{"key":"e_1_3_2_1_13_1","volume-title":"DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. CoRR abs\/2501.12948","author":"AI.","year":"2025","unstructured":"DeepSeek-AI. 2025. DeepSeek-R1: Incentivizing Reasoning Capability in LLMs via Reinforcement Learning. CoRR abs\/2501.12948 (2025)."},{"key":"e_1_3_2_1_14_1","unstructured":"Abhimanyu Dubey Abhinav Jauhri Abhinav Pandey Abhishek Kadian et al. 2024. The Llama 3 Herd of Models. CoRR (2024)."},{"key":"e_1_3_2_1_15_1","volume-title":"How to Train Long-Context Language Models (Effectively). CoRR","author":"Gao Tianyu","year":"2024","unstructured":"Tianyu Gao, Alexander Wettig, Howard Yen, and Danqi Chen. 2024. How to Train Long-Context Language Models (Effectively). CoRR (2024)."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3694715.3695969"},{"key":"e_1_3_2_1_17_1","unstructured":"Google. 2024. Gemini 1.5 Pro 2M context window code execution capabilities and Gemma 2 are available today. https:\/\/developers.googleblog.com\/en\/new-features-for-the-gemini-api-and-google-ai-studio\/."},{"key":"e_1_3_2_1_18_1","unstructured":"Google. 2024. Introducing Gemini: our largest and most capable AI model. https:\/\/blog.google\/technology\/ai\/google-gemini-ai\/."},{"key":"e_1_3_2_1_19_1","volume-title":"Mamba: Linear-Time Sequence Modeling with Selective State Spaces. CoRR abs\/2312.00752","author":"Gu Albert","year":"2023","unstructured":"Albert Gu and Tri Dao. 2023. Mamba: Linear-Time Sequence Modeling with Selective State Spaces. 
CoRR abs\/2312.00752 (2023)."},{"key":"e_1_3_2_1_20_1","unstructured":"Jordan Hoffmann Sebastian Borgeaud Arthur Mensch Elena Buchatskaya Trevor Cai Eliza Rutherford Diego de Las Casas Lisa Anne Hendricks Johannes Welbl Aidan Clark Tom Hennigan Eric Noland Katie Millican George van den Driessche Bogdan Damoc Aurelia Guy Simon Osindero Karen Simonyan Erich Elsen Jack W. Rae Oriol Vinyals and Laurent Sifre. 2022. Training Compute-Optimal Large Language Models. CoRR abs\/2203.15556 (2022)."},{"key":"e_1_3_2_1_21_1","volume-title":"Annual Conference on Neural Information Processing Systems 2019 (NeurIPS","author":"Huang Yanping","year":"2019","unstructured":"Yanping Huang, Youlong Cheng, Ankur Bapna, Orhan Firat, Dehao Chen, Mia Xu Chen, HyoukJoong Lee, Jiquan Ngiam, Quoc V. Le, Yonghui Wu, and Zhifeng Chen. 2019. GPipe: Efficient Training of Giant Neural Networks using Pipeline Parallelism. In Annual Conference on Neural Information Processing Systems 2019 (NeurIPS 2019). 103\u2013112."},{"key":"e_1_3_2_1_22_1","volume-title":"Samyam Rajbhandari, and Yuxiong He.","author":"Jacobs Sam Ade","year":"2023","unstructured":"Sam Ade Jacobs, Masahiro Tanaka, Chengming Zhang, Minjia Zhang, Shuaiwen Leon Song, Samyam Rajbhandari, and Yuxiong He. 2023. DeepSpeed Ulysses: System Optimizations for Enabling Training of Extreme Long Sequence Transformer Models. CoRR abs\/2309.14509 (2023)."},{"key":"e_1_3_2_1_23_1","volume-title":"Proceedings of the 21st USENIX Symposium on Networked Systems Design and Implementation","author":"Jiang Ziheng","year":"2024","unstructured":"Ziheng Jiang, Haibin Lin, et al. 2024. MegaScale: scaling large language model training to more than 10,000 GPUs. In Proceedings of the 21st USENIX Symposium on Networked Systems Design and Implementation (Santa Clara, CA, USA) (NSDI'24). USENIX Association, USA, Article 41, 16 pages."},{"key":"e_1_3_2_1_24_1","volume-title":"A Comprehensive Survey on Process-Oriented Automatic Text Summarization with Exploration of LLM-Based Methods. CoRR abs\/2403.02901","author":"Jin Hanlei","year":"2024","unstructured":"Hanlei Jin, Yang Zhang, Dan Meng, Jun Wang, and Jinghua Tan. 2024. A Comprehensive Survey on Process-Oriented Automatic Text Summarization with Exploration of LLM-Based Methods. CoRR abs\/2403.02901 (2024)."},{"key":"e_1_3_2_1_25_1","volume-title":"Scaling Laws for Neural Language Models. CoRR abs\/2001.08361","author":"Kaplan Jared","year":"2020","unstructured":"Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. 2020. Scaling Laws for Neural Language Models. CoRR abs\/2001.08361 (2020)."},{"key":"e_1_3_2_1_26_1","volume-title":"International Conference on Machine Learning 2020 (ICML","volume":"119","author":"Katharopoulos Angelos","year":"2020","unstructured":"Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and Fran\u00e7ois Fleuret. 2020. Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention. In International Conference on Machine Learning 2020 (ICML 2020), Vol. 119. 5156\u20135165."},{"key":"e_1_3_2_1_27_1","volume-title":"Reformer: The Efficient Transformer. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=rkgNKkHtvB","author":"Kitaev Nikita","year":"2020","unstructured":"Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. 2020. Reformer: The Efficient Transformer. In International Conference on Learning Representations. 
https:\/\/openreview.net\/forum?id=rkgNKkHtvB"},{"key":"e_1_3_2_1_28_1","volume-title":"Reducing Activation Recomputation in Large Transformer Models. CoRR abs\/2205.05198","author":"Korthikanti Vijay","year":"2022","unstructured":"Vijay Korthikanti, Jared Casper, Sangkug Lym, Lawrence McAfee, Michael Andersch, Mohammad Shoeybi, and Bryan Catanzaro. 2022. Reducing Activation Recomputation in Large Transformer Models. CoRR abs\/2205.05198 (2022)."},{"key":"e_1_3_2_1_29_1","volume-title":"Efficient sequence packing without cross-contamination: Accelerating large language models without impacting performance. CoRR abs\/2107.02027","author":"Krell Mario Michael","year":"2021","unstructured":"Mario Michael Krell, Matej Kosec, Sergio P Perez, and Andrew Fitzgibbon. 2021. Efficient sequence packing without cross-contamination: Accelerating large language models without impacting performance. CoRR abs\/2107.02027 (2021)."},{"key":"e_1_3_2_1_30_1","volume-title":"LightSeq: Sequence Level Parallelism for Distributed Training of Long Context Transformers. CoRR abs\/2310.03294","author":"Li Dacheng","year":"2023","unstructured":"Dacheng Li, Rulin Shao, Anze Xie, Eric P. Xing, Joseph E. Gonzalez, Ion Stoica, Xuezhe Ma, and Hao Zhang. 2023. LightSeq: Sequence Level Parallelism for Distributed Training of Long Context Transformers. CoRR abs\/2310.03294 (2023)."},{"key":"e_1_3_2_1_31_1","volume-title":"Hetu v2: A General and Scalable Deep Learning System with Hierarchical and Heterogeneous Single Program Multiple Data Annotations. arXiv preprint arXiv:2504.20490","author":"Li Haoyang","year":"2025","unstructured":"Haoyang Li, Fangcheng Fu, Hao Ge, Sheng Lin, Xuanyu Wang, Jiawen Niu, Xupeng Miao, and Bin Cui. 2025. Hetu v2: A General and Scalable Deep Learning System with Hierarchical and Heterogeneous Single Program Multiple Data Annotations. arXiv preprint arXiv:2504.20490 (2025)."},{"key":"e_1_3_2_1_32_1","volume-title":"Demystifying workload imbalances in large transformer model training over variable-length sequences. arXiv preprint arXiv:2412.07894","author":"Li Haoyang","year":"2024","unstructured":"Haoyang Li, Fangcheng Fu, Sheng Lin, Hao Ge, Xuanyu Wang, Jiawen Niu, Jie Jiang, and Bin Cui. 2024. Demystifying workload imbalances in large transformer model training over variable-length sequences. arXiv preprint arXiv:2412.07894 (2024)."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.14778\/3415478.3415530"},{"key":"e_1_3_2_1_34_1","volume-title":"Ring Attention with Blockwise Transformers for Near-Infinite Context. CoRR abs\/2310.01889","author":"Liu Hao","year":"2023","unstructured":"Hao Liu, Matei Zaharia, and Pieter Abbeel. 2023. Ring Attention with Blockwise Transformers for Near-Infinite Context. CoRR abs\/2310.01889 (2023)."},{"key":"e_1_3_2_1_35_1","volume-title":"Proceedings of the 13th USENIX Conference on Operating Systems Design and Implementation","author":"Moritz Philipp","year":"2018","unstructured":"Philipp Moritz, Robert Nishihara, Stephanie Wang, Alexey Tumanov, Richard Liaw, Eric Liang, Melih Elibol, Zongheng Yang, William Paul, Michael I. Jordan, and Ion Stoica. 2018. Ray: a distributed framework for emerging AI applications. In Proceedings of the 13th USENIX Conference on Operating Systems Design and Implementation (Carlsbad, CA, USA) (OSDI'18). 
USENIX Association, USA, 561\u2013577."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3597503.3639187"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359646"},{"key":"e_1_3_2_1_38_1","volume-title":"Memory-Efficient Pipeline-Parallel DNN Training. In International Conference on Machine Learning 2021 (ICML","volume":"139","author":"Narayanan Deepak","year":"2021","unstructured":"Deepak Narayanan, Amar Phanishayee, Kaiyu Shi, Xie Chen, and Matei Zaharia. 2021. Memory-Efficient Pipeline-Parallel DNN Training. In International Conference on Machine Learning 2021 (ICML 2021), Vol. 139. 7937\u20137947."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/3458817.3476209"},{"key":"e_1_3_2_1_40_1","volume-title":"NVIDIA: Context Parallelism. https:\/\/docs.nvidia.com\/megatron-core\/developer-guide\/latest\/api-guide\/context_parallel.html.","author":"NVIDIA.","year":"2024","unstructured":"NVIDIA. 2024. NVIDIA: Context Parallelism. https:\/\/docs.nvidia.com\/megatron-core\/developer-guide\/latest\/api-guide\/context_parallel.html."},{"key":"e_1_3_2_1_41_1","unstructured":"OpenAI. 2023. GPT-4 Technical Report. CoRR abs\/2303.08774 (2023)."},{"key":"e_1_3_2_1_42_1","unstructured":"OpenAI. 2024. Hello GPT-4o. https:\/\/openai.com\/index\/hello-gpt-4o\/."},{"key":"e_1_3_2_1_43_1","unstructured":"OpenAI. 2024. Introducing OpenAI o1. https:\/\/openai.com\/o1\/."},{"key":"e_1_3_2_1_44_1","volume-title":"High-Performance Deep Learning Library. In Annual Conference on Neural Information Processing Systems 2019 (NeurIPS","author":"Paszke Adam","year":"2019","unstructured":"Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas K\u00f6pf, Edward Z. Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. Py-Torch: An Imperative Style, High-Performance Deep Learning Library. In Annual Conference on Neural Information Processing Systems 2019 (NeurIPS 2019). 8024\u20138035."},{"key":"e_1_3_2_1_45_1","volume-title":"Ferdinand Mom, Atsushi Saito, Xiangru Tang, Bolun Wang, Johan S. Wind, Stanislaw Wozniak, Ruichong Zhang, Zhenyuan Zhang, Qihang Zhao, Peng Zhou, Jian Zhu, and Rui-Jie Zhu.","author":"Peng Bo","year":"2023","unstructured":"Bo Peng, Eric Alcaide, Quentin Anthony, Alon Albalak, Samuel Arcadinho, Huanqi Cao, Xin Cheng, Michael Chung, Matteo Grella, Kranthi Kiran G. V., Xuzheng He, Haowen Hou, Przemyslaw Kazienko, Jan Kocon, Jiaming Kong, Bartlomiej Koptyra, Hayden Lau, Krishna Sri Ipsit Mantri, Ferdinand Mom, Atsushi Saito, Xiangru Tang, Bolun Wang, Johan S. Wind, Stanislaw Wozniak, Ruichong Zhang, Zhenyuan Zhang, Qihang Zhao, Peng Zhou, Jian Zhu, and Rui-Jie Zhu. 2023. RWKV: Reinventing RNNs for the Transformer Era. CoRR abs\/2305.13048 (2023)."},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1109\/SC41405.2020.00024"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3406703"},{"key":"e_1_3_2_1_48_1","volume-title":"Horovod: fast and easy distributed deep learning in TensorFlow. CoRR abs\/1802.05799","author":"Sergeev Alexander","year":"2018","unstructured":"Alexander Sergeev and Mike Del Balso. 2018. Horovod: fast and easy distributed deep learning in TensorFlow. 
CoRR abs\/1802.05799 (2018)."},{"key":"e_1_3_2_1_49_1","volume-title":"Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism. CoRR abs\/1909.08053","author":"Shoeybi Mohammad","year":"2019","unstructured":"Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. 2019. Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism. CoRR abs\/1909.08053 (2019)."},{"key":"e_1_3_2_1_50_1","volume-title":"Retentive network: A successor to transformer for large language models. arXiv preprint arXiv:2307.08621","author":"Sun Yutao","year":"2023","unstructured":"Yutao Sun, Li Dong, Shaohan Huang, Shuming Ma, Yuqing Xia, Jilong Xue, Jianyong Wang, and Furu Wei. 2023. Retentive network: A successor to transformer for large language models. arXiv preprint arXiv:2307.08621 (2023)."},{"key":"e_1_3_2_1_51_1","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale Dan Bikel Lukas Blecher Cristian Canton-Ferrer Moya Chen Guillem Cucurull David Esiobu Jude Fernandes Jeremy Fu Wenyin Fu Brian Fuller Cynthia Gao Vedanuj Goswami Naman Goyal Anthony Hartshorn Saghar Hosseini Rui Hou Hakan Inan Marcin Kardas Viktor Kerkez Madian Khabsa Isabel Kloumann Artem Korenev Punit Singh Koura Marie-Anne Lachaux Thibaut Lavril Jenya Lee Diana Liskovich Yinghai Lu Yuning Mao Xavier Martinet Todor Mihaylov Pushkar Mishra Igor Molybog Yixin Nie Andrew Poulton Jeremy Reizenstein Rashi Rungta Kalyan Saladi Alan Schelten Ruan Silva Eric Michael Smith Ranjan Subramanian Xiaoqing Ellen Tan Binh Tang Ross Taylor Adina Williams Jian Xiang Kuan Puxin Xu Zheng Yan Iliyan Zarov Yuchen Zhang Angela Fan Melanie Kambadur Sharan Narang Aur\u00e9lien Rodriguez Robert Stojnic Sergey Edunov and Thomas Scialom. 2023. Llama 2: Open Foundation and Fine-Tuned Chat Models. CoRR abs\/2307.09288 (2023)."},{"key":"e_1_3_2_1_52_1","volume-title":"Annual Conference on Neural Information Processing Systems 2017 (NeurIPS","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is All you Need. In Annual Conference on Neural Information Processing Systems 2017 (NeurIPS 2017). 5998\u20136008."},{"key":"e_1_3_2_1_53_1","volume-title":"European Conference on Computer Vision. Springer, 58\u201376","author":"Wang Xiaohan","year":"2025","unstructured":"Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. 2025. Videoagent: Long-form video understanding with large language model as agent. In European Conference on Computer Vision. Springer, 58\u201376."},{"key":"e_1_3_2_1_54_1","volume-title":"Proceedings, Part XXXIII.","author":"Weng Yuetian","year":"2024","unstructured":"Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. 2024. LongVLM: Efficient Long Video Understanding via Large Language Models. 
In Computer Vision - ECCV 2024: 18th European Conference, Milan, Italy, September 29\u2013October 4, 2024, Proceedings, Part XXXIII."},{"key":"e_1_3_2_1_55_1","volume-title":"CoRR abs\/2412.15115","author":"Yang An","year":"2025","unstructured":"An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tianyi Tang, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. 2025. Qwen2.5 Technical Report. CoRR abs\/2412.15115 (2025)."},{"key":"e_1_3_2_1_56_1","volume-title":"Big Bird: Transformers for Longer Sequences. In Annual Conference on Neural Information Processing Systems 2020 (NeurIPS","author":"Zaheer Manzil","year":"2020","unstructured":"Manzil Zaheer, Guru Guruganesh, Kumar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Onta\u00f1\u00f3n, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, and Amr Ahmed. 2020. Big Bird: Transformers for Longer Sequences. In Annual Conference on Neural Information Processing Systems 2020 (NeurIPS 2020)."}],"event":{"name":"SIGCOMM '25: ACM SIGCOMM 2025 Conference","location":"S\u00e3o Francisco Convent Coimbra Portugal","acronym":"SIGCOMM '25","sponsor":["SIGCOMM ACM Special Interest Group on Data Communication"]},"container-title":["Proceedings of the ACM SIGCOMM 2025 Conference"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3718958.3754352","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,27]],"date-time":"2025-08-27T16:58:58Z","timestamp":1756313938000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3718958.3754352"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,27]]},"references-count":56,"alternative-id":["10.1145\/3718958.3754352","10.1145\/3718958"],"URL":"https:\/\/doi.org\/10.1145\/3718958.3754352","relation":{},"subject":[],"published":{"date-parts":[[2025,8,27]]},"assertion":[{"value":"2025-08-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
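The record above is a Crossref "work" message for DOI 10.1145/3718958.3754352. As a minimal illustrative sketch (not part of the record itself), the snippet below shows one way to retrieve and read a record of this shape from the public Crossref REST API; the DOI and the field names ("title", "author", "references-count", "published") come from the record above, while the endpoint URL and the User-Agent header reflect standard Crossref usage and should be treated as assumptions of this example.

```python
# Sketch: fetch a Crossref work record and read a few of the fields
# present in the JSON above. Assumes network access to api.crossref.org.
import json
from urllib.request import Request, urlopen

DOI = "10.1145/3718958.3754352"  # ByteScale (SIGCOMM '25), per the record above
url = f"https://api.crossref.org/works/{DOI}"

with urlopen(Request(url, headers={"User-Agent": "example-metadata-reader"})) as resp:
    record = json.load(resp)

msg = record["message"]  # corresponds to the "message" object shown above

print(msg["title"][0])                                   # paper title (list of strings)
print(", ".join(f'{a["given"]} {a["family"]}'            # author names
                for a in msg["author"]))
print("References:", msg["references-count"])            # 56 in the record above
print("Published:", msg["published"]["date-parts"][0])   # e.g. [2025, 8, 27]
```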