{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T15:34:40Z","timestamp":1772724880549,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":55,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,3,30]],"date-time":"2025-03-30T00:00:00Z","timestamp":1743292800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,3,30]]},"DOI":"10.1145\/3676641.3715998","type":"proceedings-article","created":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T16:47:32Z","timestamp":1743094052000},"page":"421-436","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":4,"title":["FlexSP: Accelerating Large Language Model Training via Flexible Sequence Parallelism"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-8375-493X","authenticated-orcid":false,"given":"Yujie","family":"Wang","sequence":"first","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-5959-1652","authenticated-orcid":false,"given":"Shiju","family":"Wang","sequence":"additional","affiliation":[{"name":"Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-0267-775X","authenticated-orcid":false,"given":"Shenhan","family":"Zhu","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1658-0380","authenticated-orcid":false,"given":"Fangcheng","family":"Fu","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-5863-2613","authenticated-orcid":false,"given":"Xinyi","family":"Liu","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-8258-1243","authenticated-orcid":false,"given":"Xuefeng","family":"Xiao","sequence":"additional","affiliation":[{"name":"ByteDance Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-5968-3793","authenticated-orcid":false,"given":"Huixia","family":"Li","sequence":"additional","affiliation":[{"name":"ByteDance Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-7171-8527","authenticated-orcid":false,"given":"Jiashi","family":"Li","sequence":"additional","affiliation":[{"name":"ByteDance Inc., Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4256-3277","authenticated-orcid":false,"given":"Faming","family":"Wu","sequence":"additional","affiliation":[{"name":"ByteDance Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1681-4677","authenticated-orcid":false,"given":"Bin","family":"Cui","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,3,30]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"2021. NVIDIA collective communications library (NCCL). https:\/\/ developer.nvidia.com\/nccl."},{"key":"e_1_3_2_1_2_1","unstructured":"2025. FlexSP Appendix. https:\/\/github.com\/AFDWang\/ASPLOS25- FlexSP-Supplemental-Material\/blob\/main\/Appendix.pdf."},{"key":"e_1_3_2_1_3_1","volume-title":"Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al.","author":"Achiam Josh","year":"2023","unstructured":"Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023)."},{"key":"e_1_3_2_1_4_1","volume-title":"Longformer: The Long-Document Transformer. CoRR","author":"Beltagy Iz","year":"2020","unstructured":"Iz Beltagy, Matthew E. Peters, and Arman Cohan. 2020. Longformer: The Long-Document Transformer. CoRR, Vol. abs\/2004.05150 (2020). showeprint[arXiv]2004.05150 https:\/\/arxiv.org\/abs\/2004.05150"},{"key":"e_1_3_2_1_5_1","unstructured":"Suresh Bolusani Mathieu Besan\u00e7on Ksenia Bestuzheva Antonia Chmiela Jo ao Dion\u00edsio Tim Donkiewicz Jasper van Doornmalen Leon Eifler Mohammed Ghannam Ambros Gleixner Christoph Graczyk Katrin Halbig Ivo Hedtke Alexander Hoen Christopher Hojny Rolf van der Hulst Dominik Kamp Thorsten Koch Kevin Kofler Jurgen Lentz Julian Manns Gioni Mexi Erik M\u00fchmer Marc E. Pfetsch Franziska Schl\u00f6sser Felipe Serrano Yuji Shinano Mark Turner Stefan Vigerske Dieter Weninger and Lixing Xu. 2024. The SCIP Optimization Suite 9.0. Technical Report. Optimization Online. https:\/\/optimization-online.org\/2024\/02\/the-scip-optimization-suite-9-0\/"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2311.09431"},{"key":"e_1_3_2_1_7_1","unstructured":"Tom B. Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell Sandhini Agarwal Ariel Herbert-Voss Gretchen Krueger Tom Henighan Rewon Child Aditya Ramesh Daniel M. Ziegler Jeffrey Wu Clemens Winter Christopher Hesse Mark Chen Eric Sigler Mateusz Litwin Scott Gray Benjamin Chess Jack Clark Christopher Berner Sam McCandlish Alec Radford Ilya Sutskever and Dario Amodei. 2020. Language Models are Few-Shot Learners. In NeurIPS."},{"key":"e_1_3_2_1_8_1","volume-title":"Quoc Viet Le, and Ruslan Salakhutdinov","author":"Dai Zihang","year":"2019","unstructured":"Zihang Dai, Zhilin Yang, Yiming Yang, Jaime G. Carbonell, Quoc Viet Le, and Ruslan Salakhutdinov. 2019. Transformer-XL: Attentive Language Models beyond a Fixed-Length Context. In ACL. 2978--2988."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2307.08691"},{"key":"e_1_3_2_1_10_1","volume-title":"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022","author":"Dao Tri","year":"2022","unstructured":"Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher R\u00e9. 2022. FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (Eds.). http:\/\/papers.nips.cc\/paper_files\/paper\/2022\/hash\/67d57c32e20fd0a7a302cb81d36e40d5-Abstract-Conference.html"},{"key":"e_1_3_2_1_11_1","unstructured":"DeepSeek-AI Aixin Liu Bei Feng Bin Wang Bingxuan Wang Bo Liu Chenggang Zhao Chengqi Dengr Chong Ruan Damai Dai Daya Guo Dejian Yang Deli Chen Dongjie Ji Erhang Li Fangyun Lin Fuli Luo Guangbo Hao Guanting Chen Guowei Li H. Zhang Hanwei Xu Hao Yang Haowei Zhang Honghui Ding Huajian Xin Huazuo Gao Hui Li Hui Qu J. L. Cai Jian Liang Jianzhong Guo Jiaqi Ni Jiashi Li Jin Chen Jingyang Yuan Junjie Qiu Junxiao Song Kai Dong Kaige Gao Kang Guan Lean Wang Lecong Zhang Lei Xu Leyi Xia Liang Zhao Liyue Zhang Meng Li Miaojun Wang Mingchuan Zhang Minghua Zhang Minghui Tang Mingming Li Ning Tian Panpan Huang Peiyi Wang Peng Zhang Qihao Zhu Qinyu Chen Qiushi Du R. J. Chen R. L. Jin Ruiqi Ge Ruizhe Pan Runxin Xu Ruyi Chen S. S. Li Shanghao Lu Shangyan Zhou Shanhuang Chen Shaoqing Wu Shengfeng Ye Shirong Ma Shiyu Wang Shuang Zhou Shuiping Yu Shunfeng Zhou Size Zheng T. Wang Tian Pei Tian Yuan Tianyu Sun W. L. Xiao Wangding Zeng Wei An Wen Liu Wenfeng Liang Wenjun Gao Wentao Zhang et al. 2024. DeepSeek-V2: A Strong Economical and Efficient Mixture-of-Experts Language Model. arxiv: 2405.04434 [cs.CL] https:\/\/arxiv.org\/abs\/2405.04434"},{"key":"e_1_3_2_1_12_1","volume-title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL-HLT. 4171--4186.","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL-HLT. 4171--4186."},{"key":"e_1_3_2_1_13_1","volume-title":"Fewer Truncations Improve Language Modeling. In Forty-first International Conference on Machine Learning, ICML 2024","author":"Ding Hantian","year":"2024","unstructured":"Hantian Ding, Zijian Wang, Giovanni Paolini, Varun Kumar, Anoop Deoras, Dan Roth, and Stefano Soatto. 2024a. Fewer Truncations Improve Language Modeling. In Forty-first International Conference on Machine Learning, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net. https:\/\/openreview.net\/forum?id=kRxCDDFNpp"},{"key":"e_1_3_2_1_14_1","volume-title":"Chengruidong Zhang, Yuanyuan Xu, Ning Shang, Jiahang Xu, Fan Yang, and Mao Yang.","author":"Ding Yiran","year":"2024","unstructured":"Yiran Ding, Li Lyna Zhang, Chengruidong Zhang, Yuanyuan Xu, Ning Shang, Jiahang Xu, Fan Yang, and Mao Yang. 2024b. LongRoPE: Extending LLM Context Window Beyond 2 Million Tokens. arxiv: 2402.13753 [cs.CL] https:\/\/arxiv.org\/abs\/2402.13753"},{"key":"e_1_3_2_1_15_1","unstructured":"Abhimanyu Dubey Abhinav Jauhri Abhinav Pandey Abhishek Kadian Ahmad Al-Dahle Aiesha Letman Akhil Mathur Alan Schelten Amy Yang Angela Fan Anirudh Goyal Anthony Hartshorn Aobo Yang Archi Mitra Archie Sravankumar Artem Korenev Arthur Hinsvark Arun Rao Aston Zhang Aurelien Rodriguez Austen Gregerson Ava Spataru Baptiste Roziere Bethany Biron Binh Tang Bobbie Chern Charlotte Caucheteux Chaya Nayak Chloe Bi Chris Marra Chris McConnell Christian Keller Christophe Touret Chunyang Wu Corinne Wong Cristian Canton Ferrer Cyrus Nikolaidis Damien Allonsius Daniel Song Danielle Pintz Danny Livshits David Esiobu Dhruv Choudhary Dhruv Mahajan Diego Garcia-Olano Diego Perino Dieuwke Hupkes Egor Lakomkin Ehab AlBadawy Elina Lobanova Emily Dinan Eric Michael Smith Filip Radenovic Frank Zhang Gabriel Synnaeve Gabrielle Lee Georgia Lewis Anderson Graeme Nail Gregoire Mialon Guan Pang Guillem Cucurell Hailey Nguyen Hannah Korevaar Hu Xu Hugo Touvron Iliyan Zarov Imanol Arrieta Ibarra Isabel Kloumann Ishan Misra Ivan Evtimov Jade Copet Jaewon Lee Jan Geffert Jana Vranes Jason Park Jay Mahadeokar Jeet Shah Jelmer van der Linde Jennifer Billock Jenny Hong Jenya Lee Jeremy Fu Jianfeng Chi Jianyu Huang Jiawen Liu Jie Wang Jiecao Yu Joanna Bitton Joe Spisak Jongsoo Park Joseph Rocca Joshua Johnstun Joshua Saxe Junteng Jia Kalyan Vasuden Alwala Kartikeya Upasani Kate Plawiak Ke Li Kenneth Heafield Kevin Stone Khalid El-Arini Krithika Iyer Kshitiz Malik Kuenley Chiu Kunal Bhalla Lauren Rantala-Yeary Laurens van der Maaten Lawrence Chen Liang Tan Liz Jenkins Louis Martin Lovish Madaan et al. 2024. The Llama 3 Herd of Models. arxiv: 2407.21783 [cs.AI] https:\/\/arxiv.org\/abs\/2407.21783"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1007\/S11390-024-3872-3"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1016\/S0167-8191(06)80021-9"},{"key":"e_1_3_2_1_18_1","unstructured":"Yanping Huang Youlong Cheng Ankur Bapna et al. 2019. GPipe: Efficient Training of Giant Neural Networks using Pipeline Parallelism. In NeurIPS."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2309.14509"},{"key":"e_1_3_2_1_20_1","volume-title":"2022 USENIX Annual Technical Conference (USENIX ATC 22)","author":"Jia Xianyan","year":"2022","unstructured":"Xianyan Jia, Le Jiang, Ang Wang, Wencong Xiao, Ziji Shi, Jie Zhang, Xinyuan Li, Langshi Chen, Yong Li, Zhen Zheng, et al. 2022. Whale: Efficient giant model training over heterogeneous {GPUs}. In 2022 USENIX Annual Technical Conference (USENIX ATC 22). 673--688."},{"key":"e_1_3_2_1_21_1","unstructured":"Zhihao Jia Matei Zaharia and Alex Aiken. 2019. Beyond Data and Model Parallelism for Deep Neural Networks. In MLSys."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2205.05198"},{"key":"e_1_3_2_1_23_1","volume-title":"Efficient sequence packing without cross-contamination: Accelerating large language models without impacting performance. arXiv preprint arXiv:2107.02027","author":"Krell Mario Michael","year":"2021","unstructured":"Mario Michael Krell, Matej Kosec, Sergio P Perez, and Andrew Fitzgibbon. 2021. Efficient sequence packing without cross-contamination: Accelerating large language models without impacting performance. arXiv preprint arXiv:2107.02027 (2021)."},{"key":"e_1_3_2_1_24_1","volume-title":"DISTFLASHATTN: Distributed Memory-efficient Attention for Long-context LLMs Training. In First Conference on Language Modeling.","author":"Li Dacheng","year":"2024","unstructured":"Dacheng Li, Rulin Shao, Anze Xie, Eric P Xing, Xuezhe Ma, Ion Stoica, Joseph E Gonzalez, and Hao Zhang. 2024. DISTFLASHATTN: Distributed Memory-efficient Attention for Long-context LLMs Training. In First Conference on Language Modeling."},{"key":"e_1_3_2_1_25_1","volume-title":"AMP: Automatically Finding Model Parallel Strategies with Heterogeneity Awareness. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems","author":"Li Dacheng","year":"2022","unstructured":"Dacheng Li, Hongyi Wang, Eric P. Xing, and Hao Zhang. 2022. AMP: Automatically Finding Model Parallel Strategies with Heterogeneity Awareness. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (Eds.). http:\/\/papers.nips.cc\/paper_files\/paper\/2022\/hash\/2b4bfa1cebe78d125fefd7ea6ffcfc6d-Abstract-Conference.html"},{"key":"e_1_3_2_1_26_1","first-page":"98","article-title":"Memory-Enhanced Transformer for Representation Learning on Temporal Heterogeneous Graphs. Data Sci","volume":"8","author":"Li Longhai","year":"2023","unstructured":"Longhai Li, Lei Duan, Junchen Wang, Chengxin He, Zihao Chen, Guicai Xie, Song Deng, and Zhaohang Luo. 2023a. Memory-Enhanced Transformer for Representation Learning on Temporal Heterogeneous Graphs. Data Sci. Eng., Vol. 8, 2 (2023), 98--111.","journal-title":"Eng."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.18653\/V1\/2023.ACL-LONG.134"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.14778\/3415478.3415530"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2310.01889"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.14778\/3570690.3570697"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"crossref","unstructured":"Deepak Narayanan Aaron Harlap Amar Phanishayee Vivek Seshadri Nikhil R. Devanur Gregory R. Ganger Phillip B. Gibbons and Matei Zaharia. 2019. PipeDream: generalized pipeline parallelism for DNN training. In SOSP. 1--15.","DOI":"10.1145\/3341301.3359646"},{"key":"e_1_3_2_1_32_1","volume-title":"International Conference on Machine Learning. PMLR, 7937--7947","author":"Narayanan Deepak","year":"2021","unstructured":"Deepak Narayanan, Amar Phanishayee, Kaiyu Shi, Xie Chen, and Matei Zaharia. 2021a. Memory-efficient pipeline-parallel dnn training. In International Conference on Machine Learning. PMLR, 7937--7947."},{"key":"e_1_3_2_1_33_1","first-page":"1","article-title":"Efficient large-scale language model training on GPU clusters using megatron-LM","volume":"58","author":"Narayanan Deepak","year":"2021","unstructured":"Deepak Narayanan, Mohammad Shoeybi, Jared Casper, et al. 2021b. Efficient large-scale language model training on GPU clusters using megatron-LM. In SC. ACM, 58:1--58:15.","journal-title":"SC. ACM"},{"key":"e_1_3_2_1_34_1","volume-title":"2020 USENIX Annual Technical Conference, USENIX ATC 2020","author":"Park Jay H.","year":"2020","unstructured":"Jay H. Park, Gyeongchan Yun, Chang M. Yi, Nguyen T. Nguyen, Seungmin Lee, Jaesik Choi, Sam H. Noh, and Young-ri Choi. 2020. HetPipe: Enabling Large DNN Training on (Whimpy) Heterogeneous GPU Clusters through Integration of Pipelined Model Parallelism and Data Parallelism. In 2020 USENIX Annual Technical Conference, USENIX ATC 2020, July 15-17, 2020, Ada Gavrilovska and Erez Zadok (Eds.). USENIX Association, 307--321. https:\/\/www.usenix.org\/conference\/atc20\/presentation\/park"},{"key":"e_1_3_2_1_35_1","unstructured":"Alec Radford Karthik Narasimhan Tim Salimans Ilya Sutskever et al. 2018. Improving language understanding by generative pre-training. (2018)."},{"key":"e_1_3_2_1_36_1","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei Ilya Sutskever et al. 2019. Language models are unsupervised multitask learners. OpenAI blog Vol. 1 8 (2019) 9."},{"key":"e_1_3_2_1_37_1","volume-title":"Liu","author":"Raffel Colin","year":"2020","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. JMLR (2020)."},{"key":"e_1_3_2_1_38_1","volume-title":"ZeRO: memory optimizations toward training trillion parameter models","author":"Rajbhandari Samyam","unstructured":"Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. 2020. ZeRO: memory optimizations toward training trillion parameter models. In SC. IEEE\/ACM."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3406703"},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1007\/S11432-021-3536-5"},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA47549.2020.00036"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2302.13971"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale Dan Bikel Lukas Blecher Cristian Canton-Ferrer Moya Chen Guillem Cucurull David Esiobu Jude Fernandes Jeremy Fu Wenyin Fu Brian Fuller Cynthia Gao Vedanuj Goswami Naman Goyal Anthony Hartshorn Saghar Hosseini Rui Hou Hakan Inan Marcin Kardas Viktor Kerkez Madian Khabsa Isabel Kloumann Artem Korenev Punit Singh Koura Marie-Anne Lachaux Thibaut Lavril Jenya Lee Diana Liskovich Yinghai Lu Yuning Mao Xavier Martinet Todor Mihaylov Pushkar Mishra Igor Molybog Yixin Nie Andrew Poulton Jeremy Reizenstein Rashi Rungta Kalyan Saladi Alan Schelten Ruan Silva Eric Michael Smith Ranjan Subramanian Xiaoqing Ellen Tan Binh Tang Ross Taylor Adina Williams Jian Xiang Kuan Puxin Xu Zheng Yan Iliyan Zarov Yuchen Zhang Angela Fan Melanie Kambadur Sharan Narang Aur\u00e9lien Rodriguez Robert Stojnic Sergey Edunov and Thomas Scialom. 2023b. Llama 2: Open Foundation and Fine-Tuned Chat Models. CoRR Vol. abs\/2307.09288 (2023). https:\/\/doi.org\/10.48550\/ARXIV.2307.09288 [arXiv]2307.09288","DOI":"10.48550\/ARXIV.2307.09288"},{"key":"e_1_3_2_1_44_1","volume-title":"Unity: Accelerating DNN Training Through Joint Optimization of Algebraic Transformations and Parallelization. In OSDI. 267--284.","author":"Unger Colin","year":"2022","unstructured":"Colin Unger, Zhihao Jia, Wei Wu, et al. 2022. Unity: Accelerating DNN Training Through Joint Optimization of Algebraic Transformations and Parallelization. In OSDI. 267--284."},{"key":"e_1_3_2_1_45_1","unstructured":"Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan N. Gomez Lukasz Kaiser and Illia Polosukhin. 2017. Attention is All you Need. In NeurIPS. 5998--6008."},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2024.3370614"},{"key":"e_1_3_2_1_47_1","unstructured":"Aiyuan Yang Bin Xiao Bingning Wang Borong Zhang Ce Bian Chao Yin Chenxu Lv Da Pan Dian Wang Dong Yan Fan Yang Fei Deng Feng Wang Feng Liu Guangwei Ai Guosheng Dong Haizhou Zhao Hang Xu Haoze Sun Hongda Zhang Hui Liu Jiaming Ji Jian Xie JunTao Dai Kun Fang Lei Su Liang Song Lifeng Liu Liyun Ru Luyao Ma Mang Wang Mickel Liu MingAn Lin Nuolan Nie Peidong Guo Ruiyang Sun Tao Zhang Tianpeng Li Tianyu Li Wei Cheng Weipeng Chen Xiangrong Zeng Xiaochuan Wang Xiaoxi Chen Xin Men Xin Yu Xuehai Pan Yanjun Shen Yiding Wang Yiyu Li Youxin Jiang Yuchen Gao Yupeng Zhang Zenan Zhou and Zhiying Wu. 2023. Baichuan 2: Open Large-scale Language Models. arxiv: 2309.10305 [cs.CL] https:\/\/arxiv.org\/abs\/2309.10305"},{"key":"e_1_3_2_1_48_1","volume-title":"Big Bird: Transformers for Longer Sequences. In Annual Conference on Neural Information Processing Systems 2020 (NeurIPS","author":"Zaheer Manzil","year":"2020","unstructured":"Manzil Zaheer, Guru Guruganesh, Kumar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Onta n\u00f3n, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, and Amr Ahmed. 2020. Big Bird: Transformers for Longer Sequences. In Annual Conference on Neural Information Processing Systems 2020 (NeurIPS 2020)."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11432-023-3956-3"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2205.01068"},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","unstructured":"Zhen-Xing Zhang Yuan-Bo Wen Han-Qi Lv Chang Liu Rui Zhang Xia-Qing Li Chao Wang Zi-Dong Du Qi Guo Ling Li Xue-Hai Zhou and Yun-Ji Chen. 2024a. AI computing systems for LLMs training: a review. J. Comput. Sci. Technol. (2024). https:\/\/doi.org\/10.1007\/s11390-024-4178-1","DOI":"10.1007\/s11390-024-4178-1"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.14778\/3611540.3611569"},{"key":"e_1_3_2_1_53_1","volume-title":"Alpa: Automating Inter- and Intra-Operator Parallelism for Distributed Deep Learning. In 16th USENIX Symposium on Operating Systems Design and Implementation, OSDI 2022","author":"Zheng Lianmin","year":"2022","unstructured":"Lianmin Zheng, Zhuohan Li, Hao Zhang, Yonghao Zhuang, Zhifeng Chen, Yanping Huang, Yida Wang, Yuanzhong Xu, Danyang Zhuo, Eric P. Xing, Joseph E. Gonzalez, and Ion Stoica. 2022. Alpa: Automating Inter- and Intra-Operator Parallelism for Distributed Deep Learning. In 16th USENIX Symposium on Operating Systems Design and Implementation, OSDI 2022, Carlsbad, CA, USA, July 11-13, 2022, Marcos K. Aguilera and Hakim Weatherspoon (Eds.). USENIX Association, 559--578. https:\/\/www.usenix.org\/conference\/osdi22\/presentation\/zheng-lianmin"},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1007\/S41019-023-00235-6"},{"key":"e_1_3_2_1_55_1","unstructured":"Dawei Zhu Liang Wang Nan Yang Yifan Song Wenhao Wu Furu Wei and Sujian Li. 2024. LongEmbed: Extending Embedding Models for Long Context Retrieval. arxiv: 2404.12096 [cs.CL] https:\/\/arxiv.org\/abs\/2404.12096"}],"event":{"name":"ASPLOS '25: 30th ACM International Conference on Architectural Support for Programming Languages and Operating Systems","location":"Rotterdam Netherlands","acronym":"ASPLOS '25","sponsor":["SIGPLAN ACM Special Interest Group on Programming Languages","SIGOPS ACM Special Interest Group on Operating Systems","SIGARCH ACM Special Interest Group on Computer Architecture"]},"container-title":["Proceedings of the 30th ACM International Conference on Architectural Support for Programming Languages and Operating Systems, Volume 2"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3676641.3715998","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3676641.3715998","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T11:07:10Z","timestamp":1755774430000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3676641.3715998"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3,30]]},"references-count":55,"alternative-id":["10.1145\/3676641.3715998","10.1145\/3676641"],"URL":"https:\/\/doi.org\/10.1145\/3676641.3715998","relation":{},"subject":[],"published":{"date-parts":[[2025,3,30]]},"assertion":[{"value":"2025-03-30","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}