{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:04:52Z","timestamp":1775228692897,"version":"3.50.1"},"reference-count":34,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,19]],"date-time":"2024-06-19T00:00:00Z","timestamp":1718755200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,19]],"date-time":"2024-06-19T00:00:00Z","timestamp":1718755200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,19]]},"DOI":"10.1109\/iwqos61813.2024.10682856","type":"proceedings-article","created":{"date-parts":[[2024,9,26]],"date-time":"2024-09-26T17:41:00Z","timestamp":1727372460000},"page":"1-10","source":"Crossref","is-referenced-by-count":5,"title":["Lins: Reducing Communication Overhead of ZeRO for Efficient LLM Training"],"prefix":"10.1109","author":[{"given":"Qiaoling","family":"Chen","sequence":"first","affiliation":[{"name":"NTU,S-Lab"}]},{"given":"Qinghao","family":"Hu","sequence":"additional","affiliation":[{"name":"NTU,S-Lab"}]},{"given":"Guoteng","family":"Wang","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory"}]},{"given":"Yingtong","family":"Xiong","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory"}]},{"given":"Ting","family":"Huang","sequence":"additional","affiliation":[{"name":"SenseTime"}]},{"given":"Xun","family":"Chen","sequence":"additional","affiliation":[{"name":"SenseTime"}]},{"given":"Yang","family":"Gao","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory"}]},{"given":"Hang","family":"Yan","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory"}]},{"given":"Yonggang","family":"Wen","sequence":"additional","affiliation":[{"name":"Nanyang Technological University"}]},{"given":"Tianwei","family":"Zhang","sequence":"additional","affiliation":[{"name":"Nanyang Technological University"}]},{"given":"Peng","family":"Sun","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Training compute-optimal large language models","author":"Hoffmann","year":"2022"},{"key":"ref2","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3406703"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.14778\/3611540.3611569"},{"key":"ref5","first-page":"1","article-title":"Efficient large-scale language model training on gpu clusters using megatron-lm","volume-title":"Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis","author":"Narayanan"},{"key":"ref6","article-title":"Zero++: Extremely efficient collective communication for giant model training","author":"Wang","year":"2023"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.14778\/3561261.3561265"},{"key":"ref8","article-title":"Characterization of large language model development in the datacenter","volume-title":"USENIX Symposium on Networked Systems Design and Implementation (NSDI\u201924)","author":"Hu"},{"key":"ref9","first-page":"1877","article-title":"Language models are few-shot 
learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref10","article-title":"Attention is all you need","volume":"30","author":"Vaswani","year":"2017"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.14778\/3415478.3415530"},{"key":"ref12","article-title":"Gpipe: Efficient training of giant neural networks using pipeline parallelism","volume":"32","author":"Huang","year":"2019"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359646"},{"key":"ref14","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2017"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/SC41405.2020.00024"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3605573.3605613"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359642"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TBDATA.2019.2957478"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICPADS.2016.0146"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-39924-7_38"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.parco.2009.09.001"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3552326.3587436"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3582016.3582037"},{"key":"ref24","article-title":"Flashattention-2: Faster attention with better parallelism and work partitioning","author":"Dao","year":"2023"},{"issue":"240","key":"ref25","first-page":"1","article-title":"Palm: Scaling language modeling with pathways","volume":"24","author":"Chowdhery","year":"2023","journal-title":"Journal of Machine Learning Research"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3437801.3441593"},{"key":"ref27","first-page":"559","article-title":"Alpa: Automating inter-and {Intra-Operator} parallelism for distributed deep learning","volume-title":"16th USENIX Symposium on Operating Systems Design and Implementation (OSDI 22)","author":"Zheng"},{"key":"ref28","first-page":"1","article-title":"Beyond data and model parallelism for deep neural networks","volume-title":"Proceedings of Machine Learning and Systems","volume":"1","author":"Jia"},{"key":"ref29","first-page":"267","article-title":"Unity: Accelerating {DNN} training through joint optimization of algebraic transformations and parallelization","volume-title":"16th USENIX Symposium on Operating Systems Design and Implementation","author":"Unger"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2021.3132413"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3552326.3567505"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICDCS57875.2023.00054"},{"key":"ref33","article-title":"Hetumoe: An efficient trillion-scale mixture-of-expert distributed training system","author":"Nie","year":"2022"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3406703"}],"event":{"name":"2024 IEEE\/ACM 32nd International Symposium on Quality of Service (IWQoS)","location":"Guangzhou, China","start":{"date-parts":[[2024,6,19]]},"end":{"date-parts":[[2024,6,21]]}},"container-title":["2024 IEEE\/ACM 32nd International Symposium on Quality of Service 
(IWQoS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10682818\/10682608\/10682856.pdf?arnumber=10682856","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,27]],"date-time":"2024-09-27T18:27:49Z","timestamp":1727461669000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10682856\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,19]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/iwqos61813.2024.10682856","relation":{},"subject":[],"published":{"date-parts":[[2024,6,19]]}}}