{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T05:10:11Z","timestamp":1750137011264,"version":"3.41.0"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,5,6]],"date-time":"2025-05-06T00:00:00Z","timestamp":1746489600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,6]],"date-time":"2025-05-06T00:00:00Z","timestamp":1746489600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,5,6]]},"DOI":"10.23919\/ondm65745.2025.11029332","type":"proceedings-article","created":{"date-parts":[[2025,6,16]],"date-time":"2025-06-16T18:47:12Z","timestamp":1750099632000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["Optical Networks for Distributed AI Training: Flow Completion Time and Training Time Analysis"],"prefix":"10.23919","author":[{"given":"Venkata Virajit","family":"Garbhapu","sequence":"first","affiliation":[{"name":"Huawei Technologies France SASU Boulogne-Billancourt,Optical Communication Technology Lab,France"}]},{"given":"Gabriel","family":"Charlet","sequence":"additional","affiliation":[{"name":"Huawei Technologies France SASU Boulogne-Billancourt,Optical Communication Technology Lab,France"}]},{"given":"Yvan","family":"Pointurier","sequence":"additional","affiliation":[{"name":"Huawei Technologies France SASU Boulogne-Billancourt,Optical Communication Technology Lab,France"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"author":"Nishball","journal-title":"100,000 H100 Clusters: Power, Network Topology, Ethernet vs InfiniBand, Reliability, Failures, Checkpointing","key":"ref2"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1109\/HPEC58863.2023.10363447"},{"key":"ref4","volume":"abs\/2404.06114","author":"Liang","year":"2024","journal-title":"Communication-efficient large-scale distributed deep learning: A comprehensive survey"},{"journal-title":"Distributed training with determined","key":"ref5"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1038\/s41467-021-25841-8"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1145\/3579371.3589350"},{"author":"Nishad","journal-title":"GPT-4: Discovering OpenAI\u2019s most powerful AI model","key":"ref8"},{"key":"ref9","article-title":"Efficient parallelization layouts for large-scale distributed model training","author":"Hagemann","year":"2023","journal-title":"WANT@NeurIPS 2023"},{"doi-asserted-by":"publisher","key":"ref10","DOI":"10.1145\/3678015.3680478"},{"year":"2024","author":"Duan","journal-title":"Efficient training of large language models on distributed infrastructures: a survey","key":"ref11"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1145\/3651890.3672243"},{"year":"2024","author":"Cheng","journal-title":"EDiT: A Local-SGD-Based Efficient Distributed Training Method for Large Language Models","key":"ref13"},{"key":"ref14","article-title":"Decentralized training over 100km based on optical transport network for artificial intelligence","author":"Sun","year":"2024","journal-title":"ECOC 2024"},{"key":"ref15","first-page":"1299","article-title":"Relevance of latency in Ethernet networking for AI infrastructure","author":"Mi","year":"2024","journal-title":"ECOC 2024"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1145\/3635867"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.1109\/ISPASS57527.2023.00035"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.1007\/978-3-642-12331-3_2"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1017\/CBO9780511667572"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1007\/978-3-030-12842-5_1"},{"doi-asserted-by":"publisher","key":"ref21","DOI":"10.1145\/3651890.3672233"},{"year":"2024","author":"Lab","journal-title":"Zhejiang lab LLM trace program","key":"ref22"},{"key":"ref23","article-title":"Qwen2 technical report","author":"Yang","year":"2024","journal-title":"Alibaba group, Tech. Rep."},{"key":"ref24","article-title":"MegaScale: Scaling Large Language Model Training to More Than 10,000 GPUs","volume-title":"Proceedings of the 21st USENIX Symposium on Networked Systems Design and Implementation","author":"Jiang","year":"2024"}],"event":{"name":"2025 International Conference on Optical Network Design and Modeling (ONDM)","start":{"date-parts":[[2025,5,6]]},"location":"Pisa, Italy","end":{"date-parts":[[2025,5,9]]}},"container-title":["2025 International Conference on Optical Network Design and Modeling (ONDM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11029218\/11029219\/11029332.pdf?arnumber=11029332","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T04:46:43Z","timestamp":1750135603000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11029332\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,6]]},"references-count":24,"URL":"https:\/\/doi.org\/10.23919\/ondm65745.2025.11029332","relation":{},"subject":[],"published":{"date-parts":[[2025,5,6]]}}}