{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T14:45:16Z","timestamp":1775745916091,"version":"3.50.1"},"reference-count":46,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"National Key R&D Program of China","award":["2022ZD0160201"],"award-info":[{"award-number":["2022ZD0160201"]}]},{"name":"Shanghai AI Laboratory, CUHK Interdisciplinary AI Research Institute"},{"name":"Centre for Perceptual and Interactive Intelligence"},{"DOI":"10.13039\/501100003452","name":"Innovation and Technology Commission","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003452","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Parallel Distrib. Syst."],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1109\/tpds.2024.3443255","type":"journal-article","created":{"date-parts":[[2024,8,14]],"date-time":"2024-08-14T17:42:39Z","timestamp":1723657359000},"page":"1867-1878","source":"Crossref","is-referenced-by-count":8,"title":["Proteus: Simulating the Performance of Distributed DNN Training"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6327-2033","authenticated-orcid":false,"given":"Jiangfei","family":"Duan","sequence":"first","affiliation":[{"name":"Department of IE, The Chinese University of Hong Kong, HKSAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4896-121X","authenticated-orcid":false,"given":"Xiuhong","family":"Li","sequence":"additional","affiliation":[{"name":"National Engineering Laboratory for Big Data Analysis and Applications, Peking University, Beijing, China"}]},{"given":"Ping","family":"Xu","sequence":"additional","affiliation":[{"name":"SenseTime Research, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-8525-0608","authenticated-orcid":false,"given":"Xingcheng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Shanghai AI Lab, Shanghai, China"}]},{"given":"Shengen","family":"Yan","sequence":"additional","affiliation":[{"name":"Department of Electronic Engineering, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9076-7998","authenticated-orcid":false,"given":"Yun","family":"Liang","sequence":"additional","affiliation":[{"name":"School of EECS, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8865-7896","authenticated-orcid":false,"given":"Dahua","family":"Lin","sequence":"additional","affiliation":[{"name":"Department of Information Engineering, The Chinese University of Hong Kong, Hong Kong, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref3","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018"},{"key":"ref4","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Brown"},{"key":"ref5","first-page":"1223","article-title":"Large scale distributed deep networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Dean"},{"key":"ref6","article-title":"Megatron-LM: Training multi-billion parameter language models using model parallelism","author":"Shoeybi","year":"2019"},{"key":"ref7","first-page":"58:1","article-title":"Efficient large-scale language model training on GPU clusters using megatron-LM","volume-title":"Proc. Int. Conf. High Perform. Comput. Netw. Storage Anal.","author":"Narayanan"},{"key":"ref8","article-title":"TeraPipe: Token-level pipeline parallelism for training large-scale language models","author":"Li","year":"2021"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/SC41405.2020.00024"},{"key":"ref10","article-title":"ZeRO-offload: Democratizing billion-scale model training","author":"Ren","year":"2021"},{"key":"ref11","first-page":"1","article-title":"Beyond data and model parallelism for deep neural networks.","volume-title":"Proc. Mach. Learn. Syst.","volume":"1","author":"Jia","year":"2019"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3302424.3303953"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3437801.3441593"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359646"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.5555\/3454287.3455008"},{"key":"ref16","first-page":"265","article-title":"TensorFlow: A system for large-scale machine learning","volume-title":"Proc. 12th USENIX Symp. Operating Syst. Des. Implementation","author":"Abadi"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/HIPC.2009.5433179"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA.2011.5749745"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/PACT52795.2021.00020"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-32820-6_90"},{"key":"ref21","first-page":"578","article-title":"TVM: An automated end-to-end optimizing compiler for deep learning","volume-title":"Proc. 13th USENIX Symp. Operating Syst. Des. Implementation","author":"Chen"},{"key":"ref22","first-page":"181","article-title":"A deep learning based cost model for automatic code optimization","volume-title":"Proc. Mach. Learn. Syst.","volume":"3","author":"Baghdadi"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2916550"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/2783258.2783270"},{"key":"ref25","article-title":"PALEO: A performance model for deep neural networks","volume-title":"Proc. 5th Int. Conf. Learn. Representations","author":"Qi"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/IPDPS49936.2021.00111"},{"key":"ref27","article-title":"Training deep nets with sublinear memory cost","author":"Chen","year":"2016"},{"key":"ref28","first-page":"559","article-title":"Alpa: Automating inter-and intra-operator parallelism for distributed deep learning","volume-title":"Proc. 16th USENIX Symp. Operating Syst. Des. Implementation","author":"Zheng"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3406703"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.48550\/arxiv.1811.06965"},{"key":"ref31","first-page":"267","article-title":"Unity: Accelerating DNN training through joint optimization of algebraic transformations and parallelization","volume-title":"Proc. 16th USENIX Symp. Operating Syst. Des. Implementation","author":"Unger"},{"key":"ref32","article-title":"GSPMD: General and scalable parallelization for ML computation graphs","author":"Xu","year":"2021"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/HiPC56025.2022.00019"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3579371.3589072"},{"key":"ref35","article-title":"Chakra: Advancing performance benchmarking and co-design using standardized execution traces","author":"Sridharan","year":"2023"},{"key":"ref37","article-title":"On optimizing the communication of model parallelism","volume-title":"Proc. Mach. Learn. Syst.","volume":"5","author":"Zhuang","year":"2023"},{"key":"ref38","article-title":"IOS: Inter-operator scheduler for CNN acceleration","volume-title":"Proc. Mach. Learn. Syst.","author":"Ding"},{"key":"ref39","article-title":"Pollux: Co-adaptive cluster scheduling for goodput-optimized deep learning","volume-title":"Proc. 15th USENIX Symp. Operating Syst. Des. Implementation","author":"Qiao"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CloudIntelligence52565.2021.00012"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/79173.79181"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref43","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014"},{"issue":"8","key":"ref44","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019"},{"key":"ref45","article-title":"Deep learning recommendation model for personalization and recommendation systems","author":"Naumov","year":"2019"},{"key":"ref46","article-title":"One weird trick for parallelizing convolutional neural networks","author":"Krizhevsky","year":"2014"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CLUSTER.2012.25"}],"container-title":["IEEE Transactions on Parallel and Distributed Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/71\/10631781\/10636756.pdf?arnumber=10636756","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,18]],"date-time":"2024-09-18T18:03:00Z","timestamp":1726682580000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10636756\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10]]},"references-count":46,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tpds.2024.3443255","relation":{},"ISSN":["1045-9219","1558-2183","2161-9883"],"issn-type":[{"value":"1045-9219","type":"print"},{"value":"1558-2183","type":"electronic"},{"value":"2161-9883","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10]]}}}