{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T19:14:16Z","timestamp":1774120456529,"version":"3.50.1"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,5,17]],"date-time":"2023-05-17T00:00:00Z","timestamp":1684281600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,5,17]],"date-time":"2023-05-17T00:00:00Z","timestamp":1684281600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,5,17]]},"DOI":"10.1109\/infocom53939.2023.10228874","type":"proceedings-article","created":{"date-parts":[[2023,8,29]],"date-time":"2023-08-29T13:40:43Z","timestamp":1693316443000},"page":"1-10","source":"Crossref","is-referenced-by-count":15,"title":["PipeMoE: Accelerating Mixture-of-Experts through Adaptive Pipelining"],"prefix":"10.1109","author":[{"given":"Shaohuai","family":"Shi","sequence":"first","affiliation":[{"name":"Harbin Institute of Technology,School of Computer Science and Technology,Shenzhen"}]},{"given":"Xinglin","family":"Pan","sequence":"additional","affiliation":[{"name":"Hong Kong Baptist University,Department of Computer Science"}]},{"given":"Xiaowen","family":"Chu","sequence":"additional","affiliation":[{"name":"Hong Kong Baptist University,Department of Computer Science"}]},{"given":"Bo","family":"Li","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology,Department of Computer Science and Engineering"}]}],"member":"263","reference":[{"key":"ref13","article-title":"Doubling all2all performance with nvidia collective communication library 2.12","year":"0"},{"key":"ref35","article-title":"Mixture-of-experts with expert choice routing","author":"zhou","year":"2022"},{"key":"ref12","article-title":"Taming sparsely activated transformer with stochastic experts","author":"zuo","year":"2022","journal-title":"International Conference on Learning Representations"},{"key":"ref34","article-title":"Horovod: fast and easy distributed deep learning in TensorFlow","author":"sergeev","year":"2018"},{"key":"ref15","article-title":"DeepSpeed-MoE: Advancing mixture-of-experts inference and training to power next-generation ai scale","author":"rajbhandari","year":"2022"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM.2019.8737367"},{"key":"ref14","article-title":"HetuMoE: An efficient trillion-scale mixture-of-expert distributed training system","author":"nie","year":"2022"},{"key":"ref36","first-page":"13 782","article-title":"Gating dropout: Communication-efficient regularization for sparsely activated transformers","author":"liu","year":"2022","journal-title":"International Conference on Learning Representations"},{"key":"ref31","article-title":"RoBERTa: A robustly optimized BERT pretraining approach","author":"liu","year":"2019"},{"key":"ref30","article-title":"Attention is all you need","volume":"30","author":"vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref11","first-page":"6265","article-title":"BASE layers: Simplifying training of large, sparse models","author":"lewis","year":"2021","journal-title":"International Conference on Machine Learning"},{"key":"ref33","first-page":"181","article-title":"Poseidon: An 
efficient communication architecture for distributed deep learning on GPU clusters","author":"zhang","year":"2017","journal-title":"2017 USENIX Annual Technical Conference (USENIX ATC 17)"},{"key":"ref10","article-title":"Tutel: Adaptive mixture-of-experts at scale","author":"hwang","year":"2022"},{"key":"ref32","article-title":"Pointer sentinel mixture models","author":"stephen","year":"2017","journal-title":"International Conference on Learning Representations"},{"key":"ref2","article-title":"Palm: Scaling language modeling with pathways","author":"chowdhery","year":"2022","journal-title":"Proceedings of Machine Learning and Systems 2022"},{"key":"ref1","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref17","first-page":"1223","article-title":"Large scale distributed deep networks","author":"dean","year":"2012","journal-title":"Advances in neural information processing systems"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM41043.2020.9155446"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/SC41404.2022.00051"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359642"},{"key":"ref19","article-title":"Large batch optimization for deep learning: Training BERT in 76 minutes","author":"you","year":"2020","journal-title":"International Conference on Learning Representations"},{"key":"ref18","article-title":"Highly scalable deep learning training system with mixed-precision: Training ImageNet in four minutes","author":"jia","year":"2018","journal-title":"Proc of Workshop on Systems for ML and Open Source Software collocated with NeurIPS 2018"},{"key":"ref24","article-title":"SE-MoE: A scalable and efficient mixture-of-experts distributed training and inference system","author":"shen","year":"2022"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/SC.2000.10024"},{"key":"ref26","author":"grama","year":"2003","journal-title":"Introduction to Parallel Computing"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/0167-8191(94)00110-V"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CGO53902.2022.9741270"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM48880.2022.9796787"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/71.642949"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/505202.505215"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-4009"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/b98874"},{"key":"ref29","article-title":"Pytorch: An imperative style, high-performance deep learning library","volume":"32","author":"paszke","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3503221.3508417"},{"key":"ref7","first-page":"8583","article-title":"Scaling vision with sparse mixture of experts","volume":"34","author":"riquelme","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3503221.3508418"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1991.3.1.79"},{"key":"ref3","first-page":"1","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"fedus","year":"2022","journal-title":"Journal of Machine 
Learning Research"},{"key":"ref6","article-title":"GShard: Scaling giant models with conditional computation and automatic sharding","author":"lepikhin","year":"2021","journal-title":"International Conference on Learning Representations"},{"key":"ref5","article-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer","author":"shazeer","year":"2017","journal-title":"International Conference on Learning Representations"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM42981.2021.9488803"}],"event":{"name":"IEEE INFOCOM 2023 - IEEE Conference on Computer Communications","location":"New York City, NY, USA","start":{"date-parts":[[2023,5,17]]},"end":{"date-parts":[[2023,5,20]]}},"container-title":["IEEE INFOCOM 2023 - IEEE Conference on Computer Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10228851\/10228852\/10228874.pdf?arnumber=10228874","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,18]],"date-time":"2023-09-18T13:44:39Z","timestamp":1695044679000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10228874\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,5,17]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/infocom53939.2023.10228874","relation":{},"subject":[],"published":{"date-parts":[[2023,5,17]]}}}