{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T18:14:54Z","timestamp":1773857694593,"version":"3.50.1"},"reference-count":39,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,1]],"date-time":"2025-04-01T00:00:00Z","timestamp":1743465600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},
"short-container-title":["IEEE Trans. Parallel Distrib. Syst."],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1109\/tpds.2025.3539297","type":"journal-article","created":{"date-parts":[[2025,2,6]],"date-time":"2025-02-06T18:47:06Z","timestamp":1738867626000},"page":"677-688","source":"Crossref","is-referenced-by-count":6,"title":["EfficientMoE: Optimizing Mixture-of-Experts Model Training With Adaptive Load Balance"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2026-417X","authenticated-orcid":false,"given":"Yan","family":"Zeng","sequence":"first","affiliation":[{"name":"Hangzhou Dianzi University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-3966-6143","authenticated-orcid":false,"given":"Chengchuang","family":"Huang","sequence":"additional","affiliation":[{"name":"Hangzhou Dianzi University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-6581-1028","authenticated-orcid":false,"given":"Yipeng","family":"Mei","sequence":"additional","affiliation":[{"name":"Hangzhou Dianzi University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-1856-5760","authenticated-orcid":false,"given":"Lifu","family":"Zhang","sequence":"additional","affiliation":[{"name":"Hangzhou Dianzi University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-9517-2845","authenticated-orcid":false,"given":"Teng","family":"Su","sequence":"additional","affiliation":[{"name":"Huawei Technologies Co Ltd, Distributed Computing Lab, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-7969-179X","authenticated-orcid":false,"given":"Wei","family":"Ye","sequence":"additional","affiliation":[{"name":"Hangzhou Dianzi University, Hangzhou, China"}]},{"given":"Wenqi","family":"Shi","sequence":"additional","affiliation":[{"name":"Huawei Technologies Co Ltd, Distributed Computing Lab, Shenzhen, China"}]},
{"ORCID":"https:\/\/orcid.org\/0000-0003-2995-9832","authenticated-orcid":false,"given":"Shengnan","family":"Wang","sequence":"additional","affiliation":[{"name":"Huawei Technologies Co Ltd, Distributed Computing Lab, Shenzhen, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref2","article-title":"Megatron-LM: Training multi-billion parameter language models using model parallelism","author":"Shoeybi","year":"2019"},{"key":"ref3","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018"},{"key":"ref4","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020"},{"key":"ref5","first-page":"4055","article-title":"Image transformer","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Parmar"},{"key":"ref6","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Brown"},{"key":"ref7","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref8","article-title":"M6: A chinese multimodal pretrainer","author":"Lin","year":"2021"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1991.3.1.79"},{"issue":"120","key":"ref10","first-page":"1","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"Fedus","year":"2022","journal-title":"J. Mach. Learn. Res."},
{"key":"ref11","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/2024.acl-long.70","article-title":"DeepSeekMoE: Towards ultimate expert specialization in mixture-of-experts language models","author":"Dai","year":"2024"},{"key":"ref12","article-title":"Mixtral of experts","author":"Jiang","year":"2024"},{"key":"ref13","article-title":"GPT-4 technical report","author":"Achiam","year":"2023"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3503221.3508418"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/IPDPS57955.2024.00086"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.5555\/3454287.3455008"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/MM.2019.2935967"},{"key":"ref18","article-title":"Large batch training of convolutional networks with layer-wise adaptive rate scaling","author":"Ginsburg","year":"2018"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.48550\/arxiv.1811.06965"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359646"},{"key":"ref21","first-page":"10435","article-title":"Mesh-tensorflow: Deep learning for supercomputers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Shazeer"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/SC41405.2020.00024"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3605573.3605613"},{"key":"ref24","article-title":"GShard: Scaling giant models with conditional computation and automatic sharding","author":"Lepikhin","year":"2020"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3603269.3604869"},{"key":"ref26","first-page":"945","article-title":"Accelerating distributed MoE training and inference with lina","volume-title":"Proc. USENIX Annu. Tech. Conf.","author":"Li"},{"key":"ref27","first-page":"288","article-title":"MegaBlocks: Efficient sparse training with mixture-of-experts","volume-title":"Proc. Mach. Learn. Syst.","volume":"5","author":"Gale"},
{"key":"ref28","first-page":"269","article-title":"Tutel: Adaptive mixture-of-experts at scale","volume-title":"Proc. Mach. Learn. Syst.","volume":"5","author":"Hwang"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2021.3132413"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA51647.2021.00071"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/2038558.2038571"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.98"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P16-1144"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.21236\/ADA273556"},{"key":"ref35","article-title":"Benchmarking TPU, GPU, and CPU platforms for deep learning","author":"Wang","year":"2019"},{"key":"ref36","article-title":"Scaling language models: Methods, analysis & insights from training gopher","author":"Rae","year":"2021"},{"key":"ref37","article-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer","author":"Shazeer","year":"2017"},{"key":"ref38","first-page":"18332","article-title":"DeepSpeed-MoE: Advancing mixture-of-experts inference and training to power next-generation AI scale","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rajbhandari"},{"key":"ref39","first-page":"6265","article-title":"Base layers: Simplifying training of large, sparse models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Lewis"}],
"container-title":["IEEE Transactions on Parallel and Distributed Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/71\/10908515\/10876795.pdf?arnumber=10876795","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,3]],"date-time":"2025-03-03T18:39:48Z","timestamp":1741027188000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10876795\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4]]},"references-count":39,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tpds.2025.3539297","relation":{},"ISSN":["1045-9219","1558-2183","2161-9883"],"issn-type":[{"value":"1045-9219","type":"print"},{"value":"1558-2183","type":"electronic"},{"value":"2161-9883","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4]]}}}