{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:06:26Z","timestamp":1775228786478,"version":"3.50.1"},"reference-count":94,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Science and Technology Major Project of China","award":["2022ZD0119103"],"award-info":[{"award-number":["2022ZD0119103"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62325201"],"award-info":[{"award-number":["62325201"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62102045"],"award-info":[{"award-number":["62102045"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Mobile Comput."],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1109\/tmc.2024.3442430","type":"journal-article","created":{"date-parts":[[2024,8,13]],"date-time":"2024-08-13T17:44:46Z","timestamp":1723571086000},"page":"14344-14360","source":"Crossref","is-referenced-by-count":5,"title":["Efficient, Scalable, and Sustainable DNN Training on SoC-Clustered Edge Servers"],"prefix":"10.1109","volume":"23","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6271-6993","authenticated-orcid":false,"given":"Mengwei","family":"Xu","sequence":"first","affiliation":[{"name":"State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6775-0688","authenticated-orcid":false,"given":"Daliang","family":"Xu","sequence":"additional","affiliation":[{"name":"Key Laboratory of High Confidence Software Technologies, Ministry of Education, School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-1994-9947","authenticated-orcid":false,"given":"Chiheng","family":"Lou","sequence":"additional","affiliation":[{"name":"Key Laboratory of High Confidence Software Technologies, Ministry of Education, School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0779-8310","authenticated-orcid":false,"given":"Li","family":"Zhang","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Networking and Switching Technology, Beijing University of Posts and Telecommunications, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4686-3181","authenticated-orcid":false,"given":"Gang","family":"Huang","sequence":"additional","affiliation":[{"name":"Key Laboratory of High Confidence Software Technologies, Ministry of Education, School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8741-5847","authenticated-orcid":false,"given":"Xin","family":"Jin","sequence":"additional","affiliation":[{"name":"Key Laboratory of High Confidence Software Technologies, Ministry of Education, School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7908-8484","authenticated-orcid":false,"given":"Xuanzhe","family":"Liu","sequence":"additional","affiliation":[{"name":"Key Laboratory of High Confidence Software Technologies, Ministry of Education, School of Computer Science, Peking University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref2","first-page":"285","article-title":"More is different: Prototyping and analyzing a new form of edge server with massive mobile SoCs","volume-title":"Proc. USENIX Annu. Tech. Conf.","author":"Zhang"},{"key":"ref3","first-page":"339","article-title":"High-density mobile cloud gaming on edge SoC farms","volume-title":"Proc. USENIX Annu. Tech. Conf.","author":"Zhang"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3487552.3487815"},{"key":"ref8","first-page":"374","article-title":"Towards federated learning at scale: System design","volume-title":"Proc. Mach. Learn. Syst.","volume":"1","author":"Bonawitz","year":"2019"},{"key":"ref9","article-title":"Federated learning for mobile keyboard prediction","author":"Hard","year":"2018"},{"key":"ref10","article-title":"Federated learning: Strategies for improving communication efficiency","author":"Kone\u010dny","year":"2016"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3498361.3538928"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3495243.3560545"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3287075"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3449942"},{"key":"ref15","article-title":"Vertical semi-federated learning for efficient online advertising","author":"Li","year":"2022"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2022.04.027"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/2668332.2668349"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3081333.3081360"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3498361.3538932"},{"key":"ref20","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014"},{"key":"ref21","first-page":"463","article-title":"A unified architecture for accelerating distributed DNN training in heterogeneous GPU\/CPU clusters","volume-title":"Proc. 14th USENIX Symp. Operating Syst. Des. Implementation","author":"Jiang"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.14778\/3415478.3415530"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359646"},{"key":"ref24","article-title":"Horovod: Fast and easy distributed deep learning in tensorflow","author":"Sergeev","year":"2018"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3469116.3470009"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/PerComWorkshops53856.2022.9767442"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3485447.3512148"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1145\/3503222.3507713"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref30","article-title":"Hierarchical federated learning with privacy","author":"Chandrasekaran","year":"2022"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TNSE.2021.3053588"},{"key":"ref32","article-title":"MNN: A universal and efficient inference engine","author":"Jiang","year":"2020"},{"key":"ref33","first-page":"265","article-title":"TensorFlow: A system for large-scale machine learning","volume-title":"Proc. 12th USENIX Symp. Operating Syst. Des. Implementation","author":"Abadi"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3477132.3483553"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/BigData55660.2022.10021119"},{"key":"ref36","first-page":"19","article-title":"Communication efficient distributed machine learning with the parameter server","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref37","first-page":"1273","article-title":"Communication-efficient learning of deep networks from decentralized data","volume-title":"Proc. Int. Conf. Artif. Intell. Statist.","author":"McMahan"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/3575693.3575712"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/SEC54971.2022.00024"},{"key":"ref40","article-title":"Sharing a GPU between MPI processes: Multiprocess service(MPS)","author":"Lite","year":"2019"},{"key":"ref41","first-page":"533","article-title":"AntMan: Dynamic scaling on GPU clusters for deep learning","volume-title":"Proc. 14th USENIX Symp. Operating Syst. Des. Implementation","author":"Xiao"},{"key":"ref42","first-page":"217","article-title":"PilotFish: Harvesting free cycles of cloud gaming with deep learning training","volume-title":"Proc. USENIX Annu. Tech. Conf.","author":"Zhang"},{"key":"ref43","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2022.3149787"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/MLHPC.2018.8638639"},{"key":"ref48","article-title":"Shuffle-exchange brings faster: Reduce the idle time during communication for decentralized neural network training","author":"Yang","year":"2020"},{"key":"ref49","article-title":"AutoFedNLP: An efficient FedNLP framework","author":"Cai","year":"2022"},{"key":"ref50","first-page":"3043","article-title":"Asynchronous decentralized parallel stochastic gradient descent","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Lian"},{"key":"ref51","first-page":"693","article-title":"HOGWILD!: A lock-free approach to parallelizing stochastic gradient descent","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Recht"},{"key":"ref52","article-title":"Accurate, large minibatch SGD: Training ImageNet in 1 hour","author":"Goyal","year":"2017"},{"key":"ref53","first-page":"1729","article-title":"Train longer, generalize better: Closing the generalization gap in large batch training of neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Hoffer"},{"key":"ref54","article-title":"The limit of the batch size","author":"You","year":"2020"},{"key":"ref55","article-title":"The step decay schedule: A near optimal, geometrically decaying learning rate procedure for least squares","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ge"},{"key":"ref56","article-title":"How does learning rate decay help modern neural networks?","author":"You","year":"2019"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4613-0303-9_16"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1006\/jagm.1998.0938"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"key":"ref61","article-title":"Scaling distributed machine learning with system and algorithm co-design","author":"Li","year":"2017"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.5555\/3454287.3455008"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00204"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijthermalsci.2022.107951"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1016\/j.egypro.2017.12.215"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/26.2790"},{"key":"ref67","article-title":"Lenet-5, convolutional neural networks","author":"LeCun","year":"2015"},{"key":"ref68","article-title":"LEAF: A benchmark for federated settings","author":"Caldas","year":"2018"},{"key":"ref69","article-title":"Fashion-MNIST: A novel image dataset for benchmarking machine learning algorithms","author":"Xiao","year":"2017"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.425"},{"key":"ref71","article-title":"MobileNets: Efficient convolutional neural networks for mobile vision applications","author":"Howard","year":"2017"},{"key":"ref72","article-title":"CINIC-10 is not ImageNet or CIFAR-10","author":"Darlow","year":"2018"},{"key":"ref74","article-title":"Deep gradient compression: Reducing the communication bandwidth for distributed training","author":"Lin"},{"key":"ref75","article-title":"GPT3-to-plan: Extracting plans from text using GPT-3","author":"Olmo","year":"2021"},{"key":"ref81","article-title":"A survey of resource-efficient LLM and multimodal foundation models","author":"Xu","year":"2024"},{"key":"ref82","article-title":"EdgeMoE: Fast on-device inference of MoE-based large language models","author":"Yi","year":"2023"},{"key":"ref83","article-title":"The cap principle for LLM serving","author":"Zeng","year":"2024"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1145\/3636534.3649361"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1145\/3570361.3592505"},{"key":"ref86","first-page":"579","article-title":"FwdLLM: Efficient federated finetuning of large language models with perturbed inferences","volume-title":"Proc. USENIX Annu. Tech. Conf.","author":"Xu"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2019.2962435"},{"key":"ref88","first-page":"5151","article-title":"Scalable methods for 8-bit training of neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Banner"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2017.7966159"},{"key":"ref90","first-page":"3123","article-title":"BinaryConnect: Training deep neural networks with binary weights during propagations","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Courbariaux"},{"key":"ref91","first-page":"1223","article-title":"Large scale distributed deep networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Dean"},{"key":"ref92","first-page":"1","article-title":"Beyond data and model parallelism for deep neural networks.","volume-title":"Proc. Mach. Learn. Syst.","volume":"1","author":"Jia","year":"2019"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1145\/3492321.3519584"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.48550\/arxiv.1811.06965"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/TC.2021.3054656"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1145\/3337821.3337873"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1145\/3419111.3421307"},{"issue":"76","key":"ref98","first-page":"1","article-title":"GADMM: Fast and communication efficient framework for distributed machine learning","volume":"21","author":"Elgabli","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref99","first-page":"418","article-title":"TicTac: Accelerating distributed deep learning with communication scheduling","volume-title":"Proc. Mach. Learn. Syst.","volume":"1","author":"Hashemi","year":"2019"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1145\/3503222.3507778"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/ICDCS.2019.00150"},{"key":"ref102","first-page":"1223","article-title":"More effective distributed ML via a stale synchronous parallel parameter server","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ho"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1145\/3320060"},{"key":"ref104","first-page":"713","article-title":"On convergence of model parallel proximal gradient algorithm for stale synchronous parallel system","volume-title":"Proc. Int. Conf. Artif. Intell. Statist.","author":"Zhou"},{"key":"ref105","article-title":"Hierarchical federated learning through LAN-WAN orchestration","author":"Yuan","year":"2020"},{"key":"ref106","article-title":"Scalable smartphone cluster for deep learning","author":"Na","year":"2021"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1145\/3575693.3575710"}],"container-title":["IEEE Transactions on Mobile Computing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7755\/10746253\/10634823.pdf?arnumber=10634823","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:24:24Z","timestamp":1732667064000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10634823\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12]]},"references-count":94,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tmc.2024.3442430","relation":{},"ISSN":["1536-1233","1558-0660","2161-9875"],"issn-type":[{"value":"1536-1233","type":"print"},{"value":"1558-0660","type":"electronic"},{"value":"2161-9875","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12]]}}}