{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T17:01:37Z","timestamp":1775667697033,"version":"3.50.1"},"reference-count":58,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Hong Kong Research Grants Council"},{"name":"AoE\/E-601\/22-R"},{"name":"NSFC\/RGC Collaborative Research Scheme","award":["CRS_HKUST603\/22"],"award-info":[{"award-number":["CRS_HKUST603\/22"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Mobile Comput."],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1109\/tmc.2024.3461852","type":"journal-article","created":{"date-parts":[[2024,9,17]],"date-time":"2024-09-17T18:55:50Z","timestamp":1726599350000},"page":"435-448","source":"Crossref","is-referenced-by-count":9,"title":["Achieving Linear Speedup in Asynchronous Federated Learning With Heterogeneous Clients"],"prefix":"10.1109","volume":"24","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5267-3464","authenticated-orcid":false,"given":"Xiaolu","family":"Wang","sequence":"first","affiliation":[{"name":"Software Engineering Institute, East China Normal University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5094-3697","authenticated-orcid":false,"given":"Zijian","family":"Li","sequence":"additional","affiliation":[{"name":"Department of Electronic and Computer Engineering, The Hong Kong University of Science and Technology, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0271-6021","authenticated-orcid":false,"given":"Shi","family":"Jin","sequence":"additional","affiliation":[{"name":"National Mobile Communications Research Laboratory, Southeast University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5222-1898","authenticated-orcid":false,"given":"Jun","family":"Zhang","sequence":"additional","affiliation":[{"name":"Department of Electronic and Computer Engineering, The Hong Kong University of Science and Technology, Hong Kong"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1561\/2200000083"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.seta.2022.102987"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.3390\/s22020450"},{"key":"ref4","volume-title":"Parallel and Distributed Computation: Numerical Methods","author":"Bertsekas","year":"2015"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2020.2975749"},{"key":"ref6","first-page":"1273","article-title":"Communication-efficient learning of deep networks from decentralized data","volume-title":"Proc. 20th Int. Conf. Artif. Intell. Statist.","author":"McMahan"},{"key":"ref7","article-title":"Local SGD converges fast and communicates little","volume-title":"Proc. 7th Int. Conf. Learn. Representations","author":"Stich"},{"key":"ref8","article-title":"Achieving linear speedup with partial worker participation in non-IID federated learning","volume-title":"Proc. 9th Int. Conf. Learn. Representations","author":"Yang"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2022.3162147"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2020.3026619"},{"key":"ref11","first-page":"11082","article-title":"Local SGD with periodic averaging: Tighter analysis and adaptive synchronization","volume-title":"Proc. 32nd Adv. Neural. Inf. Process. Syst.","author":"Haddadpour"},{"issue":"1","key":"ref12","first-page":"9709","article-title":"Cooperative SGD: A unified framework for the design and analysis of local-update SGD algorithms","volume":"22","author":"Wang","year":"2021","journal-title":"J. Mach. Learn. Res."},{"issue":"1","key":"ref13","first-page":"9613","article-title":"The error-feedback framework: Better rates for SGD with delayed gradients and compressed updates","volume":"21","author":"Stich","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref14","first-page":"4519","article-title":"Tighter theory for local SGD on identical and heterogeneous data","volume-title":"Proc. 23rd Int. Conf. Artif. Intell. Statist.","author":"Khaled"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/447"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33015693"},{"key":"ref17","first-page":"7184","article-title":"On the linear speedup analysis of communication efficient momentum SGD for distributed non-convex optimization","volume-title":"Proc. 36th Int. Conf. Mach. Learn.","author":"Yu"},{"key":"ref18","first-page":"5132","article-title":"SCAFFOLD: Stochastic controlled averaging for federated learning","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Karimireddy"},{"key":"ref19","first-page":"12 052","article-title":"Fast federated learning in the presence of arbitrary device unavailability","volume-title":"Proc. 34th Adv. Neural. Inf. Process. Syst.","author":"Gu"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CDC49753.2023.10383924"},{"key":"ref21","first-page":"873","article-title":"Distributed delayed stochastic optimization","volume-title":"Proc. 24th Adv. Neural. Inf. Process. Syst.","author":"Agarwal"},{"key":"ref22","first-page":"2737","article-title":"Asynchronous parallel stochastic gradient for nonconvex optimization","volume-title":"Proc. 28th Adv. Neural. Inf. Process. Syst.","author":"Lian"},{"key":"ref23","first-page":"803","article-title":"Slow and stale gradients can win the race: Error-runtime trade-offs in distributed SGD","volume-title":"Proc. 27th Int. Joint Conf. Artif. Intell.","author":"Dutta"},{"key":"ref24","first-page":"111","article-title":"A tight convergence analysis for stochastic gradient descent with delayed updates","volume-title":"Proc. 31st Int. Conf. Algorithm Learn. Theory","author":"Arjevani"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1137\/1.9781611976700.50"},{"key":"ref26","first-page":"17 202","article-title":"Sharper convergence guarantees for asynchronous SGD for distributed and federated learning","volume-title":"Proc. 35th Adv. Neural. Inf. Process. Syst.","author":"Koloskova"},{"key":"ref27","first-page":"3581","article-title":"Federated learning with buffered asynchronous aggregation","volume-title":"Proc. 25th Int. Conf. Artif. Intell. Statist.","author":"Nguyen"},{"key":"ref28","article-title":"Tackling the data heterogeneity in asynchronous federated learning with cached update calibration","volume-title":"Proc. 12th Int. Conf. Learn. Representations","author":"Wang"},{"key":"ref29","article-title":"Asynchronous federated optimization","author":"Xie","year":"2019"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/BigData50022.2020.9378161"},{"issue":"110","key":"ref31","first-page":"1","article-title":"A general theory for federated optimization with asynchronous and heterogeneous clients updates","volume":"24","author":"Fraboni","year":"2023","journal-title":"J. Mach. Learn. Res."},{"key":"ref32","article-title":"QuAFL: Federated averaging can be both asynchronous and communication-efficient","author":"Zakerinia","year":"2022"},{"key":"ref33","article-title":"FAVANO: Federated averaging with asynchronous nodes","author":"Leconte","year":"2023"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2021.3118435"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TNSM.2023.3252818"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1137\/S0363012993250220"},{"key":"ref37","first-page":"2595","article-title":"Parallelized stochastic gradient descent","volume-title":"Proc. 23rd Adv. Neural. Inf. Process. Syst.","author":"Zinkevich"},{"key":"ref38","first-page":"429","article-title":"Federated optimization in heterogeneous networks","volume-title":"Proc. Mach. Learn. Syst.","author":"Li"},{"key":"ref39","first-page":"7611","article-title":"Tackling the objective inconsistency problem in heterogeneous federated optimization","volume-title":"Proc. 33rd Adv. Neural. Inf. Process. Syst.","author":"Wang"},{"key":"ref40","article-title":"On the convergence of FedAvg on non-IID data","volume-title":"Proc. 7th Int. Conf. Learn. Representations","author":"Li"},{"key":"ref41","first-page":"425","article-title":"Federated learning under arbitrary communication patterns","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Avdiukhin"},{"key":"ref42","first-page":"41 399","article-title":"No one idles: Efficient heterogeneous federated learning with parallel edge and server computation","volume-title":"Proc. 40th Int. Conf. Mach. Learn.","author":"Zhang"},{"key":"ref43","article-title":"Fashion-MNIST: A novel image dataset for benchmarking machine learning algorithms","author":"Xiao","year":"2017"},{"key":"ref44","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref45","article-title":"Client selection in federated learning: Convergence analysis and power-of-choice selection strategies","author":"Cho","year":"2020"},{"key":"ref46","article-title":"Federated optimization: Distributed machine learning for on-device intelligence","author":"Kone\u010dny","year":"2016"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2904348"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2019.8761315"},{"key":"ref49","article-title":"Federated learning with non-IID data","author":"Zhao","year":"2018"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2022.3190512"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/tmc.2023.3338021"},{"key":"ref52","first-page":"10 533","article-title":"DReS-FL: Dropout-resilient secure federated learning for non-IID clients via secret data sharing","volume-title":"Proc. 35th Adv. Neural. Inf. Process. Syst.","author":"Shao"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2023.3334732"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1038\/s41467-023-44383-9"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2023.3336277"},{"key":"ref56","first-page":"7252","article-title":"Bayesian nonparametric federated learning of neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yurochkin"},{"key":"ref57","article-title":"Measuring the effects of non-identical data distribution for federated visual classification","author":"Hsu","year":"2019"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICDE53745.2022.00077"}],"container-title":["IEEE Transactions on Mobile Computing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7755\/10777875\/10681663.pdf?arnumber=10681663","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,15]],"date-time":"2025-01-15T20:26:36Z","timestamp":1736972796000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10681663\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1]]},"references-count":58,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tmc.2024.3461852","relation":{},"ISSN":["1536-1233","1558-0660","2161-9875"],"issn-type":[{"value":"1536-1233","type":"print"},{"value":"1558-0660","type":"electronic"},{"value":"2161-9875","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1]]}}}