{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T06:00:15Z","timestamp":1770357615300,"version":"3.49.0"},"reference-count":49,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100008982","name":"National Science Foundation","doi-asserted-by":"publisher","award":["2047177"],"award-info":[{"award-number":["2047177"]}],"id":[{"id":"10.13039\/501100008982","id-type":"DOI","asserted-by":"publisher"}]},{"name":"RPI-IBM Artificial Intelligence Research Collaboration"},{"name":"ONR","award":["N000141712162"],"award-info":[{"award-number":["N000141712162"]}]},{"name":"AFOSR MURI","award":["FA9550-18-1-0502"],"award-info":[{"award-number":["FA9550-18-1-0502"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Signal Process."],"published-print":{"date-parts":[[2021]]},"DOI":"10.1109\/tsp.2021.3099977","type":"journal-article","created":{"date-parts":[[2021,7,27]],"date-time":"2021-07-27T20:11:01Z","timestamp":1627416661000},"page":"4637-4651","source":"Crossref","is-referenced-by-count":20,"title":["Communication-Adaptive Stochastic Gradient Methods for Distributed Learning"],"prefix":"10.1109","volume":"69","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3477-1439","authenticated-orcid":false,"given":"Tianyi","family":"Chen","sequence":"first","affiliation":[]},{"given":"Yuejiao","family":"Sun","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6697-9731","authenticated-orcid":false,"given":"Wotao","family":"Yin","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","first-page":"362","article-title":"DiSCO: Distributed optimization for self-concordant empirical loss","author":"zhang","year":"2015","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref38","first-page":"1000","article-title":"Communication-efficient distributed optimization using an approximate Newton-type method","author":"shamir","year":"2014","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref33","article-title":"Improving the sample and communication complexity for decentralized non-convex optimization: A joint gradient estimation and tracking approach","author":"sun","year":"0"},{"key":"ref32","article-title":"Federated optimization in heterogeneous networks","author":"li","year":"0"},{"key":"ref31","first-page":"393","article-title":"Efficient decentralized deep learning by dynamic model averaging","author":"kamp","year":"2018","journal-title":"Proc Eur Conf Mach Learn Knowl Discovery Databases"},{"key":"ref30","article-title":"SlowMo: Improving communication-efficient distributed SGD with slow momentum","author":"wang","year":"2020","journal-title":"Proc Intl Conf Learn Representations"},{"key":"ref37","article-title":"Communication-efficient distributed strongly convex stochastic optimization: non-asymptotic rates","author":"sahu","year":"2018"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2020.2983167"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2019.2907258"},{"key":"ref34","first-page":"1662","article-title":"Communication-efficient distributed optimization in networks with gradient tracking and variance reduction","author":"li","year":"2020","journal-title":"Proc Int Conf Artif Intell Stat"},{"key":"ref28","first-page":"5132","article-title":"SCAFFOLD: Stochastic controlled averaging for on-device federated learning","author":"karimireddy","year":"2020","journal-title":"Proc IEEE Intern Conf on Machine Learning"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33015693"},{"key":"ref29","first-page":"11080","article-title":"Local sgd with periodic averaging: Tighter analysis and adaptive synchronization","author":"haddadpour","year":"2019","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2008.2009515"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729586"},{"key":"ref20","first-page":"5973","article-title":"The convergence of sparsified gradient methods","author":"alistarh","year":"2018","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref22","first-page":"9850","article-title":"Atomo: Communication-efficient learning via atomic sparsification","author":"wang","year":"2018","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref21","first-page":"1299","article-title":"Gradient sparsification for communication-efficient distributed optimization","author":"wangni","year":"2018","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref24","first-page":"685","article-title":"Deep learning with elastic averaging SGD","author":"zhang","year":"2015","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref23","author":"peterson","year":"2007","journal-title":"Computer Networks A Systems Approach"},{"key":"ref26","article-title":"Cooperative SGD: A unified framework for the design and analysis of communication-efficient SGD algorithms","author":"wang","year":"2019","journal-title":"Proc Int Conf Mach Learn Workshop Coding Theory Large-Scale ML"},{"key":"ref25","article-title":"Local SGD converges fast and communicates little","author":"stich","year":"2019","journal-title":"Proc Intl Conf Learn Representations"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2011.2171686"},{"key":"ref11","article-title":"1-bit stochastic gradient descent and its application to data-parallel distributed training of speech DNNs","author":"seide","year":"2014","journal-title":"Proc Conf Int Speech Commun Assoc"},{"key":"ref40","first-page":"3068","article-title":"Communication-efficient distributed dual coordinate ascent","author":"jaggi","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref12","first-page":"559","article-title":"SignSGD: Compressed optimisation for non-convex problems","author":"bernstein","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref13","first-page":"1709","article-title":"QSGD: Communication-efficient SGD via gradient quantization and encoding","author":"alistarh","year":"2017","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref14","first-page":"5325","article-title":"Error compensated quantized SGD and its applications to large-scale distributed optimization","author":"wu","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref15","first-page":"4035","article-title":"Zipml: Training linear models with end-to-end low precision, and a little bit of deep learning","author":"zhang","year":"2017","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref16","first-page":"1509","article-title":"Terngrad: Ternary gradients to reduce communication in distributed deep learning","author":"wen","year":"2017","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1045"},{"key":"ref18","article-title":"Deep gradient compression: Reducing the communication bandwidth for distributed training","author":"lin","year":"2018","journal-title":"Proc Intl Conf Learn Representations"},{"key":"ref19","first-page":"4447","article-title":"Sparsified SGD with memory","author":"stich","year":"2018","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref4","first-page":"1223","article-title":"Large scale distributed deep networks","author":"dean","year":"2012","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-41589-5_14"},{"key":"ref6","first-page":"1273","article-title":"Communication-efficient learning of deep networks from decentralized data","author":"mcmahan","year":"2017","journal-title":"Proc Int Conf Artif Intell Stat"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1137\/16M1080173"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1080\/01621459.2018.1429274"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"953","DOI":"10.1109\/JPROC.2018.2817461","article-title":"Network topology and communication-computation tradeoffs in decentralized optimization","volume":"106","author":"nedić","year":"2018","journal-title":"Proc IEEE"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2020.3024629"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2005.843546"},{"key":"ref46","first-page":"795","article-title":"Linear convergence of gradient and proximal-gradient methods under the Polyak-Łojasiewicz condition","author":"karimi","year":"2016","journal-title":"Proc Eur Conf Mach Learn"},{"key":"ref45","first-page":"3054","article-title":"A comprehensive linear speedup analysis for asynchronous stochastic parallel optimization from zeroth-order to first-order","author":"lian","year":"2016","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref48","year":"1983","journal-title":"Problem Complexity and Method Efficiency in Optimization"},{"key":"ref47","article-title":"Making gradient descent optimal for strongly convex stochastic optimization","author":"rakhlin","year":"0"},{"key":"ref42","first-page":"3370","article-title":"Communication-efficient distributed learning via lazily aggregated quantized gradients","author":"sun","year":"2019","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref41","first-page":"5050","article-title":"LAG: Lazily aggregated gradient for communication-efficient distributed learning","author":"chen","year":"2018","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref44","first-page":"315","article-title":"Accelerating stochastic gradient descent using predictive variance reduction","author":"johnson","year":"2013","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1137\/120880811"}],"container-title":["IEEE Transactions on Signal Processing"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/78\/9307529\/9497717-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/78\/9307529\/09497717.pdf?arnumber=9497717","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:50:36Z","timestamp":1652194236000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9497717\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"references-count":49,"URL":"https:\/\/doi.org\/10.1109\/tsp.2021.3099977","relation":{},"ISSN":["1053-587X","1941-0476"],"issn-type":[{"value":"1053-587X","type":"print"},{"value":"1941-0476","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021]]}}}