{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,13]],"date-time":"2025-12-13T07:14:13Z","timestamp":1765610053046,"version":"3.37.3"},"reference-count":125,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2020,11,1]],"date-time":"2020-11-01T00:00:00Z","timestamp":1604188800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,11,1]],"date-time":"2020-11-01T00:00:00Z","timestamp":1604188800000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,11,1]],"date-time":"2020-11-01T00:00:00Z","timestamp":1604188800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,11,1]],"date-time":"2020-11-01T00:00:00Z","timestamp":1604188800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["CCF-1845076","IIS-1838179"],"award-info":[{"award-number":["CCF-1845076","IIS-1838179"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000183","name":"Army Research Office","doi-asserted-by":"publisher","award":["W911NF-19-1-0027"],"award-info":[{"award-number":["W911NF-19-1-0027"]}],"id":[{"id":"10.13039\/100000183","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"National Science 
Foundation","doi-asserted-by":"publisher","award":["CCF-1453073","CCF-1907658","OAC-1940074"],"award-info":[{"award-number":["CCF-1453073","CCF-1907658","OAC-1940074"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000183","name":"Army Research Office","doi-asserted-by":"publisher","award":["W911NF-17-1-0546"],"award-info":[{"award-number":["W911NF-17-1-0546"]}],"id":[{"id":"10.13039\/100000183","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000185","name":"DARPA Lagrange Program under ONR\/NIWC","doi-asserted-by":"publisher","award":["N660011824020"],"award-info":[{"award-number":["N660011824020"]}],"id":[{"id":"10.13039\/100000185","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Proc. IEEE"],"published-print":{"date-parts":[[2020,11]]},"DOI":"10.1109\/jproc.2020.3021381","type":"journal-article","created":{"date-parts":[[2020,9,29]],"date-time":"2020-09-29T22:27:36Z","timestamp":1601418456000},"page":"1984-2012","source":"Crossref","is-referenced-by-count":17,"title":["Scaling-Up Distributed Processing of Data Streams for Machine Learning"],"prefix":"10.1109","volume":"108","author":[{"given":"Matthew","family":"Nokleby","sequence":"first","affiliation":[]},{"given":"Haroon","family":"Raja","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4406-5263","authenticated-orcid":false,"given":"Waheed U.","family":"Bajwa","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4939-1384-8_8"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-21551-3_7"},{"key":"ref33","article-title":"SlowMo: Improving communication-efficient distributed SGD with slow momentum","author":"wang","year":"2020","journal-title":"Proc Int Conf Learn Represent 
(ICLR)"},{"key":"ref32","first-page":"3368","article-title":"Gradient coding: Avoiding stragglers in distributed learning","author":"tandon","year":"2017","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref31","article-title":"Revisiting distributed synchronous SGD","author":"chen","year":"2016","journal-title":"Proc Int Conf Learn Represent Workshop Track"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM.2016.0074"},{"key":"ref37","first-page":"2422","article-title":"Path-SGD: Path-normalized optimization in deep neural networks","author":"neyshabur","year":"2015","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref36","first-page":"265","article-title":"On optimization methods for deep learning","author":"le","year":"2011","journal-title":"Proc 28th Int Conf Mach Learn (ICML)"},{"key":"ref35","article-title":"Stochastic convex optimization","author":"shalev-shwartz","year":"2009","journal-title":"Proc Conf Learn Theory (COLT)"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/72.788640"},{"key":"ref28","first-page":"693","article-title":"Hogwild!: A lock-free approach to parallelizing stochastic gradient descent","author":"recht","year":"2011","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-30218-6_19"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/Allerton.2012.6483403"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2020.2975749"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2020.2973345"},{"key":"ref21","article-title":"Advances and open problems in federated learning","author":"kairouz","year":"2019","journal-title":"arXiv 1912 
04977"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/2640087.2644155"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2020.2974267"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2017.2648041"},{"article-title":"An introduction to the MPI standard","year":"1995","author":"dongarra","key":"ref26"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2016.2548989"},{"key":"ref25","first-page":"265","article-title":"TensorFlow: A system for large-scale machine learning","author":"abadi","year":"2016","journal-title":"Proc of USENIX Symp on Operating Systems Design and Implementation (OSDI)"},{"key":"ref50","first-page":"1","article-title":"Information-theoretic lower bounds on the oracle complexity of convex optimization","author":"agarwal","year":"2009","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref51","first-page":"2543","article-title":"Dual averaging methods for regularized stochastic learning and online optimization","volume":"11","author":"xiao","year":"2010","journal-title":"J Mach Learn Res"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1137\/0147010"},{"key":"ref58","article-title":"Fast stochastic methods for nonsmooth nonconvex optimization","author":"reddi","year":"2016","journal-title":"arXiv 1605 06900"},{"key":"ref57","first-page":"314","article-title":"Stochastic variance reduction for nonconvex optimization","author":"reddi","year":"2016","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1137\/120895731"},{"key":"ref55","first-page":"1833","article-title":"On graduated optimization for stochastic non-convex problems","author":"hazan","year":"2016","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref54","first-page":"797","article-title":"Escaping from saddle points&#x2014;Online stochastic gradient for tensor decomposition","author":"ge","year":"2015","journal-title":"Proc Conf Learn Theory 
(COLT)"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-015-0871-8"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1137\/120880811"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729586"},{"key":"ref4","first-page":"315","article-title":"Accelerating stochastic gradient descent using predictive variance reduction","author":"johnson","year":"2013","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-010-0434-y"},{"journal-title":"Numerical Optimization","year":"2006","author":"nocedal","key":"ref6"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/2623330.2623612"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.1984.1103385"},{"key":"ref49","volume":"35","author":"kushner","year":"2003","journal-title":"Stochastic Approximation and Recursive Algorithms and Applications"},{"key":"ref7","article-title":"Federated optimization: Distributed machine learning for on-device intelligence","author":"kone\u010dn\u00fd","year":"2016","journal-title":"arXiv 1610 02527"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.sysconle.2004.02.022"},{"article-title":"Efficient estimators from a slowly convergent Robbins-Monro process","year":"1988","author":"ruppert","key":"ref46"},{"key":"ref45","first-page":"98","article-title":"A new method of stochastic approximation type","volume":"5","author":"polyak","year":"1990","journal-title":"Avtomat i Telemekh"},{"key":"ref48","first-page":"543","article-title":"A method for solving the convex programming problem with convergence rate O(1\/k&#x00B2;)","volume":"269","author":"nesterov","year":"1983","journal-title":"Dokl Akad Nauk SSSR"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1137\/0330046"},{"key":"ref42","first-page":"257","article-title":"Convergence of stochastic gradient descent for PCA","author":"shamir","year":"2016","journal-title":"Proc Int Conf Mach Learn 
(ICML)"},{"key":"ref41","first-page":"451","article-title":"Non-asymptotic analysis of stochastic approximation algorithms for machine learning","author":"moulines","year":"2011","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref44","first-page":"728","article-title":"Competing with the empirical risk minimizer in a single pass","author":"frostig","year":"2015","journal-title":"Proc Conf Learning Theory (COLT)"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2017.2671377"},{"key":"ref125","first-page":"560","article-title":"signSGD: Compressed optimisation for non-convex problems","author":"bernstein","year":"2018","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref124","first-page":"1509","article-title":"TernGrad: Ternary gradients to reduce communication in distributed deep learning","author":"wen","year":"2017","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1145\/3188745.3188796"},{"key":"ref72","first-page":"12393","article-title":"Exponentially convergent stochastic k-PCA without variance reduction","author":"tang","year":"2019","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/FOCS.2017.51"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1016\/0041-5553(69)90135-9"},{"key":"ref76","first-page":"1225","article-title":"Train faster, generalize better: Stability of stochastic gradient descent","author":"hardt","year":"2016","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref77","first-page":"2121","article-title":"Adaptive subgradient methods for online learning and stochastic optimization","volume":"12","author":"duchi","year":"2011","journal-title":"J Mach Learn Res"},{"key":"ref74","article-title":"History PCA: A new algorithm for streaming PCA","author":"yang","year":"2018","journal-title":"arXiv 1802 05447"},{"key":"ref75","article-title":"Distributed stochastic algorithms 
for high-rate streaming principal component analysis","author":"raja","year":"2020","journal-title":"arXiv 2001 01017"},{"key":"ref78","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv 1412 6980"},{"key":"ref79","first-page":"1","article-title":"On the convergence of Adam and beyond","author":"reddi","year":"2018","journal-title":"Proc Int Conf Learn Represent (ICLR)"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1137\/0329055"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1137\/S1052623497319225"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1137\/S0363012995293670"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2008.917738"},{"key":"ref64","first-page":"1674","article-title":"Non-convex learning via stochastic gradient Langevin dynamics: A nonasymptotic analysis","author":"raginsky","year":"2017","journal-title":"Proc Conf Learn Theory (COLT)"},{"key":"ref65","first-page":"9671","article-title":"Global non-convex optimization with discretized diffusions","author":"erdogdu","year":"2018","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref66","article-title":"On learning rates and Schr&#x00F6;dinger operators","author":"shi","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref67","first-page":"3174","article-title":"The fast convergence of incremental PCA","author":"balsubramani","year":"2013","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref68","first-page":"1147","article-title":"Streaming PCA: Matching matrix Bernstein and near-optimal finite sample guarantees for Oja&#x2019;s algorithm","author":"jain","year":"2016","journal-title":"Proc Conf Learn Theory (COLT)"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-7908-2604-3_16"},{"key":"ref69","first-page":"699","article-title":"Variance reduction for faster non-convex optimization","author":"allen-zhu","year":"2016","journal-title":"Proc Int Conf 
Mach Learn (ICML)"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1109\/TSIPN.2016.2620440"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/LCSYS.2018.2834316"},{"key":"ref108","first-page":"165","article-title":"Optimal distributed online prediction using mini-batches","volume":"13","author":"dekel","year":"2012","journal-title":"J Mach Learn Res"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2017.2672698"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1214\/18-AOS1713"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1137\/14096668X"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2015.2465300"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2020.2969721"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1016\/j.sigpro.2014.03.037"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2016.2529285"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2011.2118742"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2014.2364096"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2018.2818081"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2017.2685559"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-012-0572-5"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.1109\/ALLERTON.2014.7028543"},{"key":"ref110","doi-asserted-by":"publisher","DOI":"10.1109\/TSIPN.2018.2866320"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/GlobalSIP.2013.6736937"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2014.2304432"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2011.2161027"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2012.6426375"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2012.2198470"},{"key":"ref11"
,"doi-asserted-by":"publisher","DOI":"10.1109\/TAC.1986.1104412"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2008.2009515"},{"key":"ref13","first-page":"1","article-title":"MapReduce: Simplified data processing on large clusters","author":"dean","year":"2004","journal-title":"Proc 6th Conf Symp Operat Syst Des Implement (OSDI)"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2006.887293"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1137\/110837462"},{"key":"ref118","article-title":"Improving the sample and communication complexity for decentralized non-convex optimization: A joint gradient estimation and tracking approach","author":"sun","year":"2019","journal-title":"arXiv 1910 05857"},{"key":"ref16","volume":"3","author":"boyd","year":"2011","journal-title":"Distributed optimization and statistical learning via the alternating direction method of multipliers"},{"key":"ref82","first-page":"2680","article-title":"Natasha 2: Faster non-convex optimization than SGD","author":"allen-zhu","year":"2018","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref117","first-page":"1662","article-title":"Communication-efficient distributed optimization in networks with gradient tracking and variance reduction","author":"li","year":"2020","journal-title":"Proc Int Conf Artif Intell Statist (AISTATS)"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1561\/9781601988515"},{"key":"ref81","first-page":"89","article-title":"Natasha: Faster non-convex stochastic optimization via strongly non-convex parameter","author":"allen-zhu","year":"2017","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-97142-1_1"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1162\/089976699300016304"},{"key":"ref119","first-page":"344","article-title":"Stochastic gradient push for distributed deep learning","author":"assran","year":"2018","journal-title":"Proc 
Int Conf Mach Learn (ICML)"},{"key":"ref19","doi-asserted-by":"crossref","first-page":"953","DOI":"10.1109\/JPROC.2018.2817461","article-title":"Network topology and communication-computation tradeoffs in decentralized optimization","volume":"106","author":"nedi\u0107","year":"2018","journal-title":"Proc IEEE"},{"key":"ref83","first-page":"463","article-title":"Rademacher and Gaussian complexities: Risk bounds and structural results","volume":"3","author":"bartlett","year":"2002","journal-title":"J Mach Learn Res"},{"key":"ref114","first-page":"4592","article-title":"Riemannian SVRG: Fast stochastic optimization on Riemannian manifolds","author":"zhang","year":"2016","journal-title":"Proc Conf Neural Inf Process Syst"},{"journal-title":"Pattern Recognition and Machine Learning","year":"2006","author":"bishop","key":"ref113"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.1137\/17M1116787"},{"key":"ref80","first-page":"2348","article-title":"Non-convex finite-sum optimization via SCSG methods","author":"lei","year":"2017","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref115","first-page":"1080","article-title":"Improved SVRG for non-strongly-convex or sum-of-non-convex objectives","author":"allen-zhu","year":"2016","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"article-title":"Learning multiple layers of features from tiny images","year":"2009","author":"krizhevsky","key":"ref120"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2010.2052531"},{"key":"ref121","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2008.2009515"},{"key":"ref122","first-page":"1737","article-title":"Deep learning with limited numerical precision","author":"gupta","year":"2015","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref123","article-title":"DoReFa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients","author":"zhou","year":"2016","journal-title":"arXiv 1606 06160 
[cs]"},{"key":"ref85","first-page":"499","article-title":"Stability and generalization","volume":"2","author":"bousquet","year":"2002","journal-title":"J Mach Learn Res"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1007\/s10444-004-7634-z"},{"key":"ref87","first-page":"2635","article-title":"Learnability, stability and uniform convergence","volume":"11","author":"shalev-shwartz","year":"2010","journal-title":"J Mach Learn Res"},{"key":"ref88","article-title":"Horovod: Fast and easy distributed deep learning in TensorFlow","author":"sergeev","year":"2018","journal-title":"arXiv 1802 05799"}],"container-title":["Proceedings of the IEEE"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/5\/9241485\/9206551-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5\/9241485\/09206551.pdf?arnumber=9206551","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T13:39:53Z","timestamp":1651066793000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9206551\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,11]]},"references-count":125,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/jproc.2020.3021381","relation":{},"ISSN":["0018-9219","1558-2256"],"issn-type":[{"type":"print","value":"0018-9219"},{"type":"electronic","value":"1558-2256"}],"subject":[],"published":{"date-parts":[[2020,11]]}}}