{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T01:59:11Z","timestamp":1774058351175,"version":"3.50.1"},"reference-count":43,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"NSF","award":["61573331"],"award-info":[{"award-number":["61573331"]}]},{"name":"NSF","award":["61973324"],"award-info":[{"award-number":["61973324"]}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"name":"NSF","award":["1509040"],"award-info":[{"award-number":["1509040"]}]},{"name":"NSF","award":["1508993"],"award-info":[{"award-number":["1508993"]}]},{"name":"NSF","award":["1711471"],"award-info":[{"award-number":["1711471"]}]},{"name":"NSF","award":["1901134"],"award-info":[{"award-number":["1901134"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Signal Process."],"published-print":{"date-parts":[[2020]]},"DOI":"10.1109\/tsp.2020.3012952","type":"journal-article","created":{"date-parts":[[2020,7,31]],"date-time":"2020-07-31T20:14:42Z","timestamp":1596226482000},"page":"4583-4596","source":"Crossref","is-referenced-by-count":188,"title":["Federated Variance-Reduced Stochastic Gradient Descent With Robustness to Byzantine Attacks"],"prefix":"10.1109","volume":"68","author":[{"given":"Zhaoxian","family":"Wu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4222-5964","authenticated-orcid":false,"given":"Qing","family":"Ling","sequence":"additional","affiliation":[]},{"given":"Tianyi","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0196-0260","authenticated-orcid":false,"given":"Georgios B.","family":"Giannakis","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2020.2968280"},{"key":"ref38","first-page":"4855","article-title":"D2: Decentralized training over decentralized data","author":"tang","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref33","first-page":"2647","article-title":"On variance reduction in stochastic gradient descent and its asynchronous variants","author":"reddi","year":"0","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM.2016.0022"},{"key":"ref31","article-title":"Distributed SAGA: Maintaining linear convergence rate with limited communication","author":"calauzenes","year":"2017","journal-title":"arXiv 1705 10405"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682527"},{"key":"ref37","first-page":"5055","article-title":"LAG: Lazily aggregated gradient for communication-efficient distributed learning","author":"chen","year":"0","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref36","first-page":"1273","article-title":"Communication-efficient learning of deep networks from decentralized data","author":"mcmahan","year":"0","journal-title":"Proc AISTATS"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/s10479-008-0352-z"},{"key":"ref34","first-page":"177","article-title":"Large-scale machine learning with stochastic gradient descent","author":"bottou","year":"0","journal-title":"Proc COMPSTAT"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.3150\/14-BEJ645"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683121"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3154503"},{"key":"ref12","article-title":"Generalized Byzantine-tolerant SGD","author":"xie","year":"2018","journal-title":"arXiv 1802 10116"},{"key":"ref13","first-page":"5636","article-title":"Byzantine-robust distributed learning: Towards optimal statistical rates","author":"yin","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref14","first-page":"12:1","article-title":"Securing distributed machine learning in high dimensions","volume":"3","author":"su","year":"2018","journal-title":"Proc ACM Meas Anal Comput Syst"},{"key":"ref15","first-page":"118","article-title":"Machine learning with adversaries: Byzantine tolerant gradient descent","author":"blanchard","year":"0","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33011544"},{"key":"ref17","article-title":"Distributed robust learning","author":"feng","year":"2014","journal-title":"arXiv 1409 5937"},{"key":"ref18","first-page":"7074","article-title":"Defending against saddle point attack in Byzantine-robust distributed learning","author":"yin","year":"0","journal-title":"Proc ICML"},{"key":"ref19","first-page":"902","article-title":"DRACO: Byzantine-resilient distributed training via redundant gradients","author":"chen","year":"0","journal-title":"Proc ICML"},{"key":"ref28","first-page":"8194","article-title":"Katyusha: The first direct acceleration of stochastic gradient methods","volume":"18","author":"allen-zhu","year":"2017","journal-title":"J Mach Learn Res"},{"key":"ref4","article-title":"Federated optimization: Distributed machine learning for on-device intelligence","author":"konecny","year":"2016","journal-title":"arXiv 1610 02527"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-016-1030-6"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2018.2846297"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2013.2262116"},{"key":"ref29","first-page":"1622","article-title":"Stochastic optimization with variance reduction for infinite datasets with finite sum structure","author":"bietti","year":"0","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref5","author":"yang","year":"2019","journal-title":"Federated Learning"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/357172.357176"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2018.2842097"},{"key":"ref2","first-page":"1529","article-title":"Local privacy and minimax bounds: Sharp rates for probability estimation","author":"duchi","year":"0","journal-title":"Proc 26th Int Conf Neural Inf Process Syst"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2020.2973345"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/342009.335438"},{"key":"ref20","first-page":"10320","article-title":"DETOX: A redundancy-based framework for faster and more robust gradient aggregation","author":"rajput","year":"0","journal-title":"Proc NIPS"},{"key":"ref22","article-title":"Accurate, large minibatch SGD: Training imagenet in 1 hour","author":"goyal","year":"2017","journal-title":"arXiv 1706 02677"},{"key":"ref21","article-title":"Fall of empires: Breaking Byzantine-tolerant SGD by inner product manipulation","author":"xie","year":"0","journal-title":"Proc UAI"},{"key":"ref42","article-title":"BRIDGE: Byzantine-resilient decentralized gradient descent","author":"yang","year":"2019","journal-title":"arXiv 1908 08098"},{"key":"ref24","first-page":"567","article-title":"Stochastic dual coordinate ascent methods for regularized loss minimization","volume":"14","author":"shalev-shwartz","year":"2013","journal-title":"J Mach Learn Res"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2015.2471755"},{"key":"ref23","first-page":"315","article-title":"Accelerating stochastic gradient descent using predictive variance reduction","author":"johnson","year":"0","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref26","first-page":"1646","article-title":"SAGA: A fast incremental gradient method with support for non-strongly convex composite objectives","author":"defazio","year":"0","journal-title":"Proc Int Conf Neural Inf Process"},{"key":"ref43","author":"nesterov","year":"2013","journal-title":"Introductory Lectures on Convex Optimization A Basic Course"},{"key":"ref25","first-page":"2613","article-title":"SARAH: A novel method for machine learning problems using stochastic recursive gradient","author":"nguyen","year":"0","journal-title":"Proc Int Conf Mach Learn"}],"container-title":["IEEE Transactions on Signal Processing"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/78\/8933520\/9153949-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/78\/8933520\/09153949.pdf?arnumber=9153949","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T14:41:11Z","timestamp":1651070471000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9153949\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"references-count":43,"URL":"https:\/\/doi.org\/10.1109\/tsp.2020.3012952","relation":{},"ISSN":["1053-587X","1941-0476"],"issn-type":[{"value":"1053-587X","type":"print"},{"value":"1941-0476","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]}}}