{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T01:33:18Z","timestamp":1768959198526,"version":"3.49.0"},"reference-count":35,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Singapore Ministry of Education AcRF Tier 2","award":["A-8000423-00-00"],"award-info":[{"award-number":["A-8000423-00-00"]}]},{"name":"AcRF Tier 1","award":["A-8000980-00-00"],"award-info":[{"award-number":["A-8000980-00-00"]}]},{"name":"AcRF Tier 1","award":["A-8002934-00-00"],"award-info":[{"award-number":["A-8002934-00-00"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Signal Process."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tsp.2025.3539883","type":"journal-article","created":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T18:48:21Z","timestamp":1738954101000},"page":"827-842","source":"Crossref","is-referenced-by-count":1,"title":["A Mirror Descent-Based Algorithm for Corruption-Tolerant Distributed Gradient Descent"],"prefix":"10.1109","volume":"73","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1582-4873","authenticated-orcid":false,"given":"Shuche","family":"Wang","sequence":"first","affiliation":[{"name":"Institute of Operations Research and Analytics, National University of Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5008-4527","authenticated-orcid":false,"given":"Vincent Y. F.","family":"Tan","sequence":"additional","affiliation":[{"name":"Department of Mathematics, National University of Singapore, Singapore"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ISIT57864.2024.10619340"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1561\/2200000016"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1080\/01621459.2018.1429274"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.14778\/2212351.2212354"},{"issue":"75","key":"ref5","first-page":"1","article-title":"Distributed coordinate descent method for learning with big data","volume":"17","author":"Richt\u00e1rik","year":"2016","journal-title":"J. Mach. Learn. Res."},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-015-0901-6"},{"key":"ref7","first-page":"321","article-title":"Lag: Lazily aggregated gradient for communication-efficient distributed learning","volume":"31","author":"Chen","year":"2018","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref8","first-page":"3368","article-title":"Gradient coding: Avoiding stragglers in distributed learning","author":"Tandon","year":"2017","journal-title":"Int. Conf. Mach. Learn."},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/JSAIT.2020.2991361"},{"key":"ref10","first-page":"903","article-title":"DRACO: Byzantine-resilient distributed training via redundant gradients","author":"Chen","year":"2018","journal-title":"Int. Conf. Mach. Learn."},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3154503"},{"key":"ref12","first-page":"5650","article-title":"Byzantine-robust distributed learning: Towards optimal statistical rates","author":"Yin","year":"2018","journal-title":"Int. Conf. Mach. Learn."},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2019.2946020"},{"key":"ref14","first-page":"431","article-title":"Machine learning with adversaries: Byzantine tolerant gradient descent","volume":"30","author":"Blanchard","year":"2017","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-010-0434-y"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1137\/110848864"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-013-0677-5"},{"key":"ref18","first-page":"1019","article-title":"On acceleration with noise-corrupted gradients","author":"Cohen","year":"2018","journal-title":"Int. Conf. Mach. Learn."},{"issue":"223","key":"ref19","first-page":"1","article-title":"On acceleration for convex composite minimization with noise-corrupted gradients and approximate proximal mapping","volume":"23","author":"Zhou","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref20","article-title":"Gradient descent: Robustness to adversarial corruption","volume-title":"Proc. OPT Optim. Mach. Learn. (NeurIPS 2022 Workshop)","author":"Chang","year":"2022"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ISIT54713.2023.10206794"},{"key":"ref22","first-page":"7409","article-title":"On optimal robustness to adversarial corruption in online decision problems","volume":"34","author":"Ito","year":"2021","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref23","first-page":"467","article-title":"An optimal algorithm for stochastic and adversarial bandits","volume-title":"Proc. 22nd Int. Conf. Artif. Intell. Statist.","author":"Zimmert","year":"2019"},{"key":"ref24","first-page":"12772","article-title":"Probabilistic sequential shrinking: A best arm identification algorithm for stochastic bandits with corruptions","author":"Zhong","year":"2021","journal-title":"Int. Conf. Mach. Learn."},{"key":"ref25","first-page":"691","article-title":"Between stochastic and adversarial online convex optimization: Improved regret bounds via smoothness","volume":"35","author":"Sachs","year":"2022","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref26","volume-title":"Convex analysis and optimization","volume":"1","author":"Bertsekas","year":"2003"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-007-0149-x"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1137\/18M1172314"},{"key":"ref29","volume-title":"Problem Complexity and Method Efficiency in Optimization","author":"Nemirovskij","year":"1983"},{"key":"ref30","article-title":"Handbook of convergence theorems for (stochastic) gradient methods","author":"Garrigos","year":"2023"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"issue":"7","key":"ref32","article-title":"Coordinate descent method for large-scale l2-loss linear support vector machines","volume":"9","author":"Chang","year":"2008","journal-title":"J. Mach. Learn. Res."},{"key":"ref33","article-title":"Deep learning using linear support vector machines","author":"Tang","year":"2013"},{"key":"ref34","first-page":"345","article-title":"A little is enough: Circumventing defenses for distributed learning","volume":"32","author":"Baruch","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1561\/2200000018"}],"container-title":["IEEE Transactions on Signal Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/78\/10807692\/10877931.pdf?arnumber=10877931","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,19]],"date-time":"2025-02-19T19:09:51Z","timestamp":1739992191000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10877931\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/tsp.2025.3539883","relation":{},"ISSN":["1053-587X","1941-0476"],"issn-type":[{"value":"1053-587X","type":"print"},{"value":"1941-0476","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}