{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,3]],"date-time":"2025-05-03T04:30:05Z","timestamp":1746246605088},"reference-count":26,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018,10]]},"DOI":"10.1109\/allerton.2018.8636017","type":"proceedings-article","created":{"date-parts":[[2019,3,1]],"date-time":"2019-03-01T03:04:31Z","timestamp":1551409471000},"page":"863-870","source":"Crossref","is-referenced-by-count":8,"title":["Data Encoding for Byzantine-Resilient Distributed Gradient Descent"],"prefix":"10.1109","author":[{"given":"Deepesh","family":"Data","sequence":"first","affiliation":[]},{"given":"Linqi","family":"Song","sequence":"additional","affiliation":[]},{"given":"Suhas","family":"Diggavi","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2005.858979"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511804441"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/2408776.2408794"},{"key":"ref13","first-page":"5440","article-title":"Straggler mitigation in distributed optimization through data encoding","author":"karakus","year":"2017","journal-title":"In Advances in Neural Information Processing Systems NIPS 2017"},{"key":"ref14","first-page":"3518","article-title":"The hidden vulnerability of distributed learning in byzantium","author":"mhamdi","year":"2018","journal-title":"Proceedings of the 35th International Conference on Machine Learning ICML 2018"},{"key":"ref15","first-page":"3368","article-title":"Gradient coding: Avoiding stragglers in distributed learning","author":"tandon","year":"2017","journal-title":"Proceedings of the 34th International Conference on Machine Learning ICML 2017"},{"key":"ref16","first-page":"4302","article-title":"Gradient coding from cyclic MDS codes and expander graphs","author":"raviv","year":"2018","journal-title":"Proceedings of the 35th International Conference on Machine Learning ICML 2018"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ISIT.2018.8437887"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ISIT.2018.8437467"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2017.2736066"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICDCS.2018.00112"},{"key":"ref3","first-page":"5336","article-title":"Can decentralized algorithms outperform centralized algorithms? A case study for decentralized parallel stochastic gradient descent","author":"lian","year":"2017","journal-title":"Advances in Neural Information Processing Systems NIPS 2017"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/357172.357176"},{"article-title":"Stochastic, distributed and federated optimization for machine learning","year":"2017","author":"konecn\u00fd","key":"ref5"},{"key":"ref8","first-page":"902","article-title":"DRACO: byzantine-resilient distributed training via redundant gradients","author":"chen","year":"2018","journal-title":"Proceedings of the 35th International Conference on Machine Learning ICML 2018"},{"key":"ref7","first-page":"118","article-title":"Machine learning with adversaries: Byzantine tolerant gradient descent","author":"blanchard","year":"2017","journal-title":"Advances in Neural Information Processing Systems NIPS 2017"},{"key":"ref2","first-page":"2595","article-title":"Parallelized stochastic gradient descent","author":"zinkevich","year":"2010","journal-title":"Advances in neural information processing systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3154503"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/1327452.1327492"},{"key":"ref20","first-page":"2092","article-title":"Short-dot: Computing large linear transforms distributedly using coded short dot products","author":"dutta","year":"2016","journal-title":"Advances in Neural Information Processing Systems 29 Annual Conference on Neural Information Processing Systems 2016"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2008.927802"},{"key":"ref21","first-page":"5636","article-title":"Byzantine-robust distributed learning: Towards optimal statistical rates","author":"yin","year":"2018","journal-title":"Proceedings of the 35th International Conference on Machine Learning ICML 2018"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1137\/0607059"},{"journal-title":"Probability and Measure ser","year":"1995","author":"billingsley","key":"ref23"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2007.914344"},{"journal-title":"Linear Algebra","year":"1971","author":"hoffman","key":"ref25"}],"event":{"name":"2018 56th Annual Allerton Conference on Communication, Control, and Computing (Allerton)","start":{"date-parts":[[2018,10,2]]},"location":"Monticello, IL, USA","end":{"date-parts":[[2018,10,5]]}},"container-title":["2018 56th Annual Allerton Conference on Communication, Control, and Computing (Allerton)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8630872\/8635635\/08636017.pdf?arnumber=8636017","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2020,8,24]],"date-time":"2020-08-24T03:04:08Z","timestamp":1598238248000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8636017\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,10]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/allerton.2018.8636017","relation":{},"subject":[],"published":{"date-parts":[[2018,10]]}}}