{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,12]],"date-time":"2026-04-12T02:58:30Z","timestamp":1775962710641,"version":"3.50.1"},"reference-count":38,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"9","license":[{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Fraunhofer Society through the MPI-FhG Collaboration Project \u201cTheory & Practice for Reduced Learning Machines,\u201d"},{"DOI":"10.13039\/501100004937","name":"German Ministry for Education and Research as Berlin Big Data Center","doi-asserted-by":"publisher","award":["01IS14013A"],"award-info":[{"award-number":["01IS14013A"]}],"id":[{"id":"10.13039\/501100004937","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004937","name":"Berlin Center for Machine Learning","doi-asserted-by":"publisher","award":["01IS18037I"],"award-info":[{"award-number":["01IS18037I"]}],"id":[{"id":"10.13039\/501100004937","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001659","name":"DFG","doi-asserted-by":"publisher","award":["EXC 2046\/1"],"award-info":[{"award-number":["EXC 2046\/1"]}],"id":[{"id":"10.13039\/501100001659","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001659","name":"DFG","doi-asserted-by":"publisher","award":["390685689"],"award-info":[{"award-number":["390685689"]}],"id":[{"id":"10.13039\/501100001659","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Information & Communications Technology Planning & Evaluation"},{"DOI":"10.13039\/501100003621","name":"Korea Government","doi-asserted-by":"publisher","award":["2017-0-00451"],"award-info":[{"award-number":["2017-0-00451"]}],"id":[{"id":"10.13039\/501100003621","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2020,9]]},"DOI":"10.1109\/tnnls.2019.2944481","type":"journal-article","created":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T15:58:02Z","timestamp":1572623882000},"page":"3400-3413","source":"Crossref","is-referenced-by-count":1301,"title":["Robust and Communication-Efficient Federated Learning From Non-i.i.d. Data"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9425-2238","authenticated-orcid":false,"given":"Felix","family":"Sattler","sequence":"first","affiliation":[{"name":"Fraunhofer Heinrich Hertz Institute, Berlin, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5144-3758","authenticated-orcid":false,"given":"Simon","family":"Wiedemann","sequence":"additional","affiliation":[{"name":"Fraunhofer Heinrich Hertz Institute, Berlin, Germany"}]},{"given":"Klaus-Robert","family":"M\u00fcller","sequence":"additional","affiliation":[{"name":"Technische Universit\u00e4t Berlin, Berlin, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6283-3265","authenticated-orcid":false,"given":"Wojciech","family":"Samek","sequence":"additional","affiliation":[{"name":"Fraunhofer Heinrich Hertz Institute, Berlin, Germany"}]}],"member":"263","reference":[{"key":"ref38","article-title":"An empirical investigation of catastrophic forgetting in gradient-based neural networks","author":"goodfellow","year":"2013","journal-title":"arXiv 1312 6211"},{"key":"ref33","first-page":"4447","article-title":"Sparsified SGD with memory","author":"stich","year":"2018","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref32","article-title":"Federated learning with non-IID data","author":"zhao","year":"2018","journal-title":"arXiv 1806 00582"},{"key":"ref31","author":"lecun","year":"1998","journal-title":"The MNIST Database of Handwritten Digits"},{"key":"ref30","author":"krizhevsky","year":"2014","journal-title":"CIFAR-10 Dataset"},{"key":"ref37","article-title":"Fashion-MNIST: A novel image dataset for benchmarking machine learning algorithms","author":"xiao","year":"2017","journal-title":"arXiv 1708 07747"},{"key":"ref36","article-title":"Speech commands: A dataset for limited-vocabulary speech recognition","author":"warden","year":"2018","journal-title":"arXiv 1804 03209"},{"key":"ref35","first-page":"1945","article-title":"Batch renormalization: Towards reducing minibatch dependence in batch-normalized models","author":"ioffe","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.1966.1053907"},{"key":"ref10","article-title":"Communication-efficient learning of deep networks from decentralized data","author":"mcmahan","year":"2016","journal-title":"arXiv 1602 05629"},{"key":"ref11","article-title":"How to backdoor federated learning","author":"bagdasaryan","year":"2018","journal-title":"arXiv 1807 00459"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3133982"},{"key":"ref13","article-title":"Private federated learning on vertically partitioned data via entity resolution and additively homomorphic encryption","author":"hardy","year":"2017","journal-title":"arXiv 1711 10677"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978318"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8852172"},{"key":"ref18","article-title":"Towards federated learning at scale: System design","author":"bonawitz","year":"2019","journal-title":"arXiv 1902 01046"},{"key":"ref19","article-title":"TernGrad: Ternary gradients to reduce communication in distributed deep learning","author":"wen","year":"2017","journal-title":"arXiv 1705 07878"},{"key":"ref28","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2014","journal-title":"arXiv 1409 1556"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"436","DOI":"10.1038\/nature14539","article-title":"Deep learning","volume":"521","author":"lecun","year":"2015","journal-title":"Nature"},{"key":"ref27","article-title":"Federated learning: Strategies for improving communication efficiency","author":"kone\u010dn\u00fd","year":"2016","journal-title":"arXiv 1610 05492"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8852119"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2760518"},{"key":"ref29","article-title":"signSGD with majority vote is communication efficient and byzantine fault tolerant","author":"bernstein","year":"2018","journal-title":"arXiv 1810 05291"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"ref8","first-page":"3104","article-title":"Sequence to sequence learning with neural networks","author":"sutskever","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.223"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2910073"},{"key":"ref9","first-page":"39","article-title":"Explainable artificial intelligence: Understanding, visualizing and interpreting deep learning models","volume":"1","author":"samek","year":"2018","journal-title":"ITU J ICT Discoveries"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IMPACT.2015.7365193"},{"key":"ref20","first-page":"1707","article-title":"QSGD: Communication-efficient SGD via gradient quantization and encoding","author":"alistarh","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref22","article-title":"signSGD: Compressed optimisation for non-convex problems","author":"bernstein","year":"2018","journal-title":"arXiv 1802 04434"},{"key":"ref21","article-title":"ATOMO: Communication-efficient learning via atomic sparsification","author":"wang","year":"2018","journal-title":"arXiv 1806 04090"},{"key":"ref24","first-page":"1488","article-title":"Scalable distributed DNN training using commodity GPU cloud computing","author":"strom","year":"2015","journal-title":"Proc Annu Conf Int Speech Commun Assoc"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1045"},{"key":"ref26","article-title":"Variance-based gradient compression for efficient distributed deep learning","author":"tsuzuku","year":"2018","journal-title":"arXiv 1802 06058"},{"key":"ref25","article-title":"Deep gradient compression: Reducing the communication bandwidth for distributed training","author":"lin","year":"2017","journal-title":"arXiv 1712 01887"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/9184294\/08889996.pdf?arnumber=8889996","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,12]],"date-time":"2022-01-12T11:36:41Z","timestamp":1641987401000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8889996\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,9]]},"references-count":38,"journal-issue":{"issue":"9"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2019.2944481","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,9]]}}}