{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:05:23Z","timestamp":1775228723246,"version":"3.50.1"},"reference-count":67,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62272435"],"award-info":[{"award-number":["62272435"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U22A2094"],"award-info":[{"award-number":["U22A2094"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62222117"],"award-info":[{"award-number":["62222117"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1109\/tnnls.2023.3304453","type":"journal-article","created":{"date-parts":[[2023,10,3]],"date-time":"2023-10-03T17:53:57Z","timestamp":1696355637000},"page":"17479-17492","source":"Crossref","is-referenced-by-count":28,"title":["FedGAMMA: Federated Learning With Global Sharpness-Aware Minimization"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0009-0007-1735-0786","authenticated-orcid":false,"given":"Rong","family":"Dai","sequence":"first","affiliation":[{"name":"School of Information Science and Technology, University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0201-1638","authenticated-orcid":false,"given":"Xun","family":"Yang","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, University of Science and Technology of China, Hefei, China"}]},{"given":"Yan","family":"Sun","sequence":"additional","affiliation":[{"name":"School of Computer Science, University of Sydney, Sydney, NSW, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5659-3464","authenticated-orcid":false,"given":"Li","family":"Shen","sequence":"additional","affiliation":[{"name":"JD Explore Academy, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5952-8753","authenticated-orcid":false,"given":"Xinmei","family":"Tian","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3094-7735","authenticated-orcid":false,"given":"Meng","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Computer Science and Information Engineering, Hefei University of Technology, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1151-1792","authenticated-orcid":false,"given":"Yongdong","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Information Science and Technology, University of Science and Technology of China, Hefei, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1","article-title":"Smartphone ownership and internet usage continues to climb in emerging economies","volume":"22","author":"Poushter","year":"2016","journal-title":"Pew Res. Center"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2016.2535242"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.223"},{"key":"ref5","first-page":"1","article-title":"Sequence to sequence learning with neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"27","author":"Sutskever"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-57959-7"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2007.1050"},{"key":"ref9","first-page":"1273","article-title":"Communication-efficient learning of deep networks from decentralized data","volume-title":"Proc. 20th Int. Conf. Artif. Intell. Statist.","author":"McMahan"},{"key":"ref10","article-title":"Federated optimization: Distributed machine learning for on-device intelligence","author":"Kone\u010Dn\u00fd","year":"2016","journal-title":"arXiv:1610.02527"},{"key":"ref11","first-page":"429","article-title":"Federated optimization in heterogeneous networks","volume-title":"Proc. Mach. Learn. Syst.","volume":"2","author":"Li"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2020.2975749"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3298981"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.2979762"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2944481"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3015958"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3072238"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3124599"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3140131"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2020.2967670"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2019.2936565"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3130265"},{"key":"ref23","first-page":"5132","article-title":"SCAFFOLD: Stochastic controlled averaging for federated learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Karimireddy"},{"key":"ref24","article-title":"Federated learning based on dynamic regularization","author":"Acar","year":"2021","journal-title":"arXiv:2111.04263"},{"key":"ref25","article-title":"FedCM: Federated learning with client-level momentum","author":"Xu","year":"2021","journal-title":"arXiv:2106.10874"},{"key":"ref26","first-page":"7611","article-title":"Tackling the objective inconsistency problem in heterogeneous federated optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","volume":"33","author":"Wang"},{"key":"ref27","first-page":"2351","article-title":"Ensemble distillation for robust model fusion in federated learning","volume-title":"Proc. NIPS","volume":"33","author":"Lin"},{"key":"ref28","first-page":"12878","article-title":"Data-free knowledge distillation for heterogeneous federated learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhu"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2022.3152581"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3119550"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3077044"},{"key":"ref32","first-page":"1","article-title":"Principles of risk minimization for learning theory","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"4","author":"Vapnik"},{"key":"ref33","article-title":"Generalized federated learning via sharpness aware minimization","author":"Qu","year":"2022","journal-title":"arXiv:2206.02618"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20050-2_38"},{"key":"ref35","article-title":"Sharpness-aware minimization for efficiently improving generalization","author":"Foret","year":"2020","journal-title":"arXiv:2010.01412"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-016-1030-6"},{"key":"ref37","first-page":"1","article-title":"Accelerating stochastic gradient descent using predictive variance reduction","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"26","author":"Johnson"},{"key":"ref38","first-page":"1","article-title":"SAGA: A fast incremental gradient method with support for non-strongly convex composite objectives","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"27","author":"Defazio"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01057"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00993"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1088\/1742-5468\/ab39d9"},{"key":"ref42","first-page":"1019","article-title":"Sharp minima can generalize for deep nets","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Dinh"},{"key":"ref43","first-page":"1","article-title":"Simplifying neural nets by discovering flat minima","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"7","author":"Hochreiter"},{"key":"ref44","article-title":"On large-batch training for deep learning: Generalization gap and sharp minima","author":"Keskar","year":"2016","journal-title":"arXiv:1609.04836"},{"key":"ref45","first-page":"1","article-title":"Surrogate gap minimization improves sharpness-aware training","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhuang"},{"key":"ref46","article-title":"Efficient sharpness-aware minimization for improved training of neural networks","author":"Du","year":"2021","journal-title":"arXiv:2110.03141"},{"key":"ref47","first-page":"5905","article-title":"ASAM: Adaptive sharpness-aware minimization for scale-invariant learning of deep neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kwon"},{"key":"ref48","first-page":"30950","article-title":"Make sharpness-aware minimization stronger: A sparsified perturbation approach","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Mi"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-emnlp.300"},{"key":"ref50","article-title":"Achieving linear speedup with partial worker participation in non-IID federated learning","author":"Yang","year":"2021","journal-title":"arXiv:2101.11203"},{"key":"ref51","article-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"ref52","first-page":"3","article-title":"Tiny ImageNet visual recognition challenge","volume":"7","author":"Le","year":"2015","journal-title":"CS 231N"},{"key":"ref53","first-page":"1","article-title":"PyTorch: An imperative style, high-performance deep learning library","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Paszke"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref55","first-page":"4387","article-title":"The non-IID data quagmire of decentralized machine learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Hsieh"},{"key":"ref56","first-page":"1","article-title":"Visualizing the loss landscape of neural nets","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Li"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58607-2_5"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00265"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00140"},{"key":"ref60","article-title":"Benchmarking neural network robustness to common corruptions and perturbations","author":"Hendrycks","year":"2019","journal-title":"arXiv:1903.12261"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.591"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00044"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/tkde.2023.3271851"},{"key":"ref64","first-page":"1139","article-title":"On the importance of initialization and momentum in deep learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Sutskever"},{"key":"ref65","article-title":"SlowMo: Improving communication-efficient distributed SGD with slow momentum","author":"Wang","year":"2019","journal-title":"arXiv:1910.00643"},{"key":"ref66","article-title":"Adaptive federated optimization","author":"Reddi","year":"2020","journal-title":"arXiv:2003.00295"},{"key":"ref67","first-page":"6050","article-title":"STEM: A stochastic two-sided momentum algorithm achieving near-optimal sample and communication complexities for federated learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Khanduri"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10772360\/10269141.pdf?arnumber=10269141","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,15]],"date-time":"2025-01-15T20:04:04Z","timestamp":1736971444000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10269141\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12]]},"references-count":67,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2023.3304453","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12]]}}}