{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:10:33Z","timestamp":1775229033971,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":56,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,8,4]],"date-time":"2023-08-04T00:00:00Z","timestamp":1691107200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["2104880, 2148309, 2153502, 1763620, 1948374, and 214644"],"award-info":[{"award-number":["2104880, 2148309, 2153502, 1763620, 1948374, and 214644"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000183","name":"Army Research Office","doi-asserted-by":"publisher","award":["W911NF-23-1-0072"],"award-info":[{"award-number":["W911NF-23-1-0072"]}],"id":[{"id":"10.13039\/100000183","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,8,6]]},"DOI":"10.1145\/3580305.3599293","type":"proceedings-article","created":{"date-parts":[[2023,8,4]],"date-time":"2023-08-04T18:10:58Z","timestamp":1691172658000},"page":"2898-2907","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":35,"title":["CriticalFL: A Critical Learning Periods Augmented Client Selection Framework for Efficient Federated Learning"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7734-1589","authenticated-orcid":false,"given":"Gang","family":"Yan","sequence":"first","affiliation":[{"name":"State University of New York, Binghamton, Binghamton, NY, 
USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1444-2657","authenticated-orcid":false,"given":"Hao","family":"Wang","sequence":"additional","affiliation":[{"name":"Louisiana State University, Baton Rouge, LA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3775-3033","authenticated-orcid":false,"given":"Xu","family":"Yuan","sequence":"additional","affiliation":[{"name":"University of Louisiana at Lafayette, Lafayette, LA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3642-3569","authenticated-orcid":false,"given":"Jian","family":"Li","sequence":"additional","affiliation":[{"name":"State University of New York, Binghamton, Binghamton, NY, USA"}]}],"member":"320","published-online":{"date-parts":[[2023,8,4]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Proc. of AISTATS.","author":"Reisizadeh","unstructured":"Reisizadeh A., Mokhtari A., and Hassani H. 2020. Fedpaq: A communication-efficient federated learning method with periodic averaging and quantization. In Proc. of AISTATS."},{"key":"e_1_3_2_2_2_1","volume-title":"Proc. of ICLR.","author":"Achille Alessandro","year":"2019","unstructured":"Alessandro Achille, Matteo Rovere, and Stefano Soatto. 2019. Critical Learning Periods in Deep Networks. In Proc. of ICLR."},{"key":"e_1_3_2_2_3_1","volume-title":"Personalized Federated Learning with Gaussian Processes. Proc. of NeurIPS","author":"Achituve Idan","year":"2021","unstructured":"Idan Achituve, Aviv Shamsian, Aviv Navon, Gal Chechik, and Ethan Fetaya. 2021. Personalized Federated Learning with Gaussian Processes. Proc. of NeurIPS (2021)."},{"key":"e_1_3_2_2_4_1","volume-title":"Proc. of NeurIPS.","author":"Basu Debraj","year":"2019","unstructured":"Debraj Basu, Deepesh Data, Can Karakus, and Suhas Diggavi. 2019. Qsparse-local-SGD: Distributed SGD with Quantization, Sparsification and Local Computations. In Proc. 
of NeurIPS."},{"key":"e_1_3_2_2_5_1","volume-title":"Client Selection in Federated Learning: Convergence Analysis and Power-of-Choice Selection Strategies. arXiv preprint arXiv:2010.01243","author":"Cho Yae Jee","year":"2020","unstructured":"Yae Jee Cho, Jianyu Wang, and Gauri Joshi. 2020. Client Selection in Federated Learning: Convergence Analysis and Power-of-Choice Selection Strategies. arXiv preprint arXiv:2010.01243 (2020)."},{"key":"e_1_3_2_2_6_1","volume-title":"Proc. of AISTATS.","author":"Cho Yae Jee","year":"2022","unstructured":"Yae Jee Cho, Jianyu Wang, and Gauri Joshi. 2022. Towards Understanding Biased Client Selection in Federated Learning. In Proc. of AISTATS."},{"key":"e_1_3_2_2_7_1","volume-title":"Proc. of ICLR.","author":"Frankle Jonathan","year":"2020","unstructured":"Jonathan Frankle, David J Schwab, and Ari S Morcos. 2020. The Early Phase of Neural Network Training. In Proc. of ICLR."},{"key":"e_1_3_2_2_8_1","volume-title":"Proc. of NeurIPS","author":"Golatkar Aditya Sharad","year":"2019","unstructured":"Aditya Sharad Golatkar, Alessandro Achille, and Stefano Soatto. 2019. Time Matters in Regularizing Deep Networks: Weight Decay and Data Augmentation Affect Early Learning Dynamics, Matter Little Near Convergence. Proc. of NeurIPS (2019)."},{"key":"e_1_3_2_2_9_1","volume-title":"Proc. of AISTATS.","author":"Haddadpour Farzin","year":"2021","unstructured":"Farzin Haddadpour, Mohammad Mahdi Kamani, Aryan Mokhtari, and Mehrdad Mahdavi. 2021. Federated Learning with Compression: Unified Analysis and Sharp Guarantees. In Proc. of AISTATS."},{"key":"e_1_3_2_2_10_1","volume-title":"Proc. of ICML.","author":"Hamer Jenny","year":"2020","unstructured":"Jenny Hamer, Mehryar Mohri, and Ananda Theertha Suresh. 2020. FedBoost: A Communication-Efficient Algorithm for Federated Learning. In Proc. of ICML."},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_2_12_1","volume-title":"Proc. 
of ICLR.","author":"Horv\u00e1th Samuel","year":"2021","unstructured":"Samuel Horv\u00e1th and Peter Richtarik. 2021. A Better Alternative to Error Feedback for Communication-Efficient Distributed Learning. In Proc. of ICLR."},{"key":"e_1_3_2_2_13_1","volume-title":"Urmish Thakker, Shiqiang Wang, Jian Li, and M Hadi Amini.","author":"Imteaj Ahmed","year":"2022","unstructured":"Ahmed Imteaj, Khandaker Mamun Ahmed, Urmish Thakker, Shiqiang Wang, Jian Li, and M Hadi Amini. 2022. Federated Learning for Resource-Constrained IoT Devices: Panoramas and State of the Art. Federated and Transfer Learning (2022), 7--27."},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2021.3095077"},{"key":"e_1_3_2_2_15_1","volume-title":"Proc. of ICML.","author":"Jastrzebski Stanislaw","year":"2021","unstructured":"Stanislaw Jastrzebski, Devansh Arpit, Oliver Astrand, Giancarlo B Kerg, Huan Wang, Caiming Xiong, Richard Socher, Kyunghyun Cho, and Krzysztof J Geras. 2021. Catastrophic Fisher Explosion: Early Phase Fisher Matrix Impacts Generalization. In Proc. of ICML."},{"key":"e_1_3_2_2_16_1","volume-title":"Proc. of ICLR.","author":"Jastrzebski Stanislaw","year":"2019","unstructured":"Stanislaw Jastrzebski, Zachary Kenton, Nicolas Ballas, Asja Fischer, Yoshua Bengio, and Amos J Storkey. 2019. On the Relation Between the Sharpest Directions of DNN Loss and the SGD Step Length. In Proc. of ICLR."},{"key":"e_1_3_2_2_17_1","volume-title":"Proc. of ICLR.","author":"Jastrzebski Stanislaw","year":"2020","unstructured":"Stanislaw Jastrzebski, Maciej Szymczak, Stanislav Fort, Devansh Arpit, Jacek Tabor, Kyunghyun Cho, and Krzysztof Geras. 2020. The Break-Even Point on Optimization Trajectories of Deep Neural Networks. In Proc. 
of ICLR."},{"key":"e_1_3_2_2_18_1","volume-title":"Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al.","author":"Kairouz Peter","year":"2019","unstructured":"Peter Kairouz, H Brendan McMahan, Brendan Avent, Aur\u00e9lien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. 2019. Advances and Open Problems in Federated Learning. arXiv preprint arXiv:1912.04977 (2019)."},{"key":"e_1_3_2_2_19_1","volume-title":"Proc. of ICML.","author":"Karimireddy Sai Praneeth","year":"2020","unstructured":"Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank Reddi, Sebastian Stich, and Ananda Theertha Suresh. 2020. SCAFFOLD: Stochastic Controlled Averaging for Federated Learning. In Proc. of ICML."},{"key":"e_1_3_2_2_20_1","volume-title":"Proc. of ICML.","author":"Katharopoulos Angelos","year":"2018","unstructured":"Angelos Katharopoulos and Fran\u00e7ois Fleuret. 2018. Not All Samples Are Created Equal: Deep Learning with Importance Sampling. In Proc. of ICML."},{"key":"e_1_3_2_2_21_1","volume-title":"Proc. of AISTATS.","author":"Khaled Ahmed","year":"2020","unstructured":"Ahmed Khaled, Konstantin Mishchenko, and Peter Richt\u00e1rik. 2020. Tighter theory for local SGD on identical and heterogeneous data. In Proc. of AISTATS."},{"key":"e_1_3_2_2_22_1","volume-title":"Proc. of AAAI.","author":"Kim Yoon","year":"2016","unstructured":"Yoon Kim, Yacine Jernite, David Sontag, and Alexander M Rush. 2016. Character-aware neural language models. In Proc. of AAAI."},{"key":"e_1_3_2_2_23_1","unstructured":"Alex Krizhevsky Geoffrey Hinton et al. 2009. Learning Multiple Layers of Features from Tiny Images. (2009)."},{"key":"e_1_3_2_2_24_1","volume-title":"Proc. of NIPS","author":"Krizhevsky Alex","year":"2012","unstructured":"Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. 2012. Imagenet Classification with Deep Convolutional Neural Networks. Proc. 
of NIPS (2012)."},{"key":"e_1_3_2_2_25_1","volume-title":"Proc. of USENIX OSDI.","author":"Lai Fan","year":"2021","unstructured":"Fan Lai, Xiangfeng Zhu, Harsha V Madhyastha, and Mosharaf Chowdhury. 2021. Oort: Efficient Federated Learning via Guided Participant Selection. In Proc. of USENIX OSDI."},{"key":"e_1_3_2_2_26_1","volume-title":"Proc. of MLSys.","author":"Li Tian","year":"2020","unstructured":"Tian Li, Anit Kumar Sahu, Manzil Zaheer, Maziar Sanjabi, Ameet Talwalkar, and Virginia Smith. 2020. Federated Optimization in Heterogeneous Networks. In Proc. of MLSys."},{"key":"e_1_3_2_2_27_1","volume-title":"Proc. of ICLR.","author":"Li Xiang","year":"2020","unstructured":"Xiang Li, Kaixuan Huang, Wenhao Yang, Shusen Wang, and Zhihua Zhang. 2020. On the Convergence of FedAvg on Non-IID Data. In Proc. of ICLR."},{"key":"e_1_3_2_2_28_1","volume-title":"Variance Reduced Local SGD with Lower Communication Complexity. arXiv preprint arXiv:1912.12844","author":"Liang Xianfeng","year":"2019","unstructured":"Xianfeng Liang, Shuheng Shen, Jingchang Liu, Zhen Pan, Enhong Chen, and Yifei Cheng. 2019. Variance Reduced Local SGD with Lower Communication Complexity. arXiv preprint arXiv:1912.12844 (2019)."},{"key":"e_1_3_2_2_29_1","volume-title":"Proc. of NeurIPS","author":"Lin Tao","year":"2020","unstructured":"Tao Lin, Lingjing Kong, Sebastian U Stich, and Martin Jaggi. 2020. Ensemble distillation for robust model fusion in federated learning. Proc. of NeurIPS (2020)."},{"key":"e_1_3_2_2_30_1","unstructured":"Amiri M. M. Gunduz D. and Kulkarni S. R. 2020. Federated learning with quantized global model updates. arXiv preprint arXiv:2006.10672 (2020)."},{"key":"e_1_3_2_2_31_1","volume-title":"Proc. of ICML.","author":"Malinovskiy Grigory","year":"2020","unstructured":"Grigory Malinovskiy, Dmitry Kovalev, Elnur Gasanov, Laurent Condat, and Peter Richtarik. 2020. From local SGD to local fixed-point methods for federated learning. In Proc. 
of ICML."},{"key":"e_1_3_2_2_32_1","volume-title":"Proc. of AISTATS.","author":"McMahan Brendan","year":"2017","unstructured":"Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. 2017. Communication-Efficient Learning of Deep Networks from Decentralized Data. In Proc. of AISTATS."},{"key":"e_1_3_2_2_33_1","unstructured":"Adam Paszke Sam Gross Soumith Chintala Gregory Chanan Edward Yang Zachary DeVito Zeming Lin Alban Desmaison Luca Antiga and Adam Lerer. 2017. Automatic differentiation in pytorch. In NIPS-W."},{"key":"e_1_3_2_2_34_1","volume-title":"Proc. of NeurIPS","author":"Pathak Reese","year":"2020","unstructured":"Reese Pathak and Martin J Wainwright. 2020. FedSplit: An algorithmic framework for fast federated optimization. Proc. of NeurIPS (2020)."},{"key":"e_1_3_2_2_35_1","volume-title":"Proc. of ICML.","author":"H\u00f6nig","unstructured":"H\u00f6nig R., Zhao Y., and Mullins R. 2022. DAdaQuant: Doubly-adaptive quantization for communication-efficient Federated Learning. In Proc. of ICML."},{"key":"e_1_3_2_2_36_1","volume-title":"Proc. of ICLR.","author":"Reddi Sashank J.","year":"2021","unstructured":"Sashank J. Reddi, Zachary Charles, Manzil Zaheer, Zachary Garrett, Keith Rush, Jakub Kone\u010dn\u00fd, Sanjiv Kumar, and Hugh Brendan McMahan. 2021. Adaptive Federated Optimization. In Proc. of ICLR."},{"key":"e_1_3_2_2_37_1","volume-title":"Communication-Efficient Federated Learning via Optimal Client Sampling. arXiv preprint arXiv:2007.15197","author":"Ribero Monica","year":"2020","unstructured":"Monica Ribero and Haris Vikalo. 2020. Communication-Efficient Federated Learning via Optimal Client Sampling. arXiv preprint arXiv:2007.15197 (2020)."},{"key":"e_1_3_2_2_38_1","volume-title":"Proc. of ICML.","author":"Rothchild Daniel","year":"2020","unstructured":"Daniel Rothchild, Ashwinee Panda, Enayat Ullah, Nikita Ivkin, Ion Stoica, Vladimir Braverman, Joseph Gonzalez, and Raman Arora. 2020. 
FetchSGD: Communication-Efficient Federated Learning with Sketching. In Proc. of ICML."},{"key":"e_1_3_2_2_39_1","volume-title":"Proc. of AISTATS.","author":"Ruan Yichen","year":"2021","unstructured":"Yichen Ruan, Xiaoxi Zhang, Shu-Che Liang, and Carlee Joe-Wong. 2021. Towards Flexible Device Participation in Federated Learning. In Proc. of AISTATS."},{"key":"e_1_3_2_2_40_1","volume-title":"Proc. of ICLR.","author":"Simonyan Karen","year":"2015","unstructured":"Karen Simonyan and Andrew Zisserman. 2015. Very Deep Convolutional Networks for Large-scale Image Recognition. In Proc. of ICLR."},{"key":"e_1_3_2_2_41_1","first-page":"1","article-title":"The error-feedback framework: Better rates for sgd with delayed gradients and compressed updates","volume":"21","author":"Stich Sebastian U","year":"2020","unstructured":"Sebastian U Stich and Sai Praneeth Karimireddy. 2020. The error-feedback framework: Better rates for sgd with delayed gradients and compressed updates. Journal of Machine Learning Research 21 (2020), 1--36.","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00986"},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM41043.2020.9155494"},{"key":"e_1_3_2_2_44_1","volume-title":"Proc. of ICLR.","author":"Wang Hongyi","year":"2020","unstructured":"Hongyi Wang, Mikhail Yurochkin, Yuekai Sun, Dimitris Papailiopoulos, and Yasaman Khazaeni. 2020. Federated Learning with Matched Averaging. In Proc. of ICLR."},{"key":"e_1_3_2_2_45_1","volume-title":"Proc. of SysML.","author":"Wang Jianyu","year":"2019","unstructured":"Jianyu Wang and Gauri Joshi. 2019. Adaptive Communication Strategies to Achieve the Best Error-Runtime Trade-off in Local-update SGD. In Proc. 
of SysML."},{"key":"e_1_3_2_2_46_1","first-page":"1","article-title":"Cooperative SGD: A Unified Framework for the Design and Analysis of Local-Update SGD Algorithms","volume":"22","author":"Wang Jianyu","year":"2021","unstructured":"Jianyu Wang and Gauri Joshi. 2021. Cooperative SGD: A Unified Framework for the Design and Analysis of Local-Update SGD Algorithms. Journal of Machine Learning Research 22, 213 (2021), 1--50.","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_2_47_1","volume-title":"Proc. of NeurIPS","author":"Wang Jianyu","year":"2020","unstructured":"Jianyu Wang, Qinghua Liu, Hao Liang, Gauri Joshi, and H Vincent Poor. 2020. Tackling the Objective Inconsistency Problem in Heterogeneous Federated Optimization. Proc. of NeurIPS (2020)."},{"key":"e_1_3_2_2_48_1","volume-title":"Gradient sparsification for communication-efficient distributed optimization. Advances in Neural Information Processing Systems 31","author":"Wangni Jianqiao","year":"2018","unstructured":"Jianqiao Wangni, Jialei Wang, Ji Liu, and Tong Zhang. 2018. Gradient sparsification for communication-efficient distributed optimization. Advances in Neural Information Processing Systems 31 (2018)."},{"key":"e_1_3_2_2_49_1","volume-title":"Proc. of ICML.","author":"Woodworth Blake","year":"2020","unstructured":"Blake Woodworth, Kumar Kshitij Patel, Sebastian Stich, Zhen Dai, Brian Bullins, Brendan Mcmahan, Ohad Shamir, and Nathan Srebro. 2020. Is local SGD better than minibatch SGD?. In Proc. of ICML."},{"key":"e_1_3_2_2_50_1","volume-title":"Fashion-MNIST: A Novel Image Dataset for Benchmarking Machine Learning Algorithms. arXiv preprint arXiv:1708.07747","author":"Xiao Han","year":"2017","unstructured":"Han Xiao, Kashif Rasul, and Roland Vollgraf. 2017. Fashion-MNIST: A Novel Image Dataset for Benchmarking Machine Learning Algorithms. 
arXiv preprint arXiv:1708.07747 (2017)."},{"key":"e_1_3_2_2_51_1","volume-title":"Straggler-resilient distributed machine learning with dynamic backup workers. arXiv preprint arXiv:2102.06280","author":"Xiong Guojun","year":"2021","unstructured":"Guojun Xiong, Gang Yan, Rahul Singh, and Jian Li. 2021. Straggler-resilient distributed machine learning with dynamic backup workers. arXiv preprint arXiv:2102.06280 (2021)."},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i8.20859"},{"key":"e_1_3_2_2_53_1","unstructured":"Gang Yan Hao Wang Xu Yuan and Jian Li. 2023. CriticalFL: A Critical Learning Periods Augmented Client Selection Framework for Efficient Federated Learning. (2023). https:\/\/www.dropbox.com\/s\/m501qs0pppmgu9y\/main.pdf?dl=0"},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i9.26271"},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"publisher","DOI":"10.4108\/eai.21-10-2021.171595"},{"key":"e_1_3_2_2_56_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33015693"}],"event":{"name":"KDD '23: The 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining","location":"Long Beach CA USA","acronym":"KDD '23","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"]},"container-title":["Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data 
Mining"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3580305.3599293","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3580305.3599293","content-type":"application\/pdf","content-version":"vor","intended-application":"syndication"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3580305.3599293","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T17:51:16Z","timestamp":1750182676000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3580305.3599293"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,4]]},"references-count":56,"alternative-id":["10.1145\/3580305.3599293","10.1145\/3580305"],"URL":"https:\/\/doi.org\/10.1145\/3580305.3599293","relation":{},"subject":[],"published":{"date-parts":[[2023,8,4]]},"assertion":[{"value":"2023-08-04","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}