{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T21:26:37Z","timestamp":1769030797976,"version":"3.49.0"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T00:00:00Z","timestamp":1763078400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T00:00:00Z","timestamp":1763078400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,11,14]]},"DOI":"10.1109\/cloudcom67567.2025.11331531","type":"proceedings-article","created":{"date-parts":[[2026,1,20]],"date-time":"2026-01-20T20:37:16Z","timestamp":1768941436000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Greedy Low-Rank Gradient Compression Provably Converges for Distributed Learning"],"prefix":"10.1109","author":[{"given":"Chuyan","family":"Chen","sequence":"first","affiliation":[{"name":"Peking University,Beijing,China"}]},{"given":"Yutong","family":"He","sequence":"additional","affiliation":[{"name":"Peking University,Beijing,China"}]},{"given":"Pengrui","family":"Li","sequence":"additional","affiliation":[{"name":"Beihang University,Beijing,China"}]},{"given":"Weichen","family":"Jia","sequence":"additional","affiliation":[{"name":"Peking University,Beijing,China"}]},{"given":"Yanjie","family":"Dong","sequence":"additional","affiliation":[{"name":"Shenzhen MSU-BIT University,Shenzhen,China"}]},{"given":"Kun","family":"Yuan","sequence":"additional","affiliation":[{"name":"Peking University,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2025.3542324"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2016.2579198"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2019.2918951"},{"key":"ref4","article-title":"Federated optimization: Distributed machine learning for on-device intelligence","author":"Konecny","year":"2016","journal-title":"arXiv preprint"},{"key":"ref5","first-page":"1273","article-title":"Communication-efficient learning of deep networks from decentralized data","author":"McMahan","year":"2017","journal-title":"Artificial intelligence and statistics"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1515\/9781400831470"},{"key":"ref7","article-title":"Communication effi-cient distributed machine learning with the parameter server","volume":"27","author":"Li","year":"2014","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref8","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"ar Xiv preprint"},{"key":"ref9","article-title":"Bringing hpc techniques to deep learning","author":"Gibiansky","year":"2017","journal-title":"Baidu Re-search, Tech. 
Rep"},{"key":"ref10","article-title":"Qsgd: Communication-efficient sgd via gradient quantization and encoding","volume":"30","author":"Alistarh","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref11","article-title":"Stochastic distributed learning with gradient quantization and variance reduction","author":"Horvath","year":"2019","journal-title":"arXiv: Optimization and Control"},{"key":"ref12","article-title":"Gradient sparsification for communication-efficient distributed optimization","volume":"31","author":"Wangni","year":"2018","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref13","article-title":"Sparsified sgd with memory","volume":"31","author":"Stich","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref14","first-page":"25688","article-title":"Smoothness matrices beat smoothness constants: Better communication compression techniques for distributed optimization","volume":"34","author":"Safaryan","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"2","key":"ref15","first-page":"3","article-title":"Lora: Low-rank adaptation of large language models","volume":"1","author":"Hu","year":"2022","journal-title":"ICLR"},{"key":"ref16","article-title":"Subspace optimization for large language models with convergence guarantees","author":"He","year":"2024","journal-title":"ar Xiv preprint"},{"key":"ref17","article-title":"Separate: A simple low-rank projection for gradient compression in modern large-scale model training process","volume-title":"The Thirteenth International Conference on Learning Representations","author":"Zhao"},{"key":"ref18","article-title":"Deepseek-v3 technical report","author":"Liu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref19","article-title":"Galore: Memory-efficient 11m training by gradient low-rank projection","author":"Zhao","year":"2024","journal-title":"arXiv preprint"},{"key":"ref20","article-title":"Powersgd: Practical low-rank gradient compression for distributed optimization","volume":"32","author":"Vogels","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref21","first-page":"129","article-title":"N atural compression for distributed deep learning","author":"Horvoth","year":"2022","journal-title":"Mathematical and Scientific Machine Learning"},{"key":"ref22","article-title":"Atomo: Communication-efficient learning via atomic spar-sification","volume":"31","author":"Wang","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2014-274"},{"key":"ref24","first-page":"3252","article-title":"Error feedback fixes signsgd and other gradient compression schemes","volume-title":"International Conference on Machine Learning","author":"Karimireddy"},{"key":"ref25","first-page":"4384","article-title":"Ef21: A new, simpler, theoretically better, and practically faster error feedback","volume":"34","author":"Richtarik","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref26","first-page":"18955","article-title":"Lower bounds and nearly optimal algorithms in distributed learning with communication com-pression","volume":"35","author":"Huang","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref27","article-title":"Lower bounds and accelerated algorithms in distributed stochastic 
optimization with communication compression","author":"He","year":"2023","journal-title":"arXiv preprint"},{"key":"ref28","first-page":"76444","article-title":"Momentum provably im-proves error feedback!","volume":"36","author":"Fatkhullin","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref29","article-title":"Fira: Can we achieve full-rank training of llms under low-rank constraint","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref30","article-title":"A memory efficient randomized subspace optimization method for training large language models","author":"Chen","year":"2025","journal-title":"arXiv preprint"},{"key":"ref31","article-title":"Ldadam: Adaptive optimization from low-dimensional gradient statistics","author":"Robert","year":"2024","journal-title":"arXiv preprint"},{"key":"ref32","article-title":"Decoupled weight decay regularization","author":"Loshchilov","year":"2017","journal-title":"arXiv preprint"},{"key":"ref33","first-page":"10118","article-title":"I-bit adam: Communication efficient large-scale training with adam\u2019s convergence speed","volume-title":"International Conference on Machine Learning","author":"Tang"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref35","author":"Krizhevsky","year":"2009","journal-title":"Learning multiple layers of features from tiny images"},{"issue":"140","key":"ref36","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"Journal of machine learning research"},{"key":"ref37","article-title":"Roberta: A robustly optimized bert pretraining approach","author":"Liu","year":"2019","journal-title":"ar Xiv preprint ar Xiv: 1907.11692"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/w18-5446"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICDCS57875.2023.00031"}],"event":{"name":"2025 lEEE International Conference on Cloud Computing Technology and Science (CloudCom)","location":"Shenzhen, China","start":{"date-parts":[[2025,11,14]]},"end":{"date-parts":[[2025,11,16]]}},"container-title":["2025 lEEE International Conference on Cloud Computing Technology and Science (CloudCom)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11330195\/11331311\/11331531.pdf?arnumber=11331531","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T07:08:33Z","timestamp":1768979313000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11331531\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,14]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/cloudcom67567.2025.11331531","relation":{},"subject":[],"published":{"date-parts":[[2025,11,14]]}}}
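For context, the record above is the JSON body that the Crossref REST API returns for this paper's DOI. The sketch below shows one way such a record can be fetched and read; it assumes the public api.crossref.org works endpoint and the third-party requests package, and it uses only field names ("message", "title", "author", "reference-count") that appear in the record itself.

```python
# Minimal sketch: fetch a Crossref work record and print a few fields.
# Assumes the public api.crossref.org endpoint and the `requests` package.
import requests

DOI = "10.1109/cloudcom67567.2025.11331531"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object shown above

print(work["title"][0])        # paper title
for a in work["author"]:       # author list
    print(f'{a["given"]} {a["family"]}')
print("references:", work["reference-count"])
```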