{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T07:19:39Z","timestamp":1760080779804,"version":"3.37.3"},"reference-count":59,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2023,4,1]],"date-time":"2023-04-01T00:00:00Z","timestamp":1680307200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,4,1]],"date-time":"2023-04-01T00:00:00Z","timestamp":1680307200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,4,1]],"date-time":"2023-04-01T00:00:00Z","timestamp":1680307200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62025208","61972409"],"award-info":[{"award-number":["62025208","61972409"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2021YFB0301200"],"award-info":[{"award-number":["2021YFB0301200"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Select. Areas Commun."],"published-print":{"date-parts":[[2023,4]]},"DOI":"10.1109\/jsac.2023.3242733","type":"journal-article","created":{"date-parts":[[2023,2,22]],"date-time":"2023-02-22T18:31:21Z","timestamp":1677090681000},"page":"941-963","source":"Crossref","is-referenced-by-count":6,"title":["Compressed Collective Sparse-Sketch for Distributed Data-Parallel Training of Deep Learning Models"],"prefix":"10.1109","volume":"41","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0669-6892","authenticated-orcid":false,"given":"Keshi","family":"Ge","sequence":"first","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"given":"Kai","family":"Lu","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7564-5239","authenticated-orcid":false,"given":"Yongquan","family":"Fu","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"given":"Xiaoge","family":"Deng","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3458-4732","authenticated-orcid":false,"given":"Zhiquan","family":"Lai","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9743-2034","authenticated-orcid":false,"given":"Dongsheng","family":"Li","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, 
China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747408"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"article-title":"An image is worth 16 \u00d7 16 words: Transformers for image recognition at scale","volume-title":"Proc. 9th Int. Conf. Learn. Represent. (ICLR)","author":"Dosovitskiy","key":"ref4"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414641"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1285"},{"key":"ref8","first-page":"1","article-title":"DeLighT: Deep and light-weight transformer","volume-title":"Proc. 9th Int. Conf. Learn. Represent. (ICLR)","author":"Mehta"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3320060"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.5555\/2685048.2685095"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3437801.3441593"},{"key":"ref12","first-page":"265","article-title":"TensorFlow: A system for large-scale machine learning","volume-title":"Proc. 12th USENIX Symp. Oper. Syst. Design Implement. (OSDI)","author":"Abadi"},{"key":"ref13","first-page":"8024","article-title":"PyTorch: An imperative style, high-performance deep learning library","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"32","author":"Paszke"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3406703"},{"key":"ref15","first-page":"1709","article-title":"QSGD: Communication-efficient SGD via gradient quantization and encoding","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"30","author":"Alistarh"},{"key":"ref16","first-page":"1","article-title":"Deep gradient compression: Reducing the communication bandwidth for distributed training","volume-title":"Proc. 6th Int. Conf. Learn. Represent. (ICLR)","author":"Lin"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.jpdc.2008.09.002"},{"volume-title":"NCCL","year":"2022","key":"ref18"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3452296.3472904"},{"key":"ref20","first-page":"14236","article-title":"PowerSGD: Practical low-rank gradient compression for distributed optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"32","author":"Vogels"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/3302424.3303957"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3295500.3356222"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/1562764.1562789"},{"key":"ref24","first-page":"3252","article-title":"Error feedback fixes SignSGD and other gradient compression schemes","volume-title":"Proc. 36th Int. Conf. Mach. Learn. (ICML)","volume":"97","author":"Karimireddy"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3079856.3080246"},{"key":"ref26","first-page":"1232","article-title":"Large scale distributed deep networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst., 26th Annu. Conf. Neural Inf. Process. Syst.","volume":"25","author":"Dean"},{"key":"ref27","first-page":"571","article-title":"Project Adam: Building an efficient and scalable deep learning training system","volume-title":"Proc. 
11th USENIX Symp. Oper. Syst. Design Implement. (OSDI)","author":"Chilimbi"},{"key":"ref28","first-page":"9","article-title":"Scaling deep learning on GPU and knights landing clusters","volume-title":"Proc. Int. Conf. High Perform. Comput., Netw., Storage Anal.","author":"You"},{"volume-title":"Baidu-Allreduce","year":"2022","key":"ref29"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3341301.3359642"},{"key":"ref31","first-page":"181","article-title":"Poseidon: An efficient communication architecture for distributed deep learning on GPU clusters","volume-title":"Proc. USENIX Annu. Tech. Conf. (USENIX ATC)","author":"Zhang"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/j.parco.2019.03.005"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICPP.2014.36"},{"key":"ref34","first-page":"2530","article-title":"A linear speedup analysis of distributed deep learning with sparse and quantized communication","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"31","author":"Jiang"},{"key":"ref35","first-page":"4452","article-title":"Sparsified SGD with memory","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"31","author":"Stich"},{"key":"ref36","first-page":"1306","article-title":"Gradient sparsification for communication-efficient distributed optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"31","author":"Wangni"},{"key":"ref37","first-page":"13551","article-title":"ScaleCom: Scalable sparsified gradient compression for communication-efficient distributed training","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"33","author":"Chen"},{"key":"ref38","first-page":"4035","article-title":"ZipML: Training linear models with end-to-end low precision, and a little bit of deep learning","volume-title":"Proc. 34th Int. Conf. Mach. Learn. (ICML)","volume":"70","author":"Zhang"},{"key":"ref39","first-page":"3304","article-title":"Don\u2019t waste your bits! Squeeze activations and gradients for deep neural networks via TinyScript","volume-title":"Proc. 37th Int. Conf. Mach. Learn. (ICML)","volume":"119","author":"Fu"},{"key":"ref40","first-page":"3174","article-title":"Adaptive gradient quantization for data-parallel SGD","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"33","author":"Faghri"},{"key":"ref41","first-page":"1509","article-title":"TernGrad: Ternary gradients to reduce communication in distributed deep learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"30","author":"Wen"},{"key":"ref42","first-page":"559","article-title":"SIGNSGD: Compressed optimisation for non-convex problems","volume-title":"Proc. 35th Int. Conf. Mach. Learn. (ICML)","volume":"80","author":"Bernstein"},{"key":"ref43","first-page":"53","article-title":"3LC: Lightweight and effective traffic compression for distributed machine learning","volume-title":"Proc. Mach. Learn. Syst. (MLSys)","author":"Lim"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3477132.3483553"},{"key":"ref45","first-page":"5129","article-title":"GradiVeQ: Vector quantization for bandwidth-efficient gradient aggregation in distributed CNN training","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"31","author":"Yu"},{"key":"ref46","first-page":"13144","article-title":"Communication-efficient distributed SGD with sketching","volume-title":"Proc. Adv. Neural Inf. Process. Syst., Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"32","author":"Ivkin"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01189"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1145\/2500128"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-354"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/473"},{"key":"ref51","first-page":"1","article-title":"Google\u2019s neural machine translation system: Bridging the gap between human and machine translation","volume":"abs\/1609.08144","author":"Wu","year":"2016","journal-title":"CoRR"},{"key":"ref52","first-page":"1","article-title":"Pointer sentinel mixture models","volume-title":"Proc. 5th Int. Conf. Learn. Represent. (ICLR)","author":"Merity"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W16-2301"},{"article-title":"Learning multiple layers of features from tiny images","year":"2009","author":"Krizhevsky","key":"ref54"},{"key":"ref55","first-page":"1","article-title":"Critical learning periods in deep neural networks","volume-title":"Proc. 7th Int. Conf. Learn. Represent. (ICLR)","author":"Achille"},{"key":"ref56","first-page":"8253","article-title":"FetchSGD: Communication-efficient federated learning with sketching","volume-title":"Proc. 37th Int. Conf. Mach. Learn. (ICML)","volume":"119","author":"Rothchild"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1561\/2200000083"},{"issue":"1","key":"ref58","first-page":"1929","article-title":"Dropout: A simple way to prevent neural networks from overfitting","volume":"15","author":"Srivastava","year":"2014","journal-title":"J. Mach. Learn. Res."},{"key":"ref59","first-page":"1225","article-title":"Train faster, generalize better: Stability of stochastic gradient descent","volume-title":"Proc. 33rd Int. Conf. Mach. Learn. (ICML)","volume":"48","author":"Hardt"}],"container-title":["IEEE Journal on Selected Areas in Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/49\/10075675\/10050389.pdf?arnumber=10050389","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,2]],"date-time":"2024-03-02T21:27:47Z","timestamp":1709414867000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10050389\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,4]]},"references-count":59,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/jsac.2023.3242733","relation":{},"ISSN":["0733-8716","1558-0008"],"issn-type":[{"type":"print","value":"0733-8716"},{"type":"electronic","value":"1558-0008"}],"subject":[],"published":{"date-parts":[[2023,4]]}}}
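The record above is a Crossref "work" message for the paper's DOI (10.1109/jsac.2023.3242733). As a minimal sketch of how such a record can be consumed, the snippet below fetches the same envelope from the public Crossref REST API (https://api.crossref.org/works/<DOI>) and assembles a short citation string from the fields visible above (title, author, container-title, volume, issue, page, published, DOI). The helper names fetch_work and format_citation are illustrative, not part of any Crossref client library; network access and the requests package are assumed.

```python
import requests

# DOI taken from the Crossref record above.
DOI = "10.1109/jsac.2023.3242733"

def fetch_work(doi: str) -> dict:
    """Fetch a Crossref 'work' record; the envelope matches the JSON shown above."""
    resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
    resp.raise_for_status()
    payload = resp.json()
    # Single-work responses carry the bibliographic fields under "message".
    assert payload.get("message-type") == "work"
    return payload["message"]

def format_citation(msg: dict) -> str:
    """Build a short IEEE-style citation string from fields present in the record."""
    authors = ", ".join(f"{a['given']} {a['family']}" for a in msg.get("author", []))
    title = msg["title"][0]
    journal = msg["container-title"][0]
    year = msg["published"]["date-parts"][0][0]
    return (f"{authors}, \"{title},\" {journal}, "
            f"vol. {msg['volume']}, no. {msg['issue']}, pp. {msg['page']}, {year}. "
            f"doi: {msg['DOI']}.")

if __name__ == "__main__":
    msg = fetch_work(DOI)
    print(format_citation(msg))
    print("References listed:", msg.get("references-count"))
```

For this record the script should report 59 listed references, matching the "references-count" field; any field not guaranteed by Crossref (e.g. "author" or "page" on other works) should be guarded with .get() before reuse elsewhere.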