{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T00:05:01Z","timestamp":1755907501269,"version":"3.44.0"},"publisher-location":"Cham","reference-count":26,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031998560","type":"print"},{"value":"9783031998577","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T00:00:00Z","timestamp":1755907200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,8,23]],"date-time":"2025-08-23T00:00:00Z","timestamp":1755907200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-031-99857-7_16","type":"book-chapter","created":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T05:17:14Z","timestamp":1755839834000},"page":"221-235","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Saving Memory via\u00a0Residual Reduction for\u00a0DNN Training with\u00a0Compressed 
Communication"],"prefix":"10.1007","author":[{"given":"Xinjue","family":"Zheng","sequence":"first","affiliation":[]},{"given":"Zhangqiang","family":"Ming","sequence":"additional","affiliation":[]},{"given":"Yuchong","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Chenxuan","family":"Yao","sequence":"additional","affiliation":[]},{"given":"Wenxiang","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Rui","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xun","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Dan","family":"Feng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,8,23]]},"reference":[{"key":"16_CR1","unstructured":"Achiam, J., et\u00a0al.: GPT-4 technical report. arXiv preprint arXiv:2303.08774 (2023)"},{"key":"16_CR2","doi-asserted-by":"crossref","unstructured":"Aji, A.F., et\u00a0al.: Sparse communication for distributed gradient descent. arXiv preprint arXiv:1704.05021 (2017)","DOI":"10.18653\/v1\/D17-1045"},{"key":"16_CR3","unstructured":"Alistarh, D., et\u00a0al.: QSGD: communication-efficient SGD via gradient quantization and encoding. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"16_CR4","doi-asserted-by":"crossref","unstructured":"Bai, Y., et\u00a0al.: Gradient compression supercharged high-performance data parallel DNN training. In: Proceedings of ACM SOSP, pp. 359\u2013375 (2021)","DOI":"10.1145\/3477132.3483553"},{"key":"16_CR5","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown, T., et al.: Language models are few-shot learners. Adv. Neural. Inf. Process. Syst. 33, 1877\u20131901 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"16_CR6","unstructured":"Chen, T., et\u00a0al.: Training deep nets with sublinear memory cost. 
arXiv preprint arXiv:1604.06174 (2016)"},{"key":"16_CR7","unstructured":"Devlin, J., et\u00a0al.: BERT: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"16_CR8","doi-asserted-by":"crossref","unstructured":"He, K., et\u00a0al.: Deep residual learning for image recognition. In: CVPR, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"16_CR9","unstructured":"Korthikanti, V.A., et\u00a0al.: Reducing activation recomputation in large transformer models. Proc. Mach. Learn. Syst. 5 (2023)"},{"key":"16_CR10","unstructured":"Krizhevsky, A., et\u00a0al.: Learning multiple layers of features from tiny images. Master\u2019s thesis, University of Toronto (2009)"},{"key":"16_CR11","unstructured":"Lin, Y., et\u00a0al.: Deep gradient compression: reducing the communication bandwidth for distributed training. arXiv preprint arXiv:1712.01887 (2017)"},{"key":"16_CR12","unstructured":"Merity, S., et\u00a0al.: Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843 (2016)"},{"key":"16_CR13","doi-asserted-by":"crossref","unstructured":"Ming, Z., et\u00a0al.: ADTopk: all-dimension top-k compression for high-performance data-parallel DNN training. In: Proceedings HPDC, pp. 135\u2013147 (2024)","DOI":"10.1145\/3625549.3658678"},{"key":"16_CR14","unstructured":"Paszke, A., et\u00a0al.: PyTorch: an imperative style, high-performance deep learning library. Adv. Neural Inf. Process. Syst. 8026\u20138037 (2019)"},{"issue":"8","key":"16_CR15","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019)","journal-title":"OpenAI blog"},{"key":"16_CR16","doi-asserted-by":"crossref","unstructured":"Rajbhandari, S., et\u00a0al.: ZeRO: Memory optimizations toward training trillion parameter models. In: SC20, pp. 1\u201316. 
IEEE (2020)","DOI":"10.1109\/SC41405.2020.00024"},{"key":"16_CR17","doi-asserted-by":"crossref","unstructured":"Rajpurkar, P., et\u00a0al.: Know what you don\u2019t know: unanswerable questions for squad. arXiv preprint arXiv:1806.03822 (2018)","DOI":"10.18653\/v1\/P18-2124"},{"key":"16_CR18","doi-asserted-by":"crossref","unstructured":"Seide, F., et\u00a0al.: 1-bit stochastic gradient descent and its application to data-parallel distributed training of speech DNNs. In: INTERSPEECH (2014)","DOI":"10.21437\/Interspeech.2014-274"},{"key":"16_CR19","unstructured":"Sergeev, A., et\u00a0al.: Horovod: fast and easy distributed deep learning in TensorFlow. arXiv preprint arXiv:1802.05799 (2018)"},{"key":"16_CR20","unstructured":"Simonyan, K., et\u00a0al.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"key":"16_CR21","first-page":"4452","volume":"31","author":"SU Stich","year":"2018","unstructured":"Stich, S.U., et al.: Sparsified SGD with memory. Adv. Neural. Inf. Process. Syst. 31, 4452\u20134463 (2018)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"16_CR22","doi-asserted-by":"crossref","unstructured":"Sun, X., et\u00a0al.: Stronghold: fast and affordable billion-scale deep learning model training. In: SC22, pp. 1\u201317. IEEE (2022)","DOI":"10.1109\/SC41404.2022.00076"},{"key":"16_CR23","unstructured":"Tang, Z., et\u00a0al.: Communication-efficient distributed deep learning: a comprehensive survey. arXiv preprint arXiv:2003.06307 (2023)"},{"key":"16_CR24","doi-asserted-by":"crossref","unstructured":"Wu, D., et\u00a0al.: BIRD: a lightweight and adaptive compressor for communication-efficient distributed learning using tensor-wise bi-random sampling. In: ICCD, pp. 605\u2013613. IEEE (2023)","DOI":"10.1109\/ICCD58817.2023.00096"},{"key":"16_CR25","doi-asserted-by":"crossref","unstructured":"Xu, H., et\u00a0al.: GRACE: a compressed communication framework for distributed machine learning. 
In: ICDCS, pp. 561\u2013572. IEEE (2021)","DOI":"10.1109\/ICDCS51616.2021.00060"},{"issue":"11","key":"16_CR26","first-page":"3053","volume":"33","author":"Z Zhang","year":"2022","unstructured":"Zhang, Z., et al.: MIPD: an adaptive gradient sparsification framework for distributed DNNs training. IEEE Trans. Parallel Distrib. Syst. 33(11), 3053\u20133066 (2022)","journal-title":"IEEE Trans. Parallel Distrib. Syst."}],"container-title":["Lecture Notes in Computer Science","Euro-Par 2025: Parallel Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-99857-7_16","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T05:17:24Z","timestamp":1755839844000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-99857-7_16"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,23]]},"ISBN":["9783031998560","9783031998577"],"references-count":26,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-99857-7_16","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,8,23]]},"assertion":[{"value":"23 August 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"Euro-Par","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Parallel Processing","order":2,"name":"conference_name","label":"Conference 
Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Dresden","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Germany","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25 August 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 August 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"europar2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/2025.euro-par.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}