{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,25]],"date-time":"2026-01-25T04:11:12Z","timestamp":1769314272342,"version":"3.49.0"},"reference-count":17,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9054164","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T20:21:13Z","timestamp":1586463673000},"page":"1603-1607","source":"Crossref","is-referenced-by-count":17,"title":["Accelerating Distributed Deep Learning By Adaptive Gradient Quantization"],"prefix":"10.1109","author":[{"given":"Jinrong","family":"Guo","sequence":"first","affiliation":[]},{"given":"Wantao","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Wang","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Jizhong","family":"Han","sequence":"additional","affiliation":[]},{"given":"Ruixuan","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yijun","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Songlin","family":"Hu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"1509","article-title":"Terngrad: Ternary gradients to reduce communication in distributed deep learning","author":"wen","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3234944.3234978"},{"key":"ref12","doi-asserted-by":"crossref","first-page":"2219","DOI":"10.1109\/ICASSP.2015.7178365","article-title":"Reducing communication overhead in distributed learning by an order of magnitude (almost)","author":"\u00f8land","year":"2015","journal-title":"2015 IEEE International Conference on Acoustics Speech and Signal Processing (ICASSP)"},{"key":"ref13","first-page":"315","article-title":"Accelerating stochastic gradient descent using predictive variance reduction","author":"johnson","year":"2013","journal-title":"Advances in neural information processing systems"},{"key":"ref14","article-title":"Opening the black box of deep neural networks via information","author":"shwartz-ziv","year":"2017"},{"key":"ref15","article-title":"Variance-based gradient compression for efficient distributed deep learning","author":"tsuzuku","year":"2018"},{"key":"ref16","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014"},{"key":"ref17","article-title":"Adding gradient noise improves learning for very deep networks","author":"neelakantan","year":"2015"},{"key":"ref4","article-title":"Gpipe: Efficient training of giant neural networks using pipeline parallelism","author":"huang","year":"2018"},{"key":"ref3","article-title":"Extremely large minibatch sgd: Training resnet-50 on imagenet in 15 minutes","author":"akiba","year":"2017"},{"key":"ref6","first-page":"181","article-title":"Poseidon: An efficient communication architecture for distributed deep learning on GPU clusters","author":"zhang","year":"2017","journal-title":"2017 USENIX Annual Technical Conference (USENIX ATC 17)"},{"key":"ref5","article-title":"Optimizing network performance for distributed dnn training on gpu clusters: Imagenet\/alexnet training in 1.5 minutes","author":"sun","year":"2019"},{"key":"ref8","article-title":"Gradiveq: Vector quantization for bandwidth-efficient gradient aggregation in distributed cnn training","author":"yu","year":"2018"},{"key":"ref7","article-title":"1-bit stochastic gradient descent and its application to data-parallel distributed training of speech dnns","author":"seide","year":"2014","journal-title":"Fifteenth Annual Conference of the International Speech Communication Association"},{"key":"ref2","article-title":"Accurate, large minibatch sgd: training imagenet in 1 hour","author":"goyal","year":"2017"},{"key":"ref1","first-page":"1223","article-title":"Large scale distributed deep networks","author":"dean","year":"2012","journal-title":"Advances in neural information processing systems"},{"key":"ref9","first-page":"1709","article-title":"Qsgd: Communication-efficient sgd via gradient quantization and encoding","author":"alistarh","year":"2017","journal-title":"Advances in neural information processing systems"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Barcelona, Spain","start":{"date-parts":[[2020,5,4]]},"end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09054164.pdf?arnumber=9054164","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:10:28Z","timestamp":1656375028000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9054164\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":17,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9054164","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}