{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T16:19:30Z","timestamp":1774628370561,"version":"3.50.1"},"reference-count":33,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018,12]]},"DOI":"10.1109\/padsw.2018.8644932","type":"proceedings-article","created":{"date-parts":[[2019,2,21]],"date-time":"2019-02-21T23:23:38Z","timestamp":1550791418000},"page":"425-432","source":"Crossref","is-referenced-by-count":15,"title":["A DAG Model of Synchronous Stochastic Gradient Descent in Distributed Deep Learning"],"prefix":"10.1109","author":[{"given":"Shaohuai","family":"Shi","sequence":"first","affiliation":[]},{"given":"Qiang","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xiaowen","family":"Chu","sequence":"additional","affiliation":[]},{"given":"Bo","family":"Li","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref31","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Advances in neural information processing systems"},{"key":"ref30","first-page":"181","article-title":"Poseidon: an efficient communication architecture for distributed deep learning on GPU clusters","author":"zhang","year":"2017","journal-title":"Proceedings of the 2017 USENIX Conference on Usenix Annual Technical Conference"},{"key":"ref10","article-title":"MXNet: A flexible and efficient machine learning library for heterogeneous distributed systems","author":"chen","year":"2015","journal-title":"arXiv preprint arXiv 1512 00327"},{"key":"ref11","article-title":"TensorFlow: Large-scale machine learning on heterogeneous systems, 2015","volume":"1","author":"abadi","year":"2015","journal-title":"software available from tensorflow org"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/2939672.2945397"},{"key":"ref13","article-title":"cudnn: Efficient primitives for deep learning","author":"chetlur","year":"2014","journal-title":"arXiv preprint arXiv 1410 0759"},{"key":"ref14","article-title":"Com-parative study of deep learning software frameworks","author":"bahrampour","year":"2015","journal-title":"arXiv preprint arXiv 1511 05271"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CCBD.2016.029"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICDCS.2017.259"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ISPASS.2017.7975270"},{"key":"ref18","year":"2018","journal-title":"Caffe-MPI for Deep Learning"},{"key":"ref19","article-title":"Distriduted deep learning using synchronous stochastic gradient descent","author":"das","year":"2016","journal-title":"arXiv preprint arXiv 1602 04875"},{"key":"ref28","article-title":"Optimized broadcast for deep learning workloads on dense-GPU infiniband clusters: MPI or NCCL?","author":"awan","year":"2017","journal-title":"arXiv preprint arXiv 1707 07816"},{"key":"ref4","first-page":"2834","article-title":"On model parallelization and scheduling strategies for distributed machine learning","author":"lee","year":"2014","journal-title":"Advances in neural information processing 
systems"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3018743.3018769"},{"key":"ref3","article-title":"Stochastic nonconvex optimization with large minibatches","author":"wang","year":"2017","journal-title":"arXiv preprint arXiv 1709 04396"},{"key":"ref6","article-title":"Scaling SGD batch size to 32k for ImageNet training","author":"you","year":"2017","journal-title":"arXiv preprint arXiv 1708 05227"},{"key":"ref29","article-title":"Deep residual learning for image recognition","author":"he","year":"2015","journal-title":"arXiv preprint arXiv 1512 03385"},{"key":"ref5","first-page":"2595","article-title":"Parallelized stochastic gradient descent","author":"zinkevich","year":"2010","journal-title":"Advances in neural information processing systems"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553486"},{"key":"ref7","article-title":"Accurate, large minibatch SGD: Training ImageNet in 1 hour","author":"goyal","year":"2017","journal-title":"arXiv preprint arXiv 1706 02677"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-7908-2604-3_16"},{"key":"ref9","article-title":"Revisiting distributed synchronous SGD","author":"chen","year":"2016","journal-title":"arXiv preprint arXiv 1604 00981"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"436","DOI":"10.1038\/nature14539","article-title":"Deep learning","volume":"521","author":"lecun","year":"2015","journal-title":"Nature"},{"key":"ref20","article-title":"100-epoch ImageNet training with AlexNet in 24 minutes","author":"you","year":"2017","journal-title":"arXiv preprint arXiv 1709 04396"},{"key":"ref22","first-page":"3","article-title":"Scaling distributed machine learning with the parameter server","volume":"1","author":"li","year":"2014","journal-title":"OSDI"},{"key":"ref21","article-title":"Performance modeling and evaluation of distributed deep learning frameworks on GPUs","author":"shi","year":"2017","journal-title":"ar Xiv preprint arXiv 1711 09856"},{"key":"ref24","doi-asserted-by":"crossref","first-page":"18","DOI":"10.1007\/978-3-319-69179-4_2","article-title":"Distributed training large-scale deep architectures","author":"zou","year":"2017","journal-title":"International Conference on Advanced Data Mining and Applications"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/2901318.2901323"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/2966884.2966912"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.procs.2010.04.056"}],"event":{"name":"2018 IEEE 24th International Conference on Parallel and Distributed Systems (ICPADS)","location":"Singapore, Singapore","start":{"date-parts":[[2018,12,11]]},"end":{"date-parts":[[2018,12,13]]}},"container-title":["2018 IEEE 24th International Conference on Parallel and Distributed Systems (ICPADS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8635632\/8644527\/08644932.pdf?arnumber=8644932","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,27]],"date-time":"2022-01-27T06:46:32Z","timestamp":1643265992000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8644932\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,12]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/padsw.2018.8644932","relation":{},"subject":[],"published":{"date-parts":[[2018,12]]}}}