{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T15:59:48Z","timestamp":1774022388022,"version":"3.50.1"},"reference-count":17,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,9,1]],"date-time":"2019-09-01T00:00:00Z","timestamp":1567296000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Micro"],"published-print":{"date-parts":[[2019,9,1]]},"DOI":"10.1109\/mm.2019.2935967","type":"journal-article","created":{"date-parts":[[2019,8,22]],"date-time":"2019-08-22T19:03:20Z","timestamp":1566500600000},"page":"91-101","source":"Crossref","is-referenced-by-count":64,"title":["Optimizing Multi-GPU Parallelization Strategies for Deep Learning Training"],"prefix":"10.1109","volume":"39","author":[{"given":"Saptadeep","family":"Pal","sequence":"first","affiliation":[{"name":"University of California"}]},{"given":"Eiman","family":"Ebrahimi","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Arslan","family":"Zulfiqar","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Yaosheng","family":"Fu","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Victor","family":"Zhang","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Szymon","family":"Migacz","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"David","family":"Nellans","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Puneet","family":"Gupta","sequence":"additional","affiliation":[{"name":"University of California"}]}],"member":"263","reference":[{"key":"ref10","article-title":"PipeDream: Fast and efficient pipeline parallel DNN training","author":"harlap","year":"2018"},{"key":"ref11","article-title":"A hierarchical model for device placement","author":"mirhoseini","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref12","article-title":"Revisiting distributed synchronous SGD","volume":"abs\/1604.00981","author":"chen","year":"2016","journal-title":"CoRR"},{"key":"ref13","article-title":"On large-batch training for deep learning: Generalization gap and sharp minima","volume":"abs\/1609.04836","author":"keskar","year":"2016","journal-title":"CoRR"},{"key":"ref14","article-title":"Google's neural machine translation system: Bridging the gap between human and machine translation","author":"wu","year":"2016"},{"key":"ref15","article-title":"Accurate, large minibatch SGD: Training imageNet in 1 hour","author":"goyal","year":"2017"},{"key":"ref16","article-title":"Exploring the limits of language modeling","author":"j\u00f3zefowicz","year":"2016"},{"key":"ref17","doi-asserted-by":"crossref","DOI":"10.1109\/MM.2019.2935967","article-title":"Optimizing multi-GPU parallelization strategies for deep learning training","author":"pal","year":"2019"},{"key":"ref4","article-title":"Multi-GPU training of convnets","author":"yadan","year":"2013"},{"key":"ref3","article-title":"Distributed deep learning using synchronous stochastic gradient descent","author":"das","year":"2016"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3210377.3210394"},{"key":"ref5","article-title":"On scale-out deep learning training for cloud and HPC","author":"sridharan","year":"2018"},{"key":"ref8","article-title":"Exploring hidden dimensions in parallelizing convolutional neural networks","volume":"abs\/1802.04924","author":"jia","year":"2018","journal-title":"CoRR"},{"key":"ref7","first-page":"1223","article-title":"Large scale distributed deep networks","author":"dean","year":"0","journal-title":"Proc 25th Int Conf Neural Inf Process Syst"},{"key":"ref2","article-title":"Rethinking the inception architecture for computer vision","author":"szegedy","year":"2015"},{"key":"ref1","first-page":"1731","article-title":"Train longer, generalize better: Closing the generalization gap in large batch training of neural networks","author":"hoffer","year":"0","journal-title":"Proc 31st Int Conf Neural Inf Process Syst"},{"key":"ref9","article-title":"GPipe: Efficient training of giant neural networks using pipeline parallelism","author":"huang","year":"2018"}],"container-title":["IEEE Micro"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/40\/8833530\/08805338.pdf?arnumber=8805338","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,13]],"date-time":"2022-07-13T21:08:03Z","timestamp":1657746483000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8805338\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,9,1]]},"references-count":17,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/mm.2019.2935967","relation":{},"ISSN":["0272-1732","1937-4143"],"issn-type":[{"value":"0272-1732","type":"print"},{"value":"1937-4143","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,9,1]]}}}