{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,20]],"date-time":"2025-05-20T10:42:17Z","timestamp":1747737737659,"version":"3.37.3"},"reference-count":39,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","funder":[{"DOI":"10.13039\/501100003816","name":"Huawei Technologies Canada","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003816","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100000038","name":"Natural Science and Engineering Research Council (NSERC) of Canada through a Discovery Research Grant","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100000038","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Omani Government Postgraduate Scholarship"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Sel. Areas Inf. Theory"],"published-print":{"date-parts":[[2021,6]]},"DOI":"10.1109\/jsait.2021.3079856","type":"journal-article","created":{"date-parts":[[2021,5,12]],"date-time":"2021-05-12T19:46:08Z","timestamp":1620848768000},"page":"784-801","source":"Crossref","is-referenced-by-count":4,"title":["Asynchronous Delayed Optimization With Time-Varying Minibatches"],"prefix":"10.1109","volume":"2","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3686-1289","authenticated-orcid":false,"given":"Haider","family":"Al-Lawati","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7628-7568","authenticated-orcid":false,"given":"Tharindu B.","family":"Adikari","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8100-5599","authenticated-orcid":false,"given":"Stark C.","family":"Draper","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc NIPS"},{"journal-title":"Discrete Stochastic Processes","year":"2012","author":"gallager","key":"ref38"},{"key":"ref33","first-page":"2116","article-title":"Dual averaging method for regularized stochastic learning and online optimization","author":"xiao","year":"2009","journal-title":"Advances in neural information processing systems"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-007-0149-x"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-010-0434-y"},{"key":"ref30","first-page":"1","article-title":"Anytime minibatch with stale gradients","author":"al-lawati","year":"2019","journal-title":"Proc Conf Inf Sci Syst"},{"journal-title":"Wide residual networks","year":"2016","author":"zagoruyko","key":"ref37"},{"journal-title":"A downsampled variant of imagenet as an alternative to the cifar datasets","year":"2017","author":"chrabaszcz","key":"ref36"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"journal-title":"CIFAR-10 Dataset","year":"2014","author":"krizhevsky","key":"ref34"},{"key":"ref10","first-page":"2737","article-title":"Asynchronous parallel stochastic gradient for nonconvex optimization","author":"lian","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/S1570-579X(01)80023-9"},{"key":"ref12","first-page":"2331","article-title":"Slow learners are fast","author":"zinkevich","year":"2009","journal-title":"Advances in neural information processing systems"},{"key":"ref13","first-page":"2595","article-title":"Parallelized stochastic gradient descent","author":"zinkevich","year":"2010","journal-title":"Advances in neural information processing systems"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TSIPN.2018.2866320"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2020.3021381"},{"key":"ref16","first-page":"1223","article-title":"More effective distributed ML via a stale synchronous parallel parameter server","author":"ho","year":"2013","journal-title":"Advances in neural information processing systems"},{"key":"ref17","first-page":"2350","article-title":"Staleness-aware async-SGD for distributed deep learning","author":"zhang","year":"2016","journal-title":"Proc Int Joint Conf Artif Intell"},{"key":"ref18","first-page":"2832","article-title":"Estimation, optimization, and parallelism when data is sparse","author":"duchi","year":"2013","journal-title":"Advances in neural information processing systems"},{"journal-title":"Parallel Coordinate Descent for l1-regularized Loss Minimization","year":"2011","author":"bradley","key":"ref19"},{"key":"ref28","article-title":"Anytime MiniBatch: Exploiting stragglers in online distributed optimization","author":"ferdinand","year":"2019","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref4","first-page":"873","article-title":"Distributed delayed stochastic optimization","author":"agarwal","year":"2011","journal-title":"Advances in neural information processing systems"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ALLERTON.2018.8635903"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.1986.1104412"},{"key":"ref6","first-page":"803","article-title":"Slow and stale gradients can win the race: Error-runtime trade-offs in distributed SGD","author":"dutta","year":"2018","journal-title":"Proc Int Conf Artif Intell Stat"},{"key":"ref29","first-page":"8386","article-title":"Robust and communication-efficient collaborative learning","author":"reisizadeh","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref5","first-page":"1223","article-title":"Large scale distributed deep networks","author":"agarwal","year":"2012","journal-title":"Advances in neural information processing systems"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2016.2525015"},{"key":"ref7","first-page":"693","article-title":"HOGWILD!: A lock-free approach to parallelizing stochastic gradient descent","author":"feng","year":"2011","journal-title":"Advances in neural information processing systems"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/2847220.2847223"},{"journal-title":"Asynchronous decentralized accelerated stochastic gradient descent","year":"2018","author":"lan","key":"ref9"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TSC.2016.2611578"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/2783258.2783412"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2020.3026619"},{"key":"ref21","first-page":"4120","article-title":"Asynchronous stochastic gradient descent with delay compensation","author":"zheng","year":"2017","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TSIPN.2016.2620440"},{"key":"ref23","first-page":"165","article-title":"Optimal distributed online prediction using mini-batches","volume":"13","author":"dekel","year":"2012","journal-title":"J Mach Learn Res"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICMLA.2017.0-166"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CAMSAP.2017.8313171"}],"container-title":["IEEE Journal on Selected Areas in Information Theory"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8700143\/9459757\/09429693.pdf?arnumber=9429693","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,11,8]],"date-time":"2021-11-08T22:36:29Z","timestamp":1636410989000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9429693\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6]]},"references-count":39,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/jsait.2021.3079856","relation":{},"ISSN":["2641-8770"],"issn-type":[{"type":"electronic","value":"2641-8770"}],"subject":[],"published":{"date-parts":[[2021,6]]}}}