{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,23]],"date-time":"2025-11-23T06:10:44Z","timestamp":1763878244683,"version":"3.37.3"},"reference-count":28,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2018,1,1]],"date-time":"2018-01-01T00:00:00Z","timestamp":1514764800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/OAPA.html"}],"funder":[{"name":"Institute for Information and Communications Technology Promotion through the Korea Government (MSIT) (Development of HPC System for Accelerating Large-Scale Deep Learning)","award":["2016-0-00087"],"award-info":[{"award-number":["2016-0-00087"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2018]]},"DOI":"10.1109\/access.2018.2834146","type":"journal-article","created":{"date-parts":[[2018,5,8]],"date-time":"2018-05-08T18:52:20Z","timestamp":1525805540000},"page":"26493-26504","source":"Crossref","is-referenced-by-count":16,"title":["Soft Memory Box: A Virtual Shared Memory Framework for Fast Deep Neural Network Training in Distributed High Performance Computing"],"prefix":"10.1109","volume":"6","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2686-7273","authenticated-orcid":false,"given":"Shinyoung","family":"Ahn","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1794-6076","authenticated-orcid":false,"given":"Joongheon","family":"Kim","sequence":"additional","affiliation":[]},{"given":"Eunji","family":"Lim","sequence":"additional","affiliation":[]},{"given":"Sungwon","family":"Kang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"265","article-title":"On optimization methods for deep learning","author":"le","year":"2011","journal-title":"Proc Int Conf Int Conf Mach Learn"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2017.2760251"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2017.2773571"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-25255-1_65"},{"key":"ref14","first-page":"1223","article-title":"Large scale distributed deep networks","author":"dean","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst"},{"journal-title":"One weird trick for parallelizing convolutional neural networks","year":"2014","author":"krizhevsky","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/2996464"},{"key":"ref17","first-page":"1","article-title":"Revisiting distributed synchronous SGD","author":"chen","year":"2016","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref18","first-page":"583","article-title":"Scaling distributed machine learning with the parameter server","author":"li","year":"2014","journal-title":"Proc USENIX Symp Oper Syst Design Implementation"},{"key":"ref19","first-page":"693","article-title":"Hogwild: A lock-free approach to parallelizing stochastic gradient descent","author":"recht","year":"2011","journal-title":"Proc 25th Adv Neural Inf Process Syst (NIPS)"},{"journal-title":"Very Deep Convolutional Networks for Large-scale Image Recognition","year":"2014","author":"simonyan","key":"ref28"},{"key":"ref4","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref27","first-page":"4278","article-title":"Inception-v4, inception-ResNet and the impact of residual connections on learning","author":"szegedy","year":"2017","journal-title":"Proc AAAI Conf Artif Intell (AAAI)"},{"journal-title":"Deep Big Simple Neural Nets Excel on Handwritten Digit Recognition","year":"2010","author":"ciresan","key":"ref3"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2012.2205597"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2011.2134090"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2017.2762418"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390177"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553486"},{"key":"ref9","first-page":"571","article-title":"Project Adam: Building an efficient and scalable deep learning training system","author":"chilimbi","year":"2014","journal-title":"Proc USENIX Symp Oper Syst Design Implementation"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2015.2510865"},{"key":"ref20","first-page":"1","article-title":"Dogwild!&#x2014;Distributed hogwild for CPU & GPU","author":"noel","year":"2014","journal-title":"Proc NIPS Workshop Distrib Mach Learn Matrix Comput"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2654889"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2015.2462371"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref23","article-title":"Poster W37: A novel shared memory framework for distributed deep learning in high-performance computing architecture","author":"ahn","year":"2018","journal-title":"Proc 40th IEEE\/ACM Int Conf Softw Eng (ICSE)"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"journal-title":"Rethinking the inception architecture for computer vision","year":"2015","author":"szegedy","key":"ref25"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8274985\/08356235.pdf?arnumber=8356235","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,26]],"date-time":"2022-01-26T04:11:20Z","timestamp":1643170280000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8356235\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/access.2018.2834146","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2018]]}}}