{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T04:25:55Z","timestamp":1750220755618,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":10,"publisher":"ACM","license":[{"start":{"date-parts":[[2020,7,8]],"date-time":"2020-07-08T00:00:00Z","timestamp":1594166400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2020,7,8]]},"DOI":"10.1145\/3377929.3389915","type":"proceedings-article","created":{"date-parts":[[2020,8,26]],"date-time":"2020-08-26T15:26:57Z","timestamp":1598455617000},"page":"153-154","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["Population-based evolutionary distributed SGD"],"prefix":"10.1145","author":[{"given":"Amna","family":"Shahab","sequence":"first","affiliation":[{"name":"University of Edinburgh"}]},{"given":"Boris","family":"Grot","sequence":"additional","affiliation":[{"name":"University of Edinburgh"}]}],"member":"320","published-online":{"date-parts":[[2020,7,8]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"X. Cui etal 2018. Evolutionary stochastic gradient descent for optimization of deep neural networks. In NIPS. 6048--6058.  X. Cui et al. 2018. Evolutionary stochastic gradient descent for optimization of deep neural networks. In NIPS. 6048--6058."},{"key":"e_1_3_2_1_2_1","unstructured":"X. Jia etal 2018. Highly scalable deep learning training system with mixed-precision: Training imagenet in four minutes. arXiv:1807.11205 (2018).  X. Jia et al. 2018. Highly scalable deep learning training system with mixed-precision: Training imagenet in four minutes. arXiv:1807.11205 (2018)."},{"key":"e_1_3_2_1_3_1","unstructured":"Y. Jiang etal 2019. Fantastic Generalization Measures and Where to Find Them. arXiv:1912.02178 (2019).  Y. Jiang et al. 2019. Fantastic Generalization Measures and Where to Find Them. arXiv:1912.02178 (2019)."},{"key":"e_1_3_2_1_4_1","unstructured":"N. Keskar etal 2016. On large-batch training for deep learning: Generalization gap and sharp minima. arXiv:1609.04836 (2016).  N. Keskar et al. 2016. On large-batch training for deep learning: Generalization gap and sharp minima. arXiv:1609.04836 (2016)."},{"key":"e_1_3_2_1_5_1","first-page":"11","article-title":"2019. CROSSBOW: scaling deep learning with small batch sizes on multi-gpu servers","volume":"12","author":"Koliousis A.","year":"2019","journal-title":"VLDB"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"crossref","unstructured":"S. Li etal 2018. Near-optimal straggler mitigation for distributed gradient methods. In 2018 IEEE IPDPSW. IEEE 857--866.  S. Li et al. 2018. Near-optimal straggler mitigation for distributed gradient methods. In 2018 IEEE IPDPSW. IEEE 857--866.","DOI":"10.1109\/IPDPSW.2018.00137"},{"key":"e_1_3_2_1_7_1","unstructured":"I. Loshchilov and F. Hutter. 2016. CMA-ES for hyperparameter optimization of deep neural networks. arXiv:1604.07269 (2016).  I. Loshchilov and F. Hutter. 2016. CMA-ES for hyperparameter optimization of deep neural networks. arXiv:1604.07269 (2016)."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"crossref","unstructured":"R. Miikkulainen etal 2019. Evolving deep neural networks. In Artificial Intelligence in the Age of Neural Networks and Brain Computing. 
Elsevier 293--312.  R. Miikkulainen et al. 2019. Evolving deep neural networks. In Artificial Intelligence in the Age of Neural Networks and Brain Computing. Elsevier 293--312.","DOI":"10.1016\/B978-0-12-815480-9.00015-3"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"crossref","unstructured":"G. Morse and K. O Stanley. 2016. Simple evolutionary optimization can rival stochastic gradient descent in neural networks. In GECCO. 477--484.  G. Morse and K. O Stanley. 2016. Simple evolutionary optimization can rival stochastic gradient descent in neural networks. In GECCO. 477--484.","DOI":"10.1145\/2908812.2908916"},{"key":"e_1_3_2_1_10_1","unstructured":"C. Shallue etal 2018. Measuring the effects of data parallelism on neural network training. arXiv:1811.03600 (2018).  C. Shallue et al. 2018. Measuring the effects of data parallelism on neural network training. arXiv:1811.03600 (2018)."}],"event":{"name":"GECCO '20: Genetic and Evolutionary Computation Conference","sponsor":["SIGEVO ACM Special Interest Group on Genetic and Evolutionary Computation"],"location":"Canc\u00fan Mexico","acronym":"GECCO '20"},"container-title":["Proceedings of the 2020 Genetic and Evolutionary Computation Conference Companion"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3377929.3389915","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3377929.3389915","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T22:39:00Z","timestamp":1750199940000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3377929.3389915"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,7,8]]},"references-count":10,"alternative-id":["10.1145\/3377929.3389915","10.1145\/3377929"],"URL":"https:\/\/doi.org\/10.1145\/3377929.3389915","relation":{},"subject":[],"published":{"date-parts":[[2020,7,8]]},"assertion":[{"value":"2020-07-08","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
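The record above is a Crossref REST API "work" message for this GECCO '20 paper. As a minimal sketch of how such a record is retrieved and read, the Python below queries Crossref's public endpoint for the paper's DOI; the field names match those in the record itself, while the `requests` dependency and the particular fields printed are illustrative choices, not part of the original deposit.

    # Minimal sketch: fetch this work's metadata from the Crossref REST API.
    # Assumes network access and the third-party `requests` package.
    import requests

    DOI = "10.1145/3377929.3389915"
    resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=10)
    resp.raise_for_status()

    work = resp.json()["message"]  # same shape as the "message" object above
    print(work["title"][0])        # Population-based evolutionary distributed SGD
    print(", ".join(f"{a['given']} {a['family']}" for a in work["author"]))
    print(work["container-title"][0], work["page"])
    print("Cited by:", work["is-referenced-by-count"])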