{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T18:57:05Z","timestamp":1771700225458,"version":"3.50.1"},"reference-count":28,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2017,9]]},"DOI":"10.1109\/iros.2017.8202141","type":"proceedings-article","created":{"date-parts":[[2017,12,14]],"date-time":"2017-12-14T22:12:59Z","timestamp":1513289579000},"page":"79-86","source":"Crossref","is-referenced-by-count":76,"title":["Collective robot reinforcement learning with distributed asynchronous guided policy search"],"prefix":"10.1109","author":[{"given":"Ali","family":"Yahya","sequence":"first","affiliation":[]},{"given":"Adrian","family":"Li","sequence":"additional","affiliation":[]},{"given":"Mrinal","family":"Kalakrishnan","sequence":"additional","affiliation":[]},{"given":"Yevgen","family":"Chebotar","sequence":"additional","affiliation":[]},{"given":"Sergey","family":"Levine","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2013.6707053"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1177\/02783640022067878"},{"key":"ref12","article-title":"Cloud-enabled humanoid robots","author":"kuffner","year":"2010","journal-title":"IEEE-RAS International Conference on Humanoid Robotics"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2013.6631180"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2014.2376492"},{"key":"ref15","article-title":"Parallelized stochastic gradient descent","author":"zinkevich","year":"2010","journal-title":"Neural Information Processing Systems (NIPS)"},{"key":"ref16","article-title":"Asynchronous Methods for Deep Reinforcement Learning","author":"mnih","year":"2016","journal-title":"ICML"},{"key":"ref17","first-page":"3132","article-title":"Interactive control of diverse complex characters with neural networks","author":"mordatch","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1177\/0278364917710318"},{"key":"ref19","article-title":"Deep reinforcement learning for robotic manipulation with asynchronous off-policy updates","author":"gu","year":"2016","journal-title":"ICRA"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46487-9_51"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1561\/2300000021","article-title":"A survey on policy search for robotics","volume":"2","author":"deisenroth","year":"2013","journal-title":"Foundations and Trends in Robotics"},{"key":"ref27","article-title":"Revisiting distributed synchronous SGD","author":"chen","year":"2016","journal-title":"International Conference on Learning Representations (ICLR) Workshop Track"},{"key":"ref3","first-page":"3137","article-title":"A generalized path integral control approach to reinforcement learning","volume":"11","author":"theodorou","year":"2010","journal-title":"Journal of Machine Learning Research"},{"key":"ref6","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"ICLR"},{"key":"ref5","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"ICML"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2002.1014739"},{"key":"ref7","first-page":"1334","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"levine","year":"2016","journal-title":"Journal of Machine Learning Research"},{"key":"ref2","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v24i1.7727","article-title":"Relative entropy policy search","author":"peters","year":"2010","journal-title":"AAAI"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2004.1389841"},{"key":"ref20","article-title":"Guided policy search as approximate mirror descent","author":"montgomery","year":"2016","journal-title":"NIPS"},{"key":"ref22","article-title":"Bregman alternating direction method of multipliers","author":"wang","year":"2014","journal-title":"NIPS"},{"key":"ref21","article-title":"Learning neural network policies with guided policy search under unknown dynamics","author":"levine","year":"2014","journal-title":"NIPS"},{"key":"ref24","first-page":"222","article-title":"Iterative linear quadratic regulator design for nonlinear biological movement systems","author":"li","year":"2004","journal-title":"ICINCO"},{"key":"ref23","first-page":"273","article-title":"Hierarchical relative entropy policy search","author":"daniel","year":"2012","journal-title":"AISTATS"},{"key":"ref26","article-title":"Large scale distributed deep networks","author":"dean","year":"2012","journal-title":"NIPS"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989384"}],"event":{"name":"2017 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","location":"Vancouver, BC","start":{"date-parts":[[2017,9,24]]},"end":{"date-parts":[[2017,9,28]]}},"container-title":["2017 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8119304\/8202121\/08202141.pdf?arnumber=8202141","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,29]],"date-time":"2023-08-29T22:59:52Z","timestamp":1693349992000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/8202141\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,9]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/iros.2017.8202141","relation":{},"subject":[],"published":{"date-parts":[[2017,9]]}}}