{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:05:40Z","timestamp":1766066740828,"version":"3.28.0"},"reference-count":23,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2017,5]]},"DOI":"10.1109\/icra.2017.7989383","type":"proceedings-article","created":{"date-parts":[[2017,7,25]],"date-time":"2017-07-25T17:44:28Z","timestamp":1501004668000},"page":"3373-3380","source":"Crossref","is-referenced-by-count":18,"title":["Reset-free guided policy search: Efficient deep reinforcement learning with stochastic initial states"],"prefix":"10.1109","author":[{"given":"William","family":"Montgomery","sequence":"first","affiliation":[]},{"given":"Anurag","family":"Ajay","sequence":"additional","affiliation":[]},{"given":"Chelsea","family":"Finn","sequence":"additional","affiliation":[]},{"given":"Pieter","family":"Abbeel","sequence":"additional","affiliation":[]},{"given":"Sergey","family":"Levine","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"journal-title":"Introduction to Reinforcement Learning","year":"1998","author":"sutton","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913495721"},{"key":"ref12","article-title":"A survey on policy search for robotics","volume":"2","author":"deisenroth","year":"2013","journal-title":"Foundations and Trends in Robotics"},{"key":"ref13","article-title":"An application of reinforcement learning to aerobatic helicopter flight","author":"abbeel","year":"2006","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2004.1389841"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2011.6095096"},{"key":"ref16","article-title":"Guided policy search as approximate mirror descent","author":"montgomery","year":"2016","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/S0167-6377(02)00231-6"},{"key":"ref18","article-title":"Learning neural network policies with guided policy search under unknown dynamics","author":"levine","year":"2014","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2009.5152385"},{"key":"ref3","article-title":"Learning attractor landscapes for learning motor primitives","author":"ijspeert","year":"2003","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref6","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref5","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"levine","year":"2016","journal-title":"Journal of Machine Learning Research"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511804441"},{"key":"ref7","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref2","article-title":"Reinforcement learning of motor skills in high dimensions","author":"theodorou","year":"2010","journal-title":"International Conference on Robotics and Automation (ICRA)"},{"key":"ref1","article-title":"Relative entropy policy search","author":"peters","year":"2010","journal-title":"AAAI Conference on Artificial Intelligence"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2005.1469949"},{"key":"ref20","article-title":"Applying the episodic natural actor-critic architecture to motor primitive learning","author":"peters","year":"2007","journal-title":"European Symposium on Artificial Neural Networks (ESANN)"},{"key":"ref22","article-title":"Deterministic policy gradient algorithms","author":"silver","year":"2014","journal-title":"Proc of the International Conference on Machine Learning (ICML)"},{"key":"ref21","article-title":"Benchmarking deep reinforcement learning for continuous control","author":"duan","year":"2016","journal-title":"Proc of the International Conference on Machine Learning (ICML)"},{"key":"ref23","article-title":"Model-free trajectory optimization for reinforcement learning","author":"akrour","year":"2016","journal-title":"International Conference on Machine Learning (ICML)"}],"event":{"name":"2017 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2017,5,29]]},"location":"Singapore, Singapore","end":{"date-parts":[[2017,6,3]]}},"container-title":["2017 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7960754\/7988677\/07989383.pdf?arnumber=7989383","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2017,8,16]],"date-time":"2017-08-16T12:22:27Z","timestamp":1502886147000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/7989383\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,5]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/icra.2017.7989383","relation":{},"subject":[],"published":{"date-parts":[[2017,5]]}}}