{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T20:52:32Z","timestamp":1768337552880,"version":"3.49.0"},"reference-count":52,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,10,24]]},"DOI":"10.1109\/iros45743.2020.9341571","type":"proceedings-article","created":{"date-parts":[[2021,3,15]],"date-time":"2021-03-15T14:49:56Z","timestamp":1615819796000},"page":"3769-3776","source":"Crossref","is-referenced-by-count":38,"title":["Rapidly Adaptable Legged Robots via Evolutionary Meta-Learning"],"prefix":"10.1109","author":[{"given":"Xingyou","family":"Song","sequence":"first","affiliation":[]},{"given":"Yuxiang","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Krzysztof","family":"Choromanski","sequence":"additional","affiliation":[]},{"given":"Ken","family":"Caluwaerts","sequence":"additional","affiliation":[]},{"given":"Wenbo","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Chelsea","family":"Finn","sequence":"additional","affiliation":[]},{"given":"Jie","family":"Tan","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Deterministic policy gradient algorithms","author":"silver","year":"2014","journal-title":"ICML"},{"key":"ref38","article-title":"Proximal policy optimization algorithms","volume":"abs 1707 6347","author":"schulman","year":"2017"},{"key":"ref33","article-title":"Efficient off-policy meta-reinforcement learning via probabilistic context variables","author":"rakelly","year":"2019","journal-title":"ICML"},{"key":"ref32","article-title":"Towards generalization and simplicity in continuous control","author":"rajeswaran","year":"2017","journal-title":"NeurIPS"},{"key":"ref31","article-title":"Sim-to-real transfer of robotic control with dynamics randomization","author":"bin peng","year":"2018","journal-title":"ICRA"},{"key":"ref30","article-title":"On first-order meta-learning algorithms","volume":"abs 1803 2999","author":"nichol","year":"2018"},{"key":"ref37","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"ICML"},{"key":"ref36","article-title":"Evolution strategies as a scalable alternative to reinforcement learning","volume":"abs 1703 3864","author":"salimans","year":"2017"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2017.XIII.034"},{"key":"ref34","article-title":"Promp: Proximal meta-policy search","author":"rothfuss","year":"2019","journal-title":"ICLRE"},{"key":"ref28","article-title":"Multi-agent manipulation via locomotion using hierarchical sim2real","author":"nachum","year":"2019"},{"key":"ref27","article-title":"A simple neural attentive meta-learner","author":"mishra","year":"2018","journal-title":"ICLRE"},{"key":"ref29","article-title":"Learning to adapt in dynamic, real-world environments through meta-reinforcement learning","author":"nagabandi","year":"2019","journal-title":"ICLRE"},{"key":"ref2","article-title":"When random search is not enough: Sample-efficient and noise-robust blackbox optimization of RL policies","author":"choromanski","year":"2019","journal-title":"CoRL"},{"key":"ref1","article-title":"Meta reinforcement learning for sim-to-real domain adaptation","author":"arndt","year":"2019"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2004.1307456"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196642"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793864"},{"key":"ref24","article-title":"Taming MAML: efficient unbiased meta-reinforcement learning","author":"liu","year":"2019","journal-title":"ICML"},{"key":"ref23","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"ICLRE"},{"key":"ref26","article-title":"Active domain randomization","author":"mehta","year":"2019"},{"key":"ref25","article-title":"Simple random search provides a competitive approach to reinforcement learning","author":"mania","year":"2018","journal-title":"NeurIPS"},{"key":"ref50","article-title":"Policy transfer with strategy optimization","author":"yu","year":"2019"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2974685"},{"key":"ref52","article-title":"Varibad: A very good method for bayes-adaptive deep RL via meta-learning","volume":"abs 1910 8348","author":"zintgraf","year":"2019"},{"key":"ref10","article-title":"Probabilistic model-agnostic meta-learning","author":"finn","year":"2018","journal-title":"NeurIPS"},{"key":"ref11","article-title":"Gradientless descent: High-dimensional zeroth-order optimization","author":"golovin","year":"2020","journal-title":"ICLRE"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794179"},{"key":"ref12","article-title":"Meta-reinforcement learning of structured exploration strategies","author":"gupta","year":"2018","journal-title":"NeurIPS"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/URAI.2018.8442201"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2019.XV.011"},{"key":"ref15","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v31i1.11044","article-title":"Grounded action transformation for robot learning in simulation","author":"hanna","year":"2017","journal-title":"AAAI"},{"key":"ref16","article-title":"Evolved policy gradients","author":"houthooft","year":"2018","journal-title":"NeurIPS"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2016.2528294"},{"key":"ref19","article-title":"Machine learning for fast quadrupedal locomotion","author":"kohl","year":"2004","journal-title":"AAAI"},{"key":"ref4","article-title":"Pybullet, a python module for physics simulation for games, robotics and machine learning","author":"coumans","year":"2016"},{"key":"ref3","article-title":"Transfer from simulation to real world through learning deep inverse dynamics model","volume":"abs 1610 3518","author":"christiano","year":"2016"},{"key":"ref6","article-title":"Rl2: Fast reinforcement learning via slow reinforcement learning","volume":"abs 1611 2779","author":"duan","year":"2016"},{"key":"ref5","author":"cully","year":"2015","journal-title":"Robots that can adapt like animals"},{"key":"ref8","article-title":"Meta-learning and universality: Deep representations and gradient descent can approximate any learning algorithm","author":"finn","year":"2018","journal-title":"ICLRE"},{"key":"ref7","article-title":"Meta-learning by the baldwin effect","author":"fernando","year":"2018","journal-title":"GECCO"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2017.XIII.048"},{"key":"ref9","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","author":"finn","year":"2017","journal-title":"ICML"},{"key":"ref46","article-title":"Learning to reinforcement learn","author":"wang","year":"2017","journal-title":"CogSci"},{"key":"ref45","article-title":"Mujoco","author":"todorov","year":"2015"},{"key":"ref48","article-title":"Norml: No-reward meta learning","author":"yang","year":"2019","journal-title":"AAMAS"},{"key":"ref47","article-title":"Learning locomotion skills for cassie: Iterative design and sim-to-real","author":"xie","year":"2019"},{"key":"ref42","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"1999","journal-title":"NeurIPS"},{"key":"ref41","article-title":"Es-maml: Simple hessian-free meta learning","author":"song","year":"2020","journal-title":"ICLRE"},{"key":"ref44","article-title":"Learning to walk in 20 minutes","author":"tedrake","year":"0"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.010"}],"event":{"name":"2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","location":"Las Vegas, NV, USA","start":{"date-parts":[[2020,10,24]]},"end":{"date-parts":[[2021,1,24]]}},"container-title":["2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9340668\/9340635\/09341571.pdf?arnumber=9341571","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,23]],"date-time":"2023-10-23T05:14:49Z","timestamp":1698038089000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9341571\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10,24]]},"references-count":52,"URL":"https:\/\/doi.org\/10.1109\/iros45743.2020.9341571","relation":{},"subject":[],"published":{"date-parts":[[2020,10,24]]}}}