{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T21:08:44Z","timestamp":1775077724805,"version":"3.50.1"},"reference-count":50,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"UKRI\/EPSRC RAIN Hub","award":["EP\/R026084\/1"],"award-info":[{"award-number":["EP\/R026084\/1"]}]},{"name":"EU H2020"},{"DOI":"10.13039\/501100000266","name":"Engineering and Physical Sciences Research Council","doi-asserted-by":"publisher","award":["EP\/S002383\/1"],"award-info":[{"award-number":["EP\/S002383\/1"]}],"id":[{"id":"10.13039\/501100000266","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Royal Society University Research Fellowship"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Robot."],"published-print":{"date-parts":[[2022,10]]},"DOI":"10.1109\/tro.2022.3172469","type":"journal-article","created":{"date-parts":[[2022,5,20]],"date-time":"2022-05-20T19:37:07Z","timestamp":1653075427000},"page":"2908-2927","source":"Crossref","is-referenced-by-count":111,"title":["RLOC: Terrain-Aware Legged Locomotion Using Reinforcement Learning and Optimal Control"],"prefix":"10.1109","volume":"38","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1308-3744","authenticated-orcid":false,"given":"Siddhant","family":"Gangapurwala","sequence":"first","affiliation":[{"name":"Dynamic Robots Systems Group, Oxford Robotics Institute, University of Oxford, Oxford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5651-8736","authenticated-orcid":false,"given":"Mathieu","family":"Geisert","sequence":"additional","affiliation":[{"name":"Dynamic Robots Systems Group, Oxford Robotics Institute, University of Oxford, Oxford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9847-2601","authenticated-orcid":false,"given":"Romeo","family":"Orsolino","sequence":"additional","affiliation":[{"name":"Dynamic Robots Systems Group, Oxford Robotics Institute, University of Oxford, Oxford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2940-0879","authenticated-orcid":false,"given":"Maurice","family":"Fallon","sequence":"additional","affiliation":[{"name":"Dynamic Robots Systems Group, Oxford Robotics Institute, University of Oxford, Oxford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4371-4623","authenticated-orcid":false,"given":"Ioannis","family":"Havoutis","sequence":"additional","affiliation":[{"name":"Dynamic Robots Systems Group, Oxford Robotics Institute, University of Oxford, Oxford, U.K."}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2016.7758092"},{"key":"ref2","article-title":"Anybotics introduces sleek new ANYmal C quadruped","volume-title":"IEEE Spectr.","author":"Ackerman","year":"2019"},{"key":"ref3","article-title":"North sea deployment shows how 
quadruped robots can be commercially useful","volume-title":"IEEE Spectr.","author":"Ackerman","year":"2018"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1002\/rob.21964"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/2185520.2185539"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196673"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8968251"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1242\/jeb.202.23.3325"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2001.973365"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1177\/0278364906066768"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2009.2024565"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.1969.4502596"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICHR.2006.321385"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s10514-013-9341-4"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2016.7803333"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2018.2862902"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.3007427"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196777"},{"issue":"26","key":"ref19","article-title":"Learning agile and dynamic motor skills for legged robots","volume-title":"Sci. Robot.","volume":"4","author":"Hwangbo","year":"2019"},{"key":"ref20","article-title":"Robust recovery controller for a quadrupedal robot using deep reinforcement learning","author":"Lee","year":"2019"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2979656"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2020.xvi.064"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593722"},{"key":"ref24","first-page":"1","article-title":"Guided policy search","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Levine","year":"2013"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2019.XV.011"},{"key":"ref26","first-page":"1","article-title":"Data efficient reinforcement learning for legged robots","volume-title":"Proc. Conf. Robot Learn.","author":"Yang","year":"2020"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202133"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2979660"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abc5986"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abk2822"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2018.2794620"},{"key":"ref32","volume-title":"Introduction to Reinforcement Learning","volume":"135","author":"Sutton","year":"1998"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2018.2849506"},{"key":"ref34","first-page":"91","article-title":"Learning to walk in minutes using massively parallel deep reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Rudin","year":"2022"},{"key":"ref35","first-page":"916","article-title":"Policies modulating trajectory generators","volume-title":"Proc. Conf. 
Robot Learn.","author":"Iscen","year":"2018"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8206174"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2931284"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2020.2983318"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636474"},{"key":"ref40","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja","year":"2018"},{"key":"ref41","first-page":"3053","article-title":"RLlib: Abstractions for distributed reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Liang","year":"2018"},{"key":"ref42","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schulman","year":"2015"},{"key":"ref43","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017"},{"key":"ref44","article-title":"Soft actor-critic algorithms and applications","volume-title":"CoRR","volume":"abs\/1812.05905","author":"Haarnoja","year":"2018"},{"key":"ref45","first-page":"1582","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto","year":"2018"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2018.2792536"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561639"},{"key":"ref48","first-page":"249","article-title":"Understanding the difficulty of training deep feedforward neural networks","volume-title":"Proc. 13th Int. Conf. Artif. Intell. Statist.","author":"Glorot","year":"2010"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2004.1389727"},{"key":"ref50","article-title":"Open dynamics engine","author":"Smith","year":"2005"}],"container-title":["IEEE Transactions on Robotics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8860\/9910236\/09779429.pdf?arnumber=9779429","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,22]],"date-time":"2024-01-22T22:42:11Z","timestamp":1705963331000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9779429\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10]]},"references-count":50,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/tro.2022.3172469","relation":{},"ISSN":["1552-3098","1941-0468"],"issn-type":[{"value":"1552-3098","type":"print"},{"value":"1941-0468","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,10]]}}}