{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,23]],"date-time":"2026-01-23T08:37:09Z","timestamp":1769157429017,"version":"3.49.0"},"reference-count":44,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"},{"start":{"date-parts":[[2021,10,1]],"date-time":"2021-10-01T00:00:00Z","timestamp":1633046400000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2021,10]]},"DOI":"10.1109\/lra.2021.3076955","type":"journal-article","created":{"date-parts":[[2021,9,27]],"date-time":"2021-09-27T20:59:16Z","timestamp":1632776356000},"page":"8561-8568","source":"Crossref","is-referenced-by-count":17,"title":["On the Emergence of Whole-Body Strategies From Humanoid Robot Push-Recovery Learning"],"prefix":"10.1109","volume":"6","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0808-9754","authenticated-orcid":false,"given":"Diego","family":"Ferigo","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8890-2732","authenticated-orcid":false,"given":"Raffaello","family":"Camoriano","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6903-4458","authenticated-orcid":false,"given":"Paolo Maria","family":"Viceconte","sequence":"additional","affiliation":[]},{"given":"Daniele","family":"Calandriello","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9283-6133","authenticated-orcid":false,"given":"Silvio","family":"Traversaro","sequence":"additional","affiliation":[]},{"given":"Lorenzo","family":"Rosasco","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7600-3203","authenticated-orcid":false,"given":"Daniele","family":"Pucci","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"OpenAI gym","author":"brockman","year":"2016"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/SII46433.2020.9025951"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/3355089.3356536"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/3450626.3459670"},{"key":"ref31","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017"},{"key":"ref30","article-title":"Iterative reinforcement learning based design of dynamic locomotion skills for cassie","author":"xie","year":"2019"},{"key":"ref37","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume":"99","author":"sutton","year":"1999","journal-title":"Adv Neural Inf Process Syst"},{"key":"ref36","article-title":"Reinforcement learning and optimal control","author":"bertsekas","year":"2019","journal-title":"Belmont MA Athena Scientific"},{"key":"ref35","first-page":"47","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction ser Adaptive Computation and Machine Learning 
series"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2972879"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2018.8625025"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.21105\/joss.00500"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8594277"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2018.8625045"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2001.973365"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.3389\/frobt.2015.00006"},{"key":"ref15","article-title":"A unified view of the equations of motion used for control design of humanoid robots","author":"traversaro","year":"2017"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICHR.2007.4813931"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2016.2629489"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2016.7803247"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.1970.4502681"},{"key":"ref28","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"2018","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2019.01.011"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"key":"ref3","article-title":"An empirical investigation of the challenges of real-world reinforcement learning","author":"dulac-arnold","year":"2020"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2952353"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9197175"},{"key":"ref5","article-title":"Transfer from simulation to real world through learning deep inverse dynamics model","author":"christiano","year":"2016"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/s10514-015-9479-3"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2014.7041347"},{"key":"ref2","article-title":"Emergence of locomotion behaviours in rich environments","author":"heess","year":"2017"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2016.7803271"},{"key":"ref1","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2019"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/Humanoids.2011.6100894"},{"key":"ref22","first-page":"200","article-title":"Capture point: A step toward humanoid push recovery","author":"pratt","year":"2006","journal-title":"Proc 6th IEEE-RAS Int Conf Humanoid Robots"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICHR.2006.321375"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2016.7759126"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2011.6048045"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2017.8246900"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1177\/0278364912452673"},{"key":"ref44","article-title":"Model-based reinforcement learning: A survey","author":"moerland","year":"2020"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.010"},{"key":"ref43","first-page":"3053","article-title":"RLlib: Abstractions for distributed reinforcement learning","author":"liang","year":"2018","journal-title":"Int Conf Mach 
Learn"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2012.6651601"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/7083369\/9475905\/9420230-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/9475905\/09420230.pdf?arnumber=9420230","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,8]],"date-time":"2022-04-08T18:54:36Z","timestamp":1649444076000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9420230\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,10]]},"references-count":44,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/lra.2021.3076955","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,10]]}}}