{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,9]],"date-time":"2025-11-09T03:54:51Z","timestamp":1762660491233,"version":"3.44.0"},"reference-count":37,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"9","license":[{"start":{"date-parts":[[2024,9,1]],"date-time":"2024-09-01T00:00:00Z","timestamp":1725148800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,9,1]],"date-time":"2024-09-01T00:00:00Z","timestamp":1725148800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,9,1]],"date-time":"2024-09-01T00:00:00Z","timestamp":1725148800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100018537","name":"National Science and Technology Major Project","doi-asserted-by":"publisher","award":["2021ZD0112002"],"award-info":[{"award-number":["2021ZD0112002"]}],"id":[{"id":"10.13039\/501100018537","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U22A2057","61991411","62076148"],"award-info":[{"award-number":["U22A2057","61991411","62076148"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shandong Excellent Young Scientists Fund Program","award":["2022HWYQ-042"],"award-info":[{"award-number":["2022HWYQ-042"]}]},{"name":"Young Taishan Scholars Program of Shandong Province","award":["tsqn201909029"],"award-info":[{"award-number":["tsqn201909029"]}]},{"name":"Project for Self-Developed Innovation Team of Jinan 
City","award":["2021GXRC038"],"award-info":[{"award-number":["2021GXRC038"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2024,9]]},"DOI":"10.1109\/lra.2024.3418310","type":"journal-article","created":{"date-parts":[[2024,6,24]],"date-time":"2024-06-24T16:10:31Z","timestamp":1719245431000},"page":"8138-8145","source":"Crossref","is-referenced-by-count":2,"title":["MCLER: Multi-Critic Continual Learning With Experience Replay for Quadruped Gait Generation"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-5365-2826","authenticated-orcid":false,"given":"Maoqi","family":"Liu","sequence":"first","affiliation":[{"name":"School of Control Science and Engineering, Shandong University, Jinan, China"}]},{"given":"Yanyun","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Control Science and Engineering, Shandong University, Jinan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1344-4415","authenticated-orcid":false,"given":"Ran","family":"Song","sequence":"additional","affiliation":[{"name":"School of Control Science and Engineering, Shandong University, Jinan, China"}]},{"given":"Longyue","family":"Qian","sequence":"additional","affiliation":[{"name":"School of Control Science and Engineering, Shandong University, Jinan, China"}]},{"given":"Xing","family":"Fang","sequence":"additional","affiliation":[{"name":"School of Control Science and Engineering, Shandong University, Jinan, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-4197-142X","authenticated-orcid":false,"given":"Wenhao","family":"Tan","sequence":"additional","affiliation":[{"name":"School of Control Science and Engineering, Shandong University, Jinan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5906-5074","authenticated-orcid":false,"given":"Yibin","family":"Li","sequence":"additional","affiliation":[{"name":"School of Control Science and 
Engineering, Shandong University, Jinan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4960-3190","authenticated-orcid":false,"given":"Wei","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Control Science and Engineering, Shandong University, Jinan, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1126\/science.150.3697.701"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1111\/j.1469-7998.1978.tb03334.x"},{"key":"ref3","first-page":"235","article-title":"Speed control in animal locomotion: Transitions between symmetrical and nonsymmetrical gaits in the dog","volume":"43","author":"Afelt","year":"1983","journal-title":"Acta Neurobiol Exp"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/292239a0"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abc5986"},{"key":"ref6","first-page":"928","article-title":"Minimizing energy consumption leads to the emergence of gaits in legged robots","volume-title":"Proc. Conf. Robot Learn.","author":"Fu","year":"2022"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2022.3186804"},{"key":"ref8","article-title":"RMA: Rapid motor adaptation for legged robots","volume-title":"Proc. Conf. Robot., Sci. Syst.","author":"Kumar","year":"2022"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3177289"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8594448"},{"article-title":"Highly dynamic quadruped locomotion via whole-body impulse control and model predictive control","year":"2019","author":"Kim","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2019.xv.011"},{"key":"ref14","first-page":"317","article-title":"Learning locomotion skills for cassie: Iterative design and sim-to-real","volume-title":"Proc. Conf. 
Robot Learn.","author":"Xie"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2019.12.004"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abb2174"},{"article-title":"Robust recovery controller for a quadrupedal robot using deep reinforcement learning","year":"2019","author":"Lee","key":"ref17"},{"article-title":"Imitate and repurpose: Learning reusable robot movement skills from human and animal behaviors","year":"2022","author":"Bohez","key":"ref18"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2019.01.012"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/j.tics.2020.09.004"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2773081"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.81"},{"article-title":"Never stop learning: The effectiveness of fine-tuning in robotic reinforcement learning","year":"2020","author":"Julian","key":"ref23"},{"key":"ref24","article-title":"Continual reinforcement learning deployed in real-life using policy distillation and sim2real transfer","volume-title":"Proc. Conf. Mach. Learn. Workshop","author":"Traoré","year":"2019"},{"key":"ref25","first-page":"165","article-title":"Multi-critic actor learning: Teaching RL policies to act with style","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Mysore","year":"2021"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/S0921-8890(97)00018-3"},{"key":"ref27","first-page":"2037","article-title":"Adaptive dynamic walking of a quadruped robot Tekken on irregular terrain using a neural system model","volume-title":"Proc. IEEE Int. Conf. Robot. 
Automat.","volume":"2","author":"Fukuoka","year":"2003"},{"volume-title":"Reinforcement Learning: An Introduction","year":"2018","author":"Sutton","key":"ref28"},{"key":"ref29","first-page":"91","article-title":"Learning to walk in minutes using massively parallel deep reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Rudin","year":"2022"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.010"},{"key":"ref31","first-page":"916","article-title":"Policies modulating trajectory generators","volume-title":"Proc. Conf. Robot Learn.","author":"Iscen","year":"2018"},{"key":"ref32","first-page":"773","article-title":"Fast and efficient locomotion via learned gait transitions","volume-title":"Proc. Conf. Robot Learn.","author":"Yang","year":"2022"},{"article-title":"Towards continual reinforcement learning for quadruped robots","year":"2023","author":"Minelli","key":"ref33"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"Schulman","key":"ref34"},{"key":"ref35","first-page":"15885","article-title":"Few-shot image generation with elastic weight consolidation","volume-title":"Proc. Conf. Neural Inf. Process. 
Syst.","author":"Li","year":"2020"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11595"},{"article-title":"Pybullet, a python module for physics simulation for games, robotics and machine learning","year":"2016-2019","author":"Coumans","key":"ref37"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/10601335\/10568997.pdf?arnumber=10568997","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,18]],"date-time":"2025-08-18T19:47:18Z","timestamp":1755546438000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10568997\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9]]},"references-count":37,"journal-issue":{"issue":"9"},"URL":"https:\/\/doi.org\/10.1109\/lra.2024.3418310","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"type":"electronic","value":"2377-3766"},{"type":"electronic","value":"2377-3774"}],"subject":[],"published":{"date-parts":[[2024,9]]}}}