{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T16:16:12Z","timestamp":1774455372214,"version":"3.50.1"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icra40945.2020.9196749","type":"proceedings-article","created":{"date-parts":[[2020,9,15]],"date-time":"2020-09-15T21:25:46Z","timestamp":1600205146000},"page":"754-760","source":"Crossref","is-referenced-by-count":20,"title":["Knowledge-Guided Reinforcement Learning Control for Robotic Lower Limb Prosthesis"],"prefix":"10.1109","author":[{"given":"Xiang","family":"Gao","sequence":"first","affiliation":[]},{"given":"Jennie","family":"Si","sequence":"additional","affiliation":[]},{"given":"Yue","family":"Wen","sequence":"additional","affiliation":[]},{"given":"Minhan","family":"Li","sequence":"additional","affiliation":[]},{"given":"He Helen","family":"Huang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"crossref","first-page":"2215","DOI":"10.1109\/TNNLS.2016.2584559","article-title":"A New Powered Lower Limb Prosthesis Control Framework Based on Adaptive Dynamic Programming","volume":"28","author":"wen","year":"2017","journal-title":"IEEE Trans Neural Networks Learn Syst"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2019.2890974"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794212"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-13-7983-3_49"},{"key":"ref14","first-page":"1633","article-title":"Transfer Learning for Reinforcement Learning Domains : A Survey","volume":"10","author":"taylor","year":"2009","journal-title":"J Mach Learn Res"},{"key":"ref15","article-title":"Successor features for transfer in reinforcement learning","author":"barreto","year":"2017","journal-title":"Adv Neural Inf Process Syst"},{"key":"ref16","article-title":"Effective control knowledge transfer through learning skill and representation hierarchies","author":"asadi","year":"2007","journal-title":"Inter Joint Conf Artif Intell IJCAI-83"},{"key":"ref17","article-title":"Schema networks: Zero-shot transfer with a generative causal model of physics intuitive","author":"kansky","year":"2017","journal-title":"34th Int Conf Mach Learn ICML 2017"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8462977"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1002\/jor.1100080310"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2018.2794536"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TNSRE.2018.2810165"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1177\/0278364907084588"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.2011.2160173"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8463162"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-013-9979-3"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2008.03.006"},{"key":"ref1","doi-asserted-by":"crossref","DOI":"10.1109\/TRO.2008.2008747","article-title":"Powered ankle-foot prosthesis improves walking metabolic economy","author":"au","year":"2009","journal-title":"IEEE Trans Robot"},{"key":"ref9","article-title":"Trajectory-based Deep Latent Policy Gradient for Learning Locomotion Behaviors","author":"choi","year":"2019","journal-title":"IEEE Int Conf Robot Autom"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/72.914523"},{"key":"ref22","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref21","author":"bellman","year":"1957","journal-title":"Dynamic Programming"},{"key":"ref24","article-title":"From the Ground Up: Building a Passive Dynamic Walker Model","author":"hicks","year":"2014"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/9780470544785"}],"event":{"name":"2020 IEEE International Conference on Robotics and Automation (ICRA)","location":"Paris, France","start":{"date-parts":[[2020,5,31]]},"end":{"date-parts":[[2020,8,31]]}},"container-title":["2020 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9187508\/9196508\/09196749.pdf?arnumber=9196749","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,18]],"date-time":"2022-11-18T14:03:07Z","timestamp":1668780187000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9196749\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icra40945.2020.9196749","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}