{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,28]],"date-time":"2026-02-28T18:09:44Z","timestamp":1772302184732,"version":"3.50.1"},"reference-count":20,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2019,4,1]],"date-time":"2019-04-01T00:00:00Z","timestamp":1554076800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,4,1]],"date-time":"2019-04-01T00:00:00Z","timestamp":1554076800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,4,1]],"date-time":"2019-04-01T00:00:00Z","timestamp":1554076800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100008227","name":"Achievement Rewards for College Scientists Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100008227","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2019,4]]},"DOI":"10.1109\/lra.2019.2895892","type":"journal-article","created":{"date-parts":[[2019,1,29]],"date-time":"2019-01-29T20:07:04Z","timestamp":1548792424000},"page":"1387-1394","source":"Crossref","is-referenced-by-count":53,"title":["Rover-IRL: Inverse Reinforcement Learning With Soft Value Iteration Networks for Planetary Rover Path Planning"],"prefix":"10.1109","volume":"4","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0667-0518","authenticated-orcid":false,"given":"Max","family":"Pflueger","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5509-1841","authenticated-orcid":false,"given":"Ali","family":"Agha","sequence":"additional","affiliation":[]},{"given":"Gaurav S.","family":"Sukhatme","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"1107","article-title":"Least-squares policy iteration","volume":"4","author":"lagoudakis","year":"2003","journal-title":"J Mach Learn Res"},{"key":"ref11","first-page":"4697","article-title":"Qmdp-net: Deep learning for planning under partial observability","author":"karkus","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2007.368206"},{"key":"ref13","first-page":"3191","article-title":"The predictron: End-to-end learning and planning","volume":"70","author":"silver","year":"0","journal-title":"Proc 34th Int Conf Mach Learn"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1177\/0278364910369715"},{"key":"ref15","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","volume":"8","author":"ziebart","year":"2008","journal-title":"Proc 23rd AAAI Conf Artif Intell"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2016.7759328"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/s10514-009-9121-3"},{"key":"ref18","article-title":"Maximum entropy deep inverse reinforcement learning","author":"wulfmeier","year":"0","journal-title":"Proc Neural Inf Process Syst Deep Reinforc Learn Workshop"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2015.2509024"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/MRA.2010.936946"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/700"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref5","first-page":"115","article-title":"Productivity challenges for mars rover operations","author":"gaines","year":"2016","journal-title":"Proc 4th Workshop Planning Robot"},{"key":"ref8","article-title":"High-dimensional continuous control using generalized advantage estimation","author":"schulman","year":"0","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref7","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"0","journal-title":"Proc Int Conf Learn Represent"},{"key":"ref2","author":"bertsekas","year":"1995","journal-title":"Dynamic Programming and Optimal Control"},{"key":"ref1","author":"bellman","year":"1957","journal-title":"Dynamic Programming"},{"key":"ref9","article-title":"Stochastic motion planning for hopping rovers on small solar system bodies","author":"hockman","year":"0","journal-title":"Proc Int Symp Robot Res"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/AERO.2016.7500597"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/8581687\/08629318.pdf?arnumber=8629318","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,13]],"date-time":"2022-07-13T20:58:54Z","timestamp":1657745934000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8629318\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,4]]},"references-count":20,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/lra.2019.2895892","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019,4]]}}}