{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,6]],"date-time":"2025-11-06T11:44:17Z","timestamp":1762429457622,"version":"3.37.3"},"reference-count":28,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2021,4,1]],"date-time":"2021-04-01T00:00:00Z","timestamp":1617235200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,4,1]],"date-time":"2021-04-01T00:00:00Z","timestamp":1617235200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,4,1]],"date-time":"2021-04-01T00:00:00Z","timestamp":1617235200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2021,4]]},"DOI":"10.1109\/lra.2021.3062572","type":"journal-article","created":{"date-parts":[[2021,2,26]],"date-time":"2021-02-26T20:37:52Z","timestamp":1614371872000},"page":"3192-3199","source":"Crossref","is-referenced-by-count":17,"title":["Learning to Assist Drone Landings"],"prefix":"10.1109","volume":"6","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6898-4945","authenticated-orcid":false,"given":"Kal","family":"Backman","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4169-2141","authenticated-orcid":false,"given":"Dana","family":"Kulic","sequence":"additional","affiliation":[]},{"given":"Hoam","family":"Chung","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"9784","article-title":"Neural lander: Stable drone landing control using learned dynamics","author":"shi","year":"2018","journal-title":"Comput Res Repository"},{"doi-asserted-by":"publisher","key":"ref11","DOI":"10.1145\/2909824.3020252"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1177\/0278364918776060"},{"key":"ref13","first-page":"1121","article-title":"Telemanipulation assistance based on motion intention recognition","author":"yu","year":"0"},{"doi-asserted-by":"publisher","key":"ref14","DOI":"10.15607\/RSS.2018.XIV.005"},{"doi-asserted-by":"publisher","key":"ref15","DOI":"10.3390\/s150922003"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1109\/IROS.2017.8205962"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.1109\/CVPR.2018.00017"},{"key":"ref18","article-title":"Auto-encoding variational bayes","author":"kingma","year":"2014","journal-title":"Int Conf Learn Representations"},{"key":"ref19","first-page":"1637","article-title":"Learning visuomotor policies for aerial navigation using cross-modal representations","author":"bonatti","year":"0","journal-title":"Proc IEEE\/RSJ Int Conf Intell Robots Syst"},{"year":"2011","author":"jeff","article-title":"A practical guide to the system usability scale (SUS)","key":"ref28"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.3390\/drones2040034"},{"key":"ref27","article-title":"SUS - A quick and dirty usability scale","author":"brooke","year":"2006","journal-title":"Usability Evaluation in Industry"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1109\/SSRR.2017.8088164"},{"key":"ref6","first-page":"176","article-title":"Shared control for intelligent wheelchairs: An implicit estimation of the user intention","author":"vanhooydonck","year":"0","journal-title":"Proc 1st Int Workshop Adv Serv Robot"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.1109\/ICSMC.2003.1244504"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.1177\/0278364917690593"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1109\/THMS.2017.2647882"},{"key":"ref2","article-title":"Autonomous quadrotor landing using deep reinforcement learning","author":"polvara","year":"2017","journal-title":"Comput Res Repository"},{"doi-asserted-by":"publisher","key":"ref9","DOI":"10.23919\/ChiCC.2017.8029145"},{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1155\/2017\/1823056"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1109\/LRA.2018.2795643"},{"doi-asserted-by":"publisher","key":"ref22","DOI":"10.1007\/978-3-319-67361-5_40"},{"doi-asserted-by":"publisher","key":"ref21","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref24","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"Int Conf Learn Representations"},{"key":"ref23","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume":"80","author":"fujimoto","year":"0","journal-title":"Proc 35th Int Conf Mach Learn"},{"doi-asserted-by":"publisher","key":"ref26","DOI":"10.1103\/PhysRev.36.823"},{"key":"ref25","first-page":"2613","article-title":"Double q-learning","volume":"23","author":"hasselt","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/9285111\/09364669.pdf?arnumber=9364669","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:54:28Z","timestamp":1652194468000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9364669\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,4]]},"references-count":28,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/lra.2021.3062572","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"type":"electronic","value":"2377-3766"},{"type":"electronic","value":"2377-3774"}],"subject":[],"published":{"date-parts":[[2021,4]]}}}