{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:19:46Z","timestamp":1776885586801,"version":"3.51.2"},"reference-count":44,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2021,7,1]],"date-time":"2021-07-01T00:00:00Z","timestamp":1625097600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,7,1]],"date-time":"2021-07-01T00:00:00Z","timestamp":1625097600000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,7,1]],"date-time":"2021-07-01T00:00:00Z","timestamp":1625097600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,7,1]],"date-time":"2021-07-01T00:00:00Z","timestamp":1625097600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"NSF","award":["#1750082"],"award-info":[{"award-number":["#1750082"]}]},{"name":"Samsung Research"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2021,7]]},"DOI":"10.1109\/lra.2021.3068906","type":"journal-article","created":{"date-parts":[[2021,3,25]],"date-time":"2021-03-25T19:58:14Z","timestamp":1616702294000},"page":"4425-4432","source":"Crossref","is-referenced-by-count":19,"title":["Efficient Robotic Object Search Via HIEM: Hierarchical Policy Learning With Intrinsic-Extrinsic Modeling"],"prefix":"10.1109","volume":"6","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1962-9031","authenticated-orcid":false,"given":"Xin","family":"Ye","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0126-8976","authenticated-orcid":false,"given":"Yezhou","family":"Yang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00387"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00286"},{"key":"ref33","article-title":"From seeing to moving: A survey on learning for visual indoor navigation (vin)","author":"ye","year":"2020"},{"key":"ref32","article-title":"Hierarchical reinforcement learning via advantage-weighted information maximization","author":"osa","year":"0","journal-title":"Proc Int Conf on Learn Representations"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00430"},{"key":"ref30","first-page":"53","article-title":"Neural modular control for embodied question answering","author":"das","year":"0","journal-title":"Proc Conf on Robot Learn"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2967677"},{"key":"ref36","article-title":"Visual semantic navigation using scene priors","author":"yang","year":"0","journal-title":"Proc Int Conf on Learn Representations"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2930426"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ECMR.2019.8870964"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202312"},{"key":"ref40","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593871"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793493"},{"key":"ref15","first-page":"37","article-title":"Look before you leap: Bridging model-free and model-based reinforcement learning for planned-ahead vision-and-language navigation","author":"wang","year":"0","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00679"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593720"},{"key":"ref18","article-title":"Why does hierarchy (sometimes) work so well in reinforcement learning","author":"nachum","year":"2019"},{"key":"ref19","first-page":"3675","article-title":"Hierarchical deep reinforcement learning: Integrating temporal abstraction and intrinsic motivation","author":"kulkarni","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref28","first-page":"166","article-title":"Modular multitask reinforcement learning with policy sketches","author":"andreas","year":"0","journal-title":"Proc 34th Int Conf Mach Learn -Volume 70"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2743240"},{"key":"ref27","article-title":"Hierarchical actor-critic","author":"levy","year":"2017"},{"key":"ref3","article-title":"Benchmarking classic and learned navigation in complex 3d environments","author":"mishkin","year":"2019"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989381"},{"key":"ref29","first-page":"7156","article-title":"Hierarchical reinforcement learning for zero-shot generalization with subtask dependencies","author":"sohn","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref5","article-title":"Learning to navigate in complex environments","author":"mirowski","year":"0","journal-title":"Proc Int Conf on Learn Representations"},{"key":"ref8","article-title":"Data-efficient deep reinforcement learning for dexterous manipulation","author":"popov","year":"2017"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989385"},{"key":"ref2","article-title":"To learn or not to learn: Analyzing the role of learning for navigation in virtual environments","author":"kojima","year":"2019"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.049"},{"key":"ref1","first-page":"2054","article-title":"Embodied question answering","author":"das","year":"0","journal-title":"Proc IEEE Conf Comp Vis Pattern Recognit"},{"key":"ref20","first-page":"2917","article-title":"Hierarchical imitation and reinforcement learning","author":"le","year":"0","journal-title":"Proc IEEE Intern Conf on Machine Learning"},{"key":"ref22","first-page":"3307","article-title":"Data-efficient hierarchical reinforcement learning","author":"nachum","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref21","article-title":"Hierarchical reinforcement learning with hindsight","author":"levy","year":"2018"},{"key":"ref42","first-page":"2094","article-title":"Deep reinforcement learning with double q-learning","author":"van hasselt","year":"0","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"ref24","article-title":"Hierarchical policy learning is sensitive to goal space design","author":"dwiel","year":"2019"},{"key":"ref41","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref23","article-title":"Near-optimal representation learning for hierarchical reinforcement learning","author":"nachum","year":"0","journal-title":"Proc Int Conf on Learn Representations"},{"key":"ref44","article-title":"Hierarchical and partially observable goal-driven policy learning with goals relational graph","author":"ye","year":"0","journal-title":"Proc IEEE Conf Comput Vis and Pattern Recog"},{"key":"ref26","article-title":"Building generalizable agents with a realistic and rich 3d environment","author":"wu","year":"2018"},{"key":"ref43","article-title":"On evaluation of embodied navigation agents","author":"anderson","year":"2018"},{"key":"ref25","first-page":"1726","article-title":"The option-critic architecture","author":"bacon","year":"0","journal-title":"Proc 31st AAAI Conf Artif Intell"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielam\/7083369\/9399748\/9387146-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/9399748\/09387146.pdf?arnumber=9387146","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:54:02Z","timestamp":1652194442000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9387146\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,7]]},"references-count":44,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/lra.2021.3068906","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,7]]}}}