{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T01:00:19Z","timestamp":1775523619841,"version":"3.50.1"},"reference-count":56,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Royal Academy of Engineering under the Industrial Fellowship Programme","award":["IF2223-199"],"award-info":[{"award-number":["IF2223-199"]}]},{"name":"China Scholarship Council (CSC) for providing the living stipend for his Ph.D. Programme","award":["202008230171"],"award-info":[{"award-number":["202008230171"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Automat. Sci. Eng."],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1109\/tase.2023.3312237","type":"journal-article","created":{"date-parts":[[2023,9,12]],"date-time":"2023-09-12T17:40:28Z","timestamp":1694540428000},"page":"5457-5472","source":"Crossref","is-referenced-by-count":19,"title":["Efficient Hierarchical Reinforcement Learning for Mapless Navigation With Predictive Neighbouring Space Scoring"],"prefix":"10.1109","volume":"21","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-0094-2148","authenticated-orcid":false,"given":"Yan","family":"Gao","sequence":"first","affiliation":[{"name":"School of Engineering, Cardiff University, Cardiff, U.K"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5123-9861","authenticated-orcid":false,"given":"Jing","family":"Wu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Informatics, Cardiff University, Cardiff, U.K"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7612-614X","authenticated-orcid":false,"given":"Xintong","family":"Yang","sequence":"additional","affiliation":[{"name":"School of Engineering, Cardiff University, Cardiff, U.K"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8968-9902","authenticated-orcid":false,"given":"Ze","family":"Ji","sequence":"additional","affiliation":[{"name":"School of Engineering, Cardiff University, Cardiff, U.K"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.dt.2019.04.011"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-control-042920-092451"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2016.08.001"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593772"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202134"},{"key":"ref6","article-title":"Reinforcement learning based mapless robot navigation","author":"Xie","year":"2019"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196739"},{"key":"ref8","article-title":"Curiosity-driven exploration for mapless navigation with deep reinforcement learning","author":"Zhelo","year":"2018","journal-title":"arXiv:1804.00456"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1177\/1729881421992621"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989381"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO.2018.8664803"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561151"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.3390\/s19071576"},{"key":"ref14","first-page":"1","article-title":"Hierarchical reinforcement learning for robot navigation","volume-title":"Proc. Eur. Symp. Artif. Neural Netw., Comput. Intell. Mach. Learn. (ESANN)","author":"Bischoff"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1038\/nn.4656"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.3389\/frobt.2019.00123"},{"key":"ref17","article-title":"Hierarchical reinforcement learning with abductive planning","author":"Yamamoto","year":"2018","journal-title":"arXiv:1806.10792"},{"key":"ref18","article-title":"Learning multi-level hierarchies with hindsight","author":"Levy","year":"2017","journal-title":"arXiv:1712.00948"},{"key":"ref19","first-page":"3303","article-title":"Data-efficient hierarchical reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Nachum"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636667"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/MRA.2006.1638022"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2019.2894748"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/tase.2022.3198801"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/MACE.2011.5987118"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2020.3010887"},{"key":"ref26","first-page":"1312","article-title":"Universal value function approximators","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schaul"},{"key":"ref27","first-page":"5049","article-title":"Hindsight experience replay","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Andrychowicz"},{"key":"ref28","first-page":"7207","article-title":"Goal-aware prediction: Learning to model what matters","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Nair"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/480"},{"key":"ref30","article-title":"ACTRCE: Augmenting experience via teacher\u2019s advice for multi-goal reinforcement learning","author":"Chan","year":"2019","journal-title":"arXiv:1902.04546"},{"key":"ref31","article-title":"Reward-conditioned policies","author":"Kumar","year":"2019","journal-title":"arXiv:1912.13465"},{"key":"ref32","article-title":"Training agents using upside-down reinforcement learning","author":"Srivastava","year":"2019","journal-title":"arXiv:1912.02877"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3059912"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10916"},{"key":"ref35","first-page":"3540","article-title":"Feudal networks for hierarchical reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Vezhnevets"},{"key":"ref36","first-page":"1025","article-title":"Relay policy learning: Solving long-horizon tasks via imitation and reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Gupta"},{"key":"ref37","article-title":"Hierarchical foresight: Self-supervised learning of long-horizon tasks via visual subgoal generation","author":"Nair","year":"2019","journal-title":"arXiv:1909.05829"},{"key":"ref38","first-page":"5020","article-title":"Sub-goal trees a framework for goal-based reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jurgenson"},{"key":"ref39","article-title":"Divide-and-conquer Monte Carlo tree search for goal-directed planning","author":"Parascandolo","year":"2020","journal-title":"arXiv:2004.11410"},{"key":"ref40","article-title":"Dynamics-aware unsupervised discovery of skills","author":"Sharma","year":"2019","journal-title":"arXiv:1907.01657"},{"key":"ref41","first-page":"15167","article-title":"Search on the replay buffer: Bridging planning and reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Eysenbach"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2021.3072675"},{"key":"ref43","article-title":"DD-PPO: Learning near-perfect PointGoal navigators from 2.5 billion frames","author":"Wijmans","year":"2019","journal-title":"arXiv:1911.00357"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR48806.2021.9413026"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref46","article-title":"Continuous control with deep reinforcement learning","volume-title":"Proc. 4th Int. Conf. Learn. Represent. (ICLR)","author":"Lillicrap"},{"key":"ref47","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58558-7_24"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.3013848"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00945"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref53","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto"},{"key":"ref54","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref55","article-title":"Model-based reinforcement learning for Atari","author":"Kaiser","year":"2019","journal-title":"arXiv:1903.00374"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2019.2899365"}],"container-title":["IEEE Transactions on Automation Science and Engineering"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8856\/10718656\/10248030.pdf?arnumber=10248030","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,21]],"date-time":"2024-10-21T17:27:46Z","timestamp":1729531666000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10248030\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10]]},"references-count":56,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tase.2023.3312237","relation":{},"ISSN":["1545-5955","1558-3783"],"issn-type":[{"value":"1545-5955","type":"print"},{"value":"1558-3783","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10]]}}}