{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,13]],"date-time":"2025-12-13T23:09:36Z","timestamp":1765667376757,"version":"3.37.3"},"reference-count":29,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2022,7]]},"DOI":"10.1109\/lra.2022.3174258","type":"journal-article","created":{"date-parts":[[2022,5,11]],"date-time":"2022-05-11T20:01:20Z","timestamp":1652299280000},"page":"6590-6597","source":"Crossref","is-referenced-by-count":10,"title":["Developing Cooperative Policies for Multi-Stage Reinforcement Learning Tasks"],"prefix":"10.1109","volume":"7","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3932-6142","authenticated-orcid":false,"given":"Jordan","family":"Erskine","sequence":"first","affiliation":[{"name":"Queensland University of Technology, Brisbane, QLD, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4230-8068","authenticated-orcid":false,"given":"Christopher","family":"Lehnert","sequence":"additional","affiliation":[{"name":"Queensland University of Technology, Brisbane, QLD, Australia"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Why does hierarchy (sometimes) work so well in reinforcement learning?","volume-title":"Proc. NeurIPS Deep RL Workshop","author":"Nachum","year":"2019"},{"key":"ref2","article-title":"Near-optimal representation learning for hierarchical reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Nachum","year":"2019"},{"key":"ref3","article-title":"MCP: Learning composable hierarchical control with multiplicative compositional policies","volume-title":"Adv. Neural Inf. Process. Syst.","volume":"32","author":"Peng","year":"2019"},{"key":"ref4","article-title":"Hierarchical reinforcement learning with advantage-based auxiliary rewards","volume-title":"Adv. Neural Inf. Process. Syst.","volume":"32","author":"Li","year":"2019"},{"key":"ref5","first-page":"11340","article-title":"Data-efficient hindsight off-policy option learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wulfmeier","year":"2021"},{"key":"ref6","first-page":"3540","article-title":"Feudal networks for hierarchical reinforcement learning","volume-title":"Proc. 34th Int. Conf. Mach. Learn.","volume":"70","author":"Vezhnevets","year":"2017"},{"key":"ref7","first-page":"3303","article-title":"Data-efficient hierarchical reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Nachum","year":"2018"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.12785\/ijcds\/040207"},{"volume-title":"Reinforcement Learning: An Introduction","year":"2018","author":"Sutton","key":"ref9"},{"key":"ref10","article-title":"Meta learning shared hierarchies","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Frans","year":"2018"},{"key":"ref11","first-page":"2961","article-title":"Actor-attention-critic for multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Iqbal","year":"2019"},{"key":"ref12","first-page":"2661","article-title":"Zero-shot task generalization with multi-task deep reinforcement learning","volume-title":"Proc. 34th Int. Conf. Mach. Learn.","volume":"70","author":"Oh","year":"2017"},{"key":"ref13","first-page":"2107","article-title":"Using reward machines for high-level task specification and decomposition in reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Icarte","year":"2018"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10916"},{"key":"ref15","article-title":"Hierarchical soft actor-critic: Adversarial exploration via mutual information optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Azarafrooz","year":"2019"},{"key":"ref16","article-title":"Relay policy learning: Solving long-horizon tasks via imitation and reinforcement learning","volume-title":"Proc. Conf. Robot. Learn.","author":"Gupta","year":"2019"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10744"},{"key":"ref18","first-page":"166","article-title":"Modular multitask reinforcement learning with policy sketches","volume-title":"Proc. 34th Int. Conf. Mach. Learn.","volume":"70","author":"Andreas","year":"2017"},{"key":"ref19","first-page":"1851","article-title":"Latent space policies for hierarchical reinforcement learning","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","volume":"80","author":"Haarnoja","year":"2018"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8968149"},{"key":"ref21","article-title":"Dynamics-aware unsupervised discovery of skills","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Sharma","year":"2020"},{"key":"ref22","article-title":"Intrinsically motivated goal exploration processes with automatic curriculum learning","volume-title":"J. Mach. Learn. Res.","author":"Forestier","year":"2022"},{"key":"ref23","article-title":"Stochastic neural networks for hierarchical reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Florensa","year":"2017"},{"key":"ref24","article-title":"Composing complex skills by learning transition policies","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lee","year":"2019"},{"article-title":"Adversarial skill chaining for long-horizon robot manipulation via terminal state regularization","year":"2021","author":"Lee","key":"ref25"},{"article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","year":"2018","author":"Haarnoja","key":"ref26"},{"key":"ref27","article-title":"Soft actor-critic algorithms and applications","volume-title":"CoRR","volume":"abs\/1812.05905","author":"Haarnoja","year":"2018"},{"key":"ref28","article-title":"RLkit"},{"key":"ref29","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schulman","year":"2015"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/9750005\/09772966.pdf?arnumber=9772966","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,22]],"date-time":"2024-01-22T22:01:40Z","timestamp":1705960900000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9772966\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7]]},"references-count":29,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/lra.2022.3174258","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"type":"electronic","value":"2377-3766"},{"type":"electronic","value":"2377-3774"}],"subject":[],"published":{"date-parts":[[2022,7]]}}}