{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T16:38:12Z","timestamp":1757608692913,"version":"3.44.0"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000006","name":"ONR","doi-asserted-by":"publisher","award":["N00014-24-1-2024"],"award-info":[{"award-number":["N00014-24-1-2024"]}],"id":[{"id":"10.13039\/100000006","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,5,19]]},"DOI":"10.1109\/icra55743.2025.11128650","type":"proceedings-article","created":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T17:28:56Z","timestamp":1756834136000},"page":"12557-12564","source":"Crossref","is-referenced-by-count":0,"title":["Curiosity-Driven Imagination: Discovering Plan Operators and Learning Associated Policies for Open-World Adaptation"],"prefix":"10.1109","author":[{"given":"Pierrick","family":"Lorang","sequence":"first","affiliation":[{"name":"Tufts University,Medford,MA,USA,02155"}]},{"given":"Hong","family":"Lu","sequence":"additional","affiliation":[{"name":"Tufts University,Medford,MA,USA,02155"}]},{"given":"Matthias","family":"Scheutz","sequence":"additional","affiliation":[{"name":"Tufts University,Medford,MA,USA,02155"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2024.104111"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/DEVLRN.2019.8850711"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICDL53763.2022.9962230"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/675"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3308061"},{"key":"ref6","first-page":"701","article-title":"Learning neuro-symbolic skills for bilevel planning","volume-title":"Proceedings of The 6th Conference on Robot Learning","author":"Silver"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/icaps.v31i1.16001"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1609\/icaps.v30i1.6750"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2023.3311428"},{"key":"ref10","first-page":"508","article-title":"Adapting to the \u201copen world\u201d: The utility of hybrid hierarchical reinforcement learning and symbolic planning","volume-title":"2024 IEEE International Conference on Robotics and Automation (ICRA)","author":"Lorang"},{"volume-title":"Neuro-symbolic world models for adapting to open world novelty","year":"2023","author":"Balloch","key":"ref11"},{"journal-title":"Leveraging approximate symbolic models for reinforcement learning via skill diversity","author":"Guan","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/icaps.v32i1.19846"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.12440"},{"key":"ref15","first-page":"2778","article-title":"Curiosity-driven exploration by self-supervised prediction","volume-title":"Proceedings of the 34th International Conference on Machine Learning, ser. Proceedings of Machine Learning Research","volume":"70","author":"Pathak"},{"key":"ref16","first-page":"6065","article-title":"Ltl and beyond: Formal languages for reward function specification in reinforcement learning","volume-title":"Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence","author":"Camacho"},{"key":"ref17","article-title":"A novelty-centric agent architecture for changing worlds","author":"Muhammad","year":"2021","journal-title":"AAMAS"},{"key":"ref18","article-title":"Spotter: Extending symbolic planning operators through targeted reinforcement learning","author":"Sarathy","year":"2021","journal-title":"AAMAS"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1002\/aaai.12087"},{"journal-title":"Trajectory-wise multiple choice learning for dynamics generalization in reinforcement learning","year":"2020","author":"Seo","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i9.21223"},{"key":"ref22","article-title":"Single-life reinforcement learning","author":"Chen","year":"2022","journal-title":"NeurIPS"},{"volume-title":"A framework for following temporal logic instructions with unknown causal dependencies","year":"2022","author":"Xu","key":"ref23"},{"key":"ref24","article-title":"Speeding-up continual learning through information gains in novel experiences","author":"Lorang","year":"2022","journal-title":"4th Planning and Reinforcement Learning (PRL) Workshop at IJCAI-2022"},{"key":"ref25","doi-asserted-by":"crossref","DOI":"10.1109\/IROS58592.2024.10801627","article-title":"A framework for neurosymbolic goal-conditioned continual learning in open world environments","volume-title":"Proceedings of the International Conference on Intelligent Robots and Systems","author":"Lorang"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1017\/S0269888918000188"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1613\/jair.5575"},{"key":"ref28","first-page":"5410","article-title":"Learning constraint-based planning models from demonstrations","volume-title":"2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","author":"Loula"},{"key":"ref29","first-page":"3182","article-title":"Learning symbolic operators for task and motion planning","volume-title":"2021 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","author":"Silver"},{"key":"ref30","doi-asserted-by":"crossref","DOI":"10.1109\/IROS47612.2022.9981440","volume-title":"Learning neuro-symbolic relational transition models for bilevel planning","author":"Chitnis","year":"2022"},{"key":"ref31","article-title":"Dream architecture: a developmental approach to open-ended learning in robotics","author":"Doncieux","year":"2020","journal-title":"arXiv preprint"},{"issue":"13","key":"ref32","first-page":"11782","article-title":"Glib: Efficient exploration for relational model-based reinforcement learning via goal-literal babbling","volume-title":"Proceedings of the AAAI Conference on Artificial Intelligence","volume":"35","author":"Chitnis"},{"key":"ref33","first-page":"2238","article-title":"Flexible and efficient long-range planning through curious exploration","volume-title":"International Conference on Machine Learning","author":"Curtis"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-47546-7_9"},{"journal-title":"Learning to utilize shaping rewards: A new approach of reward shaping","year":"2020","author":"Hu","key":"ref35"},{"key":"ref36","first-page":"111","article-title":"Learning intrinsic rewards as a bi-level optimization problem","volume-title":"Proceedings of the 36th Conference on Uncertainty in Artificial Intelligence (UAI)","author":"Stadie"},{"key":"ref37","doi-asserted-by":"crossref","DOI":"10.1109\/IROS.2017.8206234","volume-title":"Reinforcement learning with temporal logic rewards","author":"Li","year":"2017"},{"key":"ref38","article-title":"Environment-independent task specifications via gltl","volume-title":"ArXiv","author":"Littman","year":"2017"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.14063"},{"journal-title":"Pddl","year":"1998","author":"McDermott","key":"ref40"},{"journal-title":"robosuite: A modular simulation framework and benchmark for robot learning","year":"2020","author":"Zhu","key":"ref41"},{"key":"ref42","first-page":"767","article-title":"Surreal: Open-source reinforcement learning framework and robot manipulation benchmark","volume-title":"Proceedings of The 2nd Conference on Robot Learning, ser. Proceedings of Machine Learning Research","volume":"87","author":"Fan"}],"event":{"name":"2025 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2025,5,19]]},"location":"Atlanta, GA, USA","end":{"date-parts":[[2025,5,23]]}},"container-title":["2025 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11127273\/11127223\/11128650.pdf?arnumber=11128650","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T06:12:32Z","timestamp":1756879952000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11128650\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,19]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/icra55743.2025.11128650","relation":{},"subject":[],"published":{"date-parts":[[2025,5,19]]}}}