{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T22:01:40Z","timestamp":1770933700811,"version":"3.50.1"},"reference-count":89,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cogn. Dev. 
Syst."],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1109\/tcds.2025.3565515","type":"journal-article","created":{"date-parts":[[2025,4,29]],"date-time":"2025-04-29T13:33:15Z","timestamp":1745933595000},"page":"113-127","source":"Crossref","is-referenced-by-count":0,"title":["Task-Agnostic Learning to Accomplish New Tasks"],"prefix":"10.1109","volume":"18","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0617-2965","authenticated-orcid":false,"given":"Xianqi","family":"Zhang","sequence":"first","affiliation":[{"name":"Faculty of Computing, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5763-2493","authenticated-orcid":false,"given":"Xingtao","family":"Wang","sequence":"additional","affiliation":[{"name":"Faculty of Computing, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-7258-8400","authenticated-orcid":false,"given":"Xu","family":"Liu","sequence":"additional","affiliation":[{"name":"Faculty of Computing, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-8289-5942","authenticated-orcid":false,"given":"Wenrui","family":"Wang","sequence":"additional","affiliation":[{"name":"Faculty of Computing, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9660-3636","authenticated-orcid":false,"given":"Xiaopeng","family":"Fan","sequence":"additional","affiliation":[{"name":"Faculty of Computing, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3434-9967","authenticated-orcid":false,"given":"Debin","family":"Zhao","sequence":"additional","affiliation":[{"name":"Faculty of Computing, Harbin Institute of Technology, Harbin, 
China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/JSEN.2023.3249625"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/LSENS.2023.3277433"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/SENSORS52175.2022.9967001"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3306647"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3257680"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2022.3203752"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3207346"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3054912"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-021-09997-9"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2929141"},{"key":"ref12","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3084685"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9562061"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2023.3284399"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3236884"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561388"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9562061"},{"key":"ref19","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref20","first-page":"131","article-title":"Inverse reward design","volume":"30","author":"Hadfield-Menell","year":"2017","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref21","first-page":"1234","article-title":"Assisted robust reward design","volume-title":"Proc. Conf. 
Robot Learn","author":"Z","year":"2022"},{"key":"ref22","first-page":"1179","article-title":"Conservative q-learning for offline reinforcement learning","volume":"33","author":"Kumar","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref23","first-page":"28954","article-title":"COMBO: Conservative offline model-based policy optimization","volume":"34","author":"Yu","year":"2021","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1561\/2300000053"},{"key":"ref25","first-page":"49","article-title":"Guided cost learning: Deep inverse optimal control via policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Finn","year":"2016"},{"key":"ref26","article-title":"Generative adversarial imitation learning","volume":"29","author":"Ho","year":"2016","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/687"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3357847"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3367329"},{"key":"ref30","article-title":"A survey of exploration methods in reinforcement learning","author":"Amin","year":"2021"},{"key":"ref31","first-page":"4870","article-title":"Reward-free exploration for reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Jin","year":"2020"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i10.17091"},{"key":"ref33","first-page":"11734","article-title":"Task-agnostic exploration in reinforcement learning","volume":"33","author":"Zhang","year":"2020","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref34","first-page":"20516","article-title":"Interesting object, curious agent: Learning task-agnostic exploration","volume":"34","author":"Parisi","year":"2021","journal-title":"Adv. Neur. Inf. Process. 
Syst."},{"key":"ref35","first-page":"7207","article-title":"Goal-aware prediction: Learning to model what matters","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Nair","year":"2020"},{"key":"ref36","first-page":"131","article-title":"Learning with AMIGo: Adversarially motivated intrinsic goals","volume":"34","author":"Campero","year":"2020","journal-title":"Int. Conf. Learn. Represent."},{"key":"ref37","first-page":"24379","article-title":"Discovering and achieving goals via world models","volume":"34","author":"Mendonca","year":"2021","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/770"},{"key":"ref39","article-title":"Walk the random walk: Learning to discover and reach goals without supervision","author":"Mezghani","year":"2022","journal-title":"presented at the ICLR Workshop Agent Learn. Open-Endedness"},{"key":"ref40","article-title":"Minimalistic gridworld environment for openai gym","author":"Chevalier-Boisvert","year":"2018"},{"key":"ref41","first-page":"1025","article-title":"Relay policy learning: Solving long-horizon tasks via imitation and reinforcement learning","volume-title":"Proc. Conf. Robot Learn","author":"Gupta","year":"2020"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.32657\/10356\/90191"},{"key":"ref44","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Fujimoto","year":"2019"},{"key":"ref45","first-page":"20118","article-title":"Explicable reward design for reinforcement learning agents","volume":"34","author":"Devidze","year":"2021","journal-title":"Adv. Neur. Inf. Process. 
Syst."},{"key":"ref46","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"Levine","year":"2020"},{"key":"ref47","article-title":"A fine-grained analysis on distribution shift","author":"Wiles","year":"2021"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/882"},{"key":"ref49","article-title":"A connection between generative adversarial networks, inverse reinforcement learning, and energy-based models","author":"Finn","year":"2016"},{"key":"ref50","article-title":"Learning human behaviors from motion capture by adversarial imitation","author":"Merel","year":"2017"},{"key":"ref51","first-page":"6818","article-title":"Imitation learning from imperfect demonstration","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Wu","year":"2019"},{"key":"ref52","first-page":"4028","article-title":"IQ-Learn: Inverse soft-Q learning for imitation","volume":"34","author":"Garg","year":"2021","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3068912"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989324"},{"key":"ref55","first-page":"131","article-title":"Learning to poke by poking: Experiential learning of intuitive physics","volume":"29","author":"Agrawal","year":"2016","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref56","first-page":"1755","article-title":"Imitating latent policies from observation","author":"Edwards","year":"2019","journal-title":"Int. Conf. Mach. Learn"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00030"},{"key":"ref58","first-page":"135","article-title":"Third-person visual imitation learning via decoupled hierarchical controller","volume":"32","author":"Sharma","year":"2019","journal-title":"Adv. Neural Inf. Process. 
Syst."},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2805379"},{"key":"ref60","first-page":"125","article-title":"Feudal reinforcement learning","volume":"5","author":"Dayan","year":"1992","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref61","article-title":"Reinforcement learning with hierarchies of machines","volume":"10","author":"Parr","year":"1997","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1613\/jair.639"},{"key":"ref64","first-page":"3540","article-title":"Feudal networks for hierarchical reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Vezhnevets","year":"2017"},{"key":"ref65","first-page":"1113","article-title":"Learning latent plans from play","volume-title":"Proc. Conf. Robot Learn","author":"Lynch","year":"2020"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3079209"},{"key":"ref67","first-page":"1126","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Finn","year":"2017"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00445"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5752"},{"key":"ref70","article-title":"A survey of meta-reinforcement learning","author":"Beck","year":"2023"},{"key":"ref71","first-page":"7780","article-title":"Offline meta-reinforcement learning with advantage weighting","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Mitchell","year":"2021"},{"key":"ref72","first-page":"10161","article-title":"Model-based adversarial meta-reinforcement learning","volume":"33","author":"Lin","year":"2020","journal-title":"Adv. Neur. Inf. Process. 
Syst."},{"key":"ref73","article-title":"Focal: Efficient fully-offline meta-reinforcement learning via distance metric learning and behavior regularization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Li","year":"2020"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2019.12.004"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.3233\/FAIA200193"},{"key":"ref76","first-page":"11588","article-title":"Look-ahead meta learning for continual learning","volume":"33","author":"Gupta","year":"2020","journal-title":"Adv. Neur. Inf. Process. Syst."},{"key":"ref77","first-page":"123","article-title":"A definition of continual reinforcement learning","volume":"36","author":"Abel","year":"2024","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.13673"},{"key":"ref79","first-page":"89","article-title":"Task-agnostic continual reinforcement learning: Gaining insights and overcoming challenges","author":"Caccia","year":"2023","journal-title":"Conf. Lifelong Learn. Agents"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.70"},{"key":"ref81","article-title":"Exploration by random network distillation","author":"Burda","year":"2018"},{"key":"ref82","article-title":"Never give up: Learning directed exploration strategies","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Badia","year":"2019"},{"key":"ref83","first-page":"8583","article-title":"Planning to explore via self-supervised world models","volume-title":"Proc. Int. Conf. Mach. 
Learn","author":"Sekar","year":"2020"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/577"},{"key":"ref85","article-title":"Pybullet, a python module for physics simulation for games, robotics and machine learning","author":"Coumans","year":"2016"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.11164"},{"key":"ref87","article-title":"Td-mpc2: Scalable, robust world models for continuous control","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Hansen","year":"2024"},{"key":"ref88","first-page":"121","article-title":"Meta-DT: Offline meta-RL as conditional sequence modeling with world model disentanglement","author":"Wang","year":"2024","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref89","article-title":"Improving language understanding by generative pre-training","author":"Radford"}],"container-title":["IEEE Transactions on Cognitive and Developmental Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7274989\/11392775\/10979777.pdf?arnumber=10979777","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T21:01:35Z","timestamp":1770930095000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10979777\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2]]},"references-count":89,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tcds.2025.3565515","relation":{},"ISSN":["2379-8920","2379-8939"],"issn-type":[{"value":"2379-8920","type":"print"},{"value":"2379-8939","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2]]}}}