{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T16:02:57Z","timestamp":1772553777630,"version":"3.50.1"},"reference-count":36,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1109\/lra.2024.3417114","type":"journal-article","created":{"date-parts":[[2024,6,20]],"date-time":"2024-06-20T17:47:13Z","timestamp":1718905633000},"page":"9701-9708","source":"Crossref","is-referenced-by-count":5,"title":["Dream to Adapt: Meta Reinforcement Learning by Latent Context Imagination and MDP Imagination"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8197-8195","authenticated-orcid":false,"given":"Lu","family":"Wen","sequence":"first","affiliation":[{"name":"Mechanical Engineering, University of Michigan, Ann Arbor, MI, USA"}]},{"given":"Eric H.","family":"Tseng","sequence":"additional","affiliation":[{"name":"Department of Electrical Engineering, University of Texas at Arlington, Dearborn, MI, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7684-1696","authenticated-orcid":false,"given":"Huei","family":"Peng","sequence":"additional","affiliation":[{"name":"Mechanical Engineering, University of Michigan, Ann Arbor, MI, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3238-5406","authenticated-orcid":false,"given":"Songan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Global Institute of Future Technology, Shanghai Jiao Tong University, Shanghai, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"5331","article-title":"Efficient off-policy meta-reinforcement learning via probabilistic context variables","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rakelly","year":"2019"},{"key":"ref2","author":"Brockman","year":"2016","journal-title":"OpenAI Gym"},{"key":"ref3","first-page":"1094","article-title":"Meta-world: A benchmark and evaluation for multi-task and meta reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Yu","year":"2020"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC48978.2021.9564972"},{"key":"ref5","first-page":"1126","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Finn","year":"2017"},{"key":"ref6","article-title":"On first-order meta-learning algorithms","author":"Nichol","year":"2018"},{"key":"ref7","article-title":"Learning to reinforcement learn","author":"Wang","year":"2016"},{"key":"ref8","article-title":"Rl$^{2}$: Fast reinforcement learning via slow reinforcement learning","author":"Duan","year":"2016"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/DICTA.2016.7797091"},{"key":"ref10","first-page":"7354","article-title":"Self-attention generative adversarial networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhang","year":"2019"},{"key":"ref11","first-page":"8152","article-title":"Data augmentation for meta-learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ni","year":"2021"},{"key":"ref12","first-page":"11887","article-title":"Improving generalization in meta-learning via task augmentation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yao","year":"2021"},{"key":"ref13","article-title":"Dream to control: Learning behaviors by latent imagination","author":"Hafner","year":"2019"},{"key":"ref14","article-title":"Mastering atari with discrete world models","author":"Hafner","year":"2020"},{"key":"ref15","first-page":"27222","article-title":"Improving generalization in meta-rl with imaginary tasks from latent dynamics mixture","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Lee","year":"2021"},{"key":"ref16","first-page":"10161","article-title":"Model-based adversarial meta-reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Lin","year":"2020"},{"key":"ref17","article-title":"Meta-reinforcement learning robust to distributional shift via model identification and experience relabeling","author":"Mendonca","year":"2020"},{"key":"ref18","article-title":"Improving generalization in meta reinforcement learning using learned objectives","author":"Kirsch","year":"2019"},{"key":"ref19","article-title":"Meta-reinforcement learning in broad and non-parametric environments","author":"Bing","year":"2021"},{"key":"ref20","article-title":"Prior is all you need to improve the robustness and safety for the first time deployment of meta rl","author":"Wen","year":"2021"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref22","article-title":"Beta-vae: Learning basic visual concepts with a constrained variational framework","author":"Higgins","year":"2016"},{"key":"ref23","first-page":"2615","article-title":"Isolating sources of disentanglement in vaes","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Chen","year":"2018"},{"key":"ref24","article-title":"Disentanglement analysis with partial information decomposition","author":"Tokui","year":"2021"},{"key":"ref25","article-title":"Understanding disentangling in $beta$-vae","author":"Burgess","year":"2018"},{"key":"ref26","article-title":"Isolating sources of disentanglement in variational autoencoders","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Chen","year":"2018"},{"key":"ref27","article-title":"The role of disentanglement in generalisation","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Montero","year":"2020"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/IROS47612.2022.9981405"},{"key":"ref29","article-title":"Unsupervised meta-learning through latent-space interpolation in generative models","author":"Khodadadeh","year":"2020"},{"issue":"289","key":"ref30","first-page":"1","article-title":"VariBAD: Variational bayes-adaptive deep RL via meta-learning","volume":"22","author":"Zintgraf","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref31","first-page":"19","article-title":"Informed machine learning - towards a taxonomy of explicit integration of knowledge into machine learning","volume":"18","author":"Rueden","year":"2019","journal-title":"Learning"},{"key":"ref32","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja","year":"2018"},{"key":"ref33","article-title":"An environment for autonomous driving decision-making","author":"Leurent","year":"2018"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevE.62.1805"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1016\/j.trc.2004.12.003"},{"key":"ref36","first-page":"14809","article-title":"Physics-integrated variational autoencoders for robust and interpretable generative modeling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Takeishi","year":"2021"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/10683798\/10565991.pdf?arnumber=10565991","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T17:30:51Z","timestamp":1727717451000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10565991\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11]]},"references-count":36,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/lra.2024.3417114","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11]]}}}