{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T06:17:09Z","timestamp":1765520229626,"version":"3.48.0"},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iros60139.2025.11247146","type":"proceedings-article","created":{"date-parts":[[2025,11,27]],"date-time":"2025-11-27T18:54:45Z","timestamp":1764269685000},"page":"9753-9759","source":"Crossref","is-referenced-by-count":0,"title":["Mastering the Labyrinth Game: Efficient Multimodal Reinforcement Learning with Selective Reconstruction"],"prefix":"10.1109","author":[{"given":"Thomas","family":"Bi","sequence":"first","affiliation":[{"name":"ETH Zurich,Institute for Dynamic Systems and Control,Switzerland"}]},{"given":"Ethan","family":"Marot","sequence":"additional","affiliation":[{"name":"ETH Zurich,Institute for Dynamic Systems and Control,Switzerland"}]},{"given":"Aswin","family":"Ramachandran","sequence":"additional","affiliation":[{"name":"ETH Zurich,Institute for Dynamic Systems and Control,Switzerland"}]},{"given":"Raffaello","family":"D\u2019Andrea","sequence":"additional","affiliation":[{"name":"ETH Zurich,Institute for Dynamic Systems and Control,Switzerland"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10610577"},{"journal-title":"Mastering diverse domains through world models","year":"2023","author":"Hafner","key":"ref2"},{"journal-title":"Learning invariant representations for reinforcement learning without reconstruction","year":"2020","author":"Zhang","key":"ref3"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561384"},{"key":"ref5","first-page":"308","article-title":"Fully autonomous real-world reinforcement learning with applications to mobile manipulation","volume-title":"Conference on Robot Learning","author":"Sun"},{"article-title":"Path following using gain scheduled lqr control: with applications to a labyrinth game","year":"2020","author":"Frid","key":"ref6"},{"article-title":"Combining vision, machine learning and automatic control to play the labyrinth game","volume-title":"Swedish Symposium on Image Analysis","author":"\u00d6fj\u00e4ll","key":"ref7"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/BFb0036154"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CDC56724.2024.10886880"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-74565-5_32"},{"article-title":"The brio labyrinth game-a testbed for reinforcement learning and for studies on sensorimotor learning","volume-title":"Multidisciplinary Symposium on Reinforcement Learning","author":"Metzen","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.2174\/1573399812666160613113556"},{"journal-title":"An image is worth 16x16 words: Transformers for image recognition at scale","year":"2020","author":"Dosovitskiy","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IROS58592.2024.10802719"},{"journal-title":"Combining reconstruction and contrastive methods for multimodal representations in rl","year":"2023","author":"Becker","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2024.106347"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561187"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992699"},{"journal-title":"Prioritized experience replay","year":"2015","author":"Schaul","key":"ref19"},{"journal-title":"Distributed prioritized experience replay","year":"2018","author":"Horgan","key":"ref20"},{"journal-title":"Curious replay for model-based adaptation","year":"2023","author":"Kauvar","key":"ref21"},{"article-title":"Remember and forget for experience replay","volume-title":"International Conference on Machine Learning","author":"Novati","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abm6074"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2006.282372"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-014-0725-5"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10611493"},{"journal-title":"Mastering visual continuous control: Improved data-augmented reinforcement learning","year":"2021","author":"Yarats","key":"ref27"},{"journal-title":"Soft actor-critic algorithms and applications","year":"2018","author":"Haarnoja","key":"ref28"}],"event":{"name":"2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2025,10,19]]},"location":"Hangzhou, China","end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11245651\/11245652\/11247146.pdf?arnumber=11247146","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T06:15:10Z","timestamp":1765520110000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11247146\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/iros60139.2025.11247146","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}