{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,18]],"date-time":"2026-01-18T13:36:40Z","timestamp":1768743400184,"version":"3.49.0"},"reference-count":35,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000781","name":"European Research Council","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100000781","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,5,13]]},"DOI":"10.1109\/icra57147.2024.10610528","type":"proceedings-article","created":{"date-parts":[[2024,8,8]],"date-time":"2024-08-08T17:51:05Z","timestamp":1723139465000},"page":"2866-2872","source":"Crossref","is-referenced-by-count":9,"title":["Contrastive Initial State Buffer for Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Nico","family":"Messikommer","sequence":"first","affiliation":[{"name":"University of Zurich,Robotics and Perception Group, Department of Informatics,Switzerland"}]},{"given":"Yunlong","family":"Song","sequence":"additional","affiliation":[{"name":"University of Zurich,Robotics and Perception Group, Department of Informatics,Switzerland"}]},{"given":"Davide","family":"Scaramuzza","sequence":"additional","affiliation":[{"name":"University of Zurich,Robotics and Perception Group, Department of Informatics,Switzerland"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2743240"},{"key":"ref2","article-title":"Deep reinforcement learning in a handful of trials using probabilistic dynamics models","volume":"31","author":"Chua","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref3","article-title":"Prioritized experience replay","author":"Schaul","year":"2015"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992699"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref6","article-title":"Hindsight experience replay","volume":"30","author":"Andrychowicz","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3197517.3201311"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636053"},{"key":"ref10","article-title":"Learning to walk in minutes using massively parallel deep reinforcement learning","volume-title":"5th Annual Conference on Robot Learning","author":"Rudin"},{"key":"ref11","article-title":"Learning montezuma\u2019s revenge from a single demonstration","author":"Salimans","year":"2018"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3272127.3275014"},{"key":"ref13","first-page":"482","article-title":"Reverse curriculum generation for reinforcement learning","volume-title":"Conference on robot learning.","author":"Florensa","year":"2017"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-020-03157-9"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460730"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989431"},{"key":"ref17","article-title":"Unifying count-based exploration and intrinsic motivation","volume":"29","author":"Bellemare","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref18","article-title":"Exploration by random network distillation","volume-title":"International Conference on Learning Representations","author":"Burda"},{"key":"ref19","article-title":"Never give up: Learning directed exploration strategies","volume-title":"International Conference on Learning Representations","author":"Badia"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1177\/0278364920987859"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.adg1462"},{"key":"ref22","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proceedings of the 32nd International Conference on Machine Learning","volume":"37","author":"Schulman"},{"key":"ref23","article-title":"High-dimensional continuous control using generalized advantage estimation","volume-title":"4th International Conference on Learning Representations, ICLR 2016","volume":"2016","author":"Schulman"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10610095"},{"key":"ref25","first-page":"2012","article-title":"Analyzing and improving representations with the soft nearest neighbor loss","volume-title":"International Conference on Machine Learning","author":"Frosst"},{"key":"ref26","article-title":"Representation learning with contrastive predictive coding","author":"Oord","year":"2018"},{"key":"ref27","article-title":"Isaac gym: High performance gpu-based physics simulation for robot learning","author":"Makoviychuk","year":"2021"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-023-06419-4"},{"key":"ref29","article-title":"Flightmare: A flexible quadrotor simulator","author":"Song","year":"2020","journal-title":"Conference on Robot Learning"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/IROS47612.2022.9982190"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.abk2822"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10610381"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4899-7687-1_79"},{"key":"ref34","first-page":"954","article-title":"Neighborhood mixup experience replay: Local convex interpolation for improved sample efficiency in continuous control tasks","volume-title":"Learning for Dynamics and Control Conference","author":"Sander"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561137"}],"event":{"name":"2024 IEEE International Conference on Robotics and Automation (ICRA)","location":"Yokohama, Japan","start":{"date-parts":[[2024,5,13]]},"end":{"date-parts":[[2024,5,17]]}},"container-title":["2024 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10609961\/10609862\/10610528.pdf?arnumber=10610528","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,10]],"date-time":"2024-08-10T05:20:37Z","timestamp":1723267237000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10610528\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,13]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/icra57147.2024.10610528","relation":{},"subject":[],"published":{"date-parts":[[2024,5,13]]}}}