{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:21:23Z","timestamp":1766067683778},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,10,23]],"date-time":"2022-10-23T00:00:00Z","timestamp":1666483200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,10,23]],"date-time":"2022-10-23T00:00:00Z","timestamp":1666483200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,10,23]]},"DOI":"10.1109\/iros47612.2022.9981126","type":"proceedings-article","created":{"date-parts":[[2022,12,26]],"date-time":"2022-12-26T19:38:15Z","timestamp":1672083495000},"source":"Crossref","is-referenced-by-count":8,"title":["How to Spend Your Robot Time: Bridging Kickstarting and Offline Reinforcement Learning for Vision-based Robotic Manipulation"],"prefix":"10.1109","author":[{"given":"Alex X.","family":"Lee","sequence":"first","affiliation":[{"name":"DeepMind,London,UK"}]},{"given":"Coline","family":"Devin","sequence":"additional","affiliation":[{"name":"DeepMind,London,UK"}]},{"given":"Jost Tobias","family":"Springenberg","sequence":"additional","affiliation":[{"name":"DeepMind,London,UK"}]},{"given":"Yuxiang","family":"Zhou","sequence":"additional","affiliation":[{"name":"DeepMind,London,UK"}]},{"given":"Thomas","family":"Lampe","sequence":"additional","affiliation":[{"name":"DeepMind,London,UK"}]},{"given":"Abbas","family":"Abdolmaleki","sequence":"additional","affiliation":[{"name":"DeepMind,London,UK"}]},{"given":"Konstantinos","family":"Bousmalis","sequence":"additional","affiliation":[{"name":"DeepMind,London,UK"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Policy finetuning: Bridging 
sample-efficient offline and online reinforcement learning","author":"Xie","year":"2021","journal-title":"NeurIPS"},{"key":"ref2","article-title":"A reduction of imitation learning and structured prediction to no-regret online learning","author":"Ross","year":"2011","journal-title":"AISTATS"},{"key":"ref3","article-title":"Beyond pick-and-place: Tackling robotic stacking of diverse shapes","author":"Lee","year":"2021","journal-title":"CoRL"},{"key":"ref4","article-title":"Off-policy deep reinforcement learning without exploration","author":"Fujimoto","year":"2019","journal-title":"ICML"},{"key":"ref5","article-title":"Stabilizing off-policy q-learning via bootstrapping error reduction","author":"Kumar","year":"2019","journal-title":"NeurIPS"},{"key":"ref6","article-title":"Conservative Q-learning for offline reinforcement learning","author":"Kumar","year":"2020","journal-title":"NeurIPS"},{"key":"ref7","author":"Kostrikov","year":"2021","journal-title":"Offline reinforcement learning with implicit Q-learning"},{"key":"ref8","article-title":"EMaQ: Expected-max Q-learning operator for simple yet effective offline and online RL","author":"Ghasemipour","year":"2021","journal-title":"ICML"},{"key":"ref9","author":"Nair","year":"2020","journal-title":"Accelerating online reinforcement learning with offline datasets"},{"key":"ref10","article-title":"Critic regularized regression","author":"Wang","year":"2020","journal-title":"NeurIPS"},{"key":"ref11","article-title":"Keep doing what worked: Behavior modelling priors for offline reinforcement learning","author":"Siegel","year":"2019","journal-title":"ICLR"},{"key":"ref12","article-title":"A minimalist approach to offline reinforcement learning","author":"Fujimoto","year":"2021","journal-title":"NeurIPS"},{"key":"ref13","author":"Wu","year":"2019","journal-title":"Behavior regularized offline reinforcement learning"},{"key":"ref14","author":"Xu","year":"2021","journal-title":"Offline reinforcement learning with soft 
behavior regularization"},{"key":"ref15","author":"Abdolmaleki","year":"2021","journal-title":"On multi-objective policy optimization as a tool for reinforcement learning"},{"key":"ref16","article-title":"Offline-to-online reinforcement learning via balanced replay and pessimistic Q-ensemble","author":"Lee","year":"2021","journal-title":"CoRL"},{"key":"ref17","author":"Schmitt","year":"2018","journal-title":"Kickstarting deep reinforcement learning"},{"key":"ref18","article-title":"Actor-mimic: Deep multitask and transfer reinforcement learning","author":"Parisotto","year":"2016","journal-title":"ICLR"},{"key":"ref19","author":"Cheng","year":"2018","journal-title":"Fast policy learning through imitation and reinforcement"},{"key":"ref20","author":"Jeong","year":"2020","journal-title":"Learning dexterous manipulation from sub-optimal experts"},{"key":"ref21","author":"Levine","year":"2018","journal-title":"Reinforcement learning and control as probabilistic inference: Tutorial and review"},{"key":"ref22","author":"Tirumala","year":"2020","journal-title":"Behavior priors for efficient reinforcement learning"},{"key":"ref23","article-title":"Soft actor-critic algorithms and applications","author":"Haarnoja","year":"2018","journal-title":"ICML"},{"key":"ref24","article-title":"Maximum a posteriori policy optimisation","author":"Abdolmaleki","year":"2018","journal-title":"ICLR"},{"key":"ref25","article-title":"A distributional perspective on reinforcement learning","author":"Bellemare","year":"2017","journal-title":"ICML"},{"key":"ref26","author":"Galashov","year":"2020","journal-title":"Importance weighted policy learning and adaptation"}],"event":{"name":"2022 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","location":"Kyoto, Japan","start":{"date-parts":[[2022,10,23]]},"end":{"date-parts":[[2022,10,27]]}},"container-title":["2022 IEEE\/RSJ International Conference on Intelligent Robots and Systems 
(IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9981026\/9981028\/09981126.pdf?arnumber=9981126","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,2]],"date-time":"2024-03-02T08:35:16Z","timestamp":1709368516000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9981126\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,23]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/iros47612.2022.9981126","relation":{},"subject":[],"published":{"date-parts":[[2022,10,23]]}}}