{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,28]],"date-time":"2026-02-28T18:29:35Z","timestamp":1772303375359,"version":"3.50.1"},"reference-count":54,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,9,27]],"date-time":"2021-09-27T00:00:00Z","timestamp":1632700800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,9,27]],"date-time":"2021-09-27T00:00:00Z","timestamp":1632700800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,9,27]],"date-time":"2021-09-27T00:00:00Z","timestamp":1632700800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,9,27]]},"DOI":"10.1109\/iros51168.2021.9636536","type":"proceedings-article","created":{"date-parts":[[2021,12,16]],"date-time":"2021-12-16T15:45:38Z","timestamp":1639669538000},"page":"3471-3477","source":"Crossref","is-referenced-by-count":15,"title":["Sample-efficient Reinforcement Learning Representation Learning with Curiosity Contrastive Forward Dynamics Model"],"prefix":"10.1109","author":[{"given":"Thanh","family":"Nguyen","sequence":"first","affiliation":[]},{"given":"Tung M.","family":"Luu","sequence":"additional","affiliation":[]},{"given":"Thang","family":"Vu","sequence":"additional","affiliation":[]},{"given":"Chang D.","family":"Yoo","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","author":"denil","year":"2016","journal-title":"Learning to perform physics experiments via deep reinforcement learning"},{"key":"ref38","author":"burda","year":"2018","journal-title":"Exploration by random network distillation"},{"key":"ref33","first-page":"1","article-title":"# exploration: A study of count-based exploration for deep reinforcement learning","volume":"30","author":"tang","year":"2017","journal-title":"31st Conference on Neural Information Processing Systems (NIPS)"},{"key":"ref32","author":"zhao","year":"2019","journal-title":"Curiosity-driven experience prioritization via density estimation"},{"key":"ref31","first-page":"2721","article-title":"Count-based exploration with neural density models","author":"ostrovski","year":"2017","journal-title":"International Conference on Machine Learning"},{"key":"ref30","author":"bellemare","year":"2016","journal-title":"Unifying count-based exploration and intrinsic motivation"},{"key":"ref37","author":"choshen","year":"2018","journal-title":"Dora the explorer Directed outreaching reinforcement action-selection"},{"key":"ref36","author":"burda","year":"2018","journal-title":"Large-Scale Study of Curiosity-Driven Learning[C]"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.70"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2006.890271"},{"key":"ref28","volume":"10","author":"openai","year":"2019","journal-title":"Solving Rubik&#x2019;s Cube with a Robot Hand"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref29","article-title":"Hindsight experience 
replay","author":"andrychowicz","year":"2017","journal-title":"NeurIPS"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3912"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref20","first-page":"5690","article-title":"Imagination-augmented agents for deep reinforcement learning","author":"racani\u00e8re","year":"2017","journal-title":"NIPS"},{"key":"ref22","author":"feinberg","year":"2018","journal-title":"Model-based value estimation for efficient model-free reinforcement learning"},{"key":"ref21","author":"ha","year":"2018","journal-title":"Recurrent world models facilitate policy evolution"},{"key":"ref24","author":"sorokin","year":"2015","journal-title":"Deep attention recurrent q-network"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8463189"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-020-03051-4"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33014213"},{"key":"ref50","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"ICML"},{"key":"ref51","article-title":"Unsupervised state representation learning in atari","author":"anand","year":"2019","journal-title":"NeurIPS"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00393"},{"key":"ref53","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"2018","journal-title":"ICML"},{"key":"ref52","author":"tassa","year":"2018","journal-title":"Deepmind control suite"},{"key":"ref10","article-title":"Scalable deep reinforcement learning for vision-based robotic manipulation","author":"kalashnikov","year":"2018","journal-title":"CoRL"},{"key":"ref11","article-title":"Curl: Contrastive unsupervised representations for reinforcement learning","author":"srinivas","year":"2020","journal-title":"ICML"},{"key":"ref40","article-title":"Model-based reinforcement learning for atari","author":"kaiser","year":"2020","journal-title":"ICLRE"},{"key":"ref12","article-title":"Reinforcement learning with augmented data","author":"laskin","year":"2020","journal-title":"NeurIPS"},{"key":"ref13","author":"kostrikov","year":"2020","journal-title":"Image augmentation is all you need Regularizing deep reinforcement learning from pixels"},{"key":"ref14","first-page":"2555","article-title":"Learning latent dynamics for planning from pixels","author":"hafner","year":"2019","journal-title":"International Conference on Machine Learning"},{"key":"ref15","author":"yarats","year":"2019","journal-title":"Improving sample efficiency in model-free reinforcement learning from images"},{"key":"ref16","article-title":"dm control: Software and tasks for continuous control","author":"tassa","year":"2020"},{"key":"ref17","author":"cobbe","year":"2019","journal-title":"Leveraging Procedural Generation to Benchmark Reinforcement Learning"},{"key":"ref18","author":"brockman","year":"2016","journal-title":"OpenAI Gym"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"ref4","author":"silver","year":"2017","journal-title":"Mastering chess and shogi by self-play with a general reinforcement learning algorithm"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree 
search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref6","first-page":"1407","article-title":"Impala: Scalable distributed deep-rl with importance weighted actor-learner architectures","author":"espeholt","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref5","article-title":"Reinforcement learning with unsupervised auxiliary tasks","author":"jaderberg","year":"2017","journal-title":"ICLRE"},{"key":"ref8","article-title":"End-to-end training of deep visuomotor policies","author":"levine","year":"2016","journal-title":"JMLR"},{"key":"ref7","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"ICML"},{"key":"ref49","author":"chen","year":"2020","journal-title":"A simple framework for contrastive learning of visual representations"},{"key":"ref9","author":"lee","year":"2019","journal-title":"Stochastic latent actor-critic Deep reinforcement learning with a latent variable model"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487173"},{"key":"ref45","article-title":"Darla: Improving zero-shot transfer in reinforcement learning","author":"higgins","year":"2017","journal-title":"ICML"},{"key":"ref48","author":"chen","year":"2020","journal-title":"Improved baselines with momentum contrastive learning"},{"key":"ref47","article-title":"Visual reinforcement learning with imagined goals","author":"nair","year":"2018","journal-title":"NeurIPS"},{"key":"ref42","author":"oord","year":"2018","journal-title":"Representation learning with contrastive predictive coding"},{"key":"ref41","article-title":"Dream to control: Learning behaviors by latent imagination","author":"hafner","year":"2020","journal-title":"ICLRE"},{"key":"ref44","author":"kingma","year":"2013","journal-title":"Auto-encoding variational bayes"},{"key":"ref43","author":"beattie","year":"2016","journal-title":"Deepmind lab"}],"event":{"name":"2021 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","location":"Prague, Czech Republic","start":{"date-parts":[[2021,9,27]]},"end":{"date-parts":[[2021,10,1]]}},"container-title":["2021 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9635848\/9635849\/09636536.pdf?arnumber=9636536","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T12:54:33Z","timestamp":1652187273000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9636536\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,9,27]]},"references-count":54,"URL":"https:\/\/doi.org\/10.1109\/iros51168.2021.9636536","relation":{},"subject":[],"published":{"date-parts":[[2021,9,27]]}}}