{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T23:47:25Z","timestamp":1771544845678,"version":"3.50.1"},"reference-count":29,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2017,9]]},"DOI":"10.1109\/devlrn.2017.8329785","type":"proceedings-article","created":{"date-parts":[[2018,4,6]],"date-time":"2018-04-06T04:26:54Z","timestamp":1522988814000},"page":"39-46","source":"Crossref","is-referenced-by-count":15,"title":["Curiosity-driven exploration enhances motor skills of continuous actor-critic learner"],"prefix":"10.1109","author":[{"given":"Muhammad Burhan","family":"Hafez","sequence":"first","affiliation":[]},{"given":"Cornelius","family":"Weber","sequence":"additional","affiliation":[]},{"given":"Stefan","family":"Wermter","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/DEVLRN.2011.6037356"},{"key":"ref11","article-title":"VIME: variational information maximizing exploration","author":"houthooft","year":"2016","journal-title":"Advances In Neural Information Processing Systems (NIPS) Long Beach CA USA"},{"key":"ref12","article-title":"Variational information maximisation for intrinsically motivated reinforcement learning","author":"mohamed","year":"2015","journal-title":"Advances in Neural Information Processing Systems (NIPS) Montréal Canada"},{"key":"ref13","author":"stadie","year":"2015","journal-title":"Incentivizing exploration in reinforcement learning with deep predictive models"},{"key":"ref14","article-title":"Intrinsically motivated model learning for developing curious robots","author":"hester","year":"2015","journal-title":"Artificial Intelligence"},{"key":"ref15","article-title":"An intrinsic reward mechanism for efficient exploration","author":"ozgur","year":"2006","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-015-1861-8"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/DEVLRN.2015.7346097"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1111\/cdev.12263"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TAMD.2010.2062511"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2006.07.010"},{"key":"ref4","first-page":"213","article-title":"R-MAX a general polynomial time algorithm for near-optimal reinforcement learning","volume":"3","author":"brafman","year":"2002","journal-title":"Journal of Machine Learning Research"},{"key":"ref27","first-page":"625","article-title":"A growing neural gas network learns topologies","volume":"7","author":"fritzke","year":"1995","journal-title":"Advances in neural information processing systems"},{"key":"ref3","article-title":"Integrated architectures for learning, planning, and reacting based on approximating dynamic programming","author":"sutton","year":"1990","journal-title":"Proc of the International Conference on Machine Learning (ICML)"},{"key":"ref6","article-title":"Deep exploration via bootstrapped DQN","author":"osband","year":"2016","journal-title":"Advances In Neural Information Processing Systems (NIPS) Long Beach CA USA"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ROMAN.2017.8172289"},{"key":"ref5","article-title":"Online exploration in least-squares policy iteration","author":"lihong","year":"2009","journal-title":"International Joint Conference on Autonomous Agents and Multiagent Systems (AAMAS)"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TAMD.2010.2056368"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21236\/ADA440280"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2006.890271"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1613\/jair.301"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2012.05.008"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2007.368199"},{"key":"ref21","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref24","author":"bertsekas","year":"1996","journal-title":"Neuro-Dynamic Programming"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/BF00115009"},{"key":"ref26","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-642-88163-3","author":"kohonen","year":"1989","journal-title":"Self-Organization and Associative Memory"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.1999.831553"}],"event":{"name":"2017 Joint IEEE International Conference on Development and Learning and Epigenetic Robotics (ICDL-EpiRob)","location":"Lisbon","start":{"date-parts":[[2017,9,18]]},"end":{"date-parts":[[2017,9,21]]}},"container-title":["2017 Joint IEEE International Conference on Development and Learning and Epigenetic Robotics (ICDL-EpiRob)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8326423\/8329770\/08329785.pdf?arnumber=8329785","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,26]],"date-time":"2022-01-26T05:36:18Z","timestamp":1643175378000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/8329785\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,9]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/devlrn.2017.8329785","relation":{},"subject":[],"published":{"date-parts":[[2017,9]]}}}