{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,20]],"date-time":"2025-07-20T03:38:48Z","timestamp":1752982728901,"version":"3.28.0"},"reference-count":47,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018,8]]},"DOI":"10.1109\/cig.2018.8490448","type":"proceedings-article","created":{"date-parts":[[2018,10,15]],"date-time":"2018-10-15T23:37:41Z","timestamp":1539646661000},"page":"1-8","source":"Crossref","is-referenced-by-count":6,"title":["Automated Curriculum Learning by Rewarding Temporally Rare Events"],"prefix":"10.1109","author":[{"given":"Niels","family":"Justesen","sequence":"first","affiliation":[]},{"given":"Sebastian","family":"Risi","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"article-title":"Evolution strategies as a scalable alternative to reinforcement learning","year":"2017","author":"salimans","key":"ref39"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1006\/ceps.1999.1020"},{"key":"ref33","first-page":"6","article-title":"What is intrinsic motivation? a typology of computational approaches","volume":"1","author":"oudeyer","year":"2009","journal-title":"Frontiers in Neurorobotics"},{"key":"ref32","doi-asserted-by":"crossref","DOI":"10.31234\/osf.io\/3p8f6","article-title":"Computational theories of curiosity-driven learning","author":"oudeyer","year":"2018"},{"key":"ref31","article-title":"Shaping and policy search in reinforcement learning","author":"ng","year":"2003","journal-title":"PhD thesis"},{"article-title":"Illuminating search spaces by mapping elites","year":"2015","author":"mouret","key":"ref30"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1177\/1059712310379923"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TCIAIG.2015.2494596"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.3389\/frobt.2016.00040"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.70"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-20525-5_24"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TAMD.2010.2056368"},{"article-title":"Learning to act by predicting the future","year":"2016","author":"dosovitskiy","key":"ref11"},{"key":"ref12","doi-asserted-by":"crossref","DOI":"10.1515\/9781503620766","author":"festinger","year":"1957","journal-title":"A Theory of Cognitive Dissonance"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s12065-007-0002-4"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2012.02.024"},{"key":"ref15","first-page":"2829","article-title":"Continuous deep Q-learning with model-based acceleration","author":"gu","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref16","first-page":"189","article-title":"Intrinsic motivation and its role in psychological development","volume":"13","author":"hunt","year":"0"},{"article-title":"Deep learning for video game playing","year":"2017","author":"justesen","key":"ref17"},{"key":"ref18","first-page":"303","article-title":"Intrinsically motivated machines","author":"kaplan","year":"2007","journal-title":"50 Years of Artificial Intelligence"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2016.7860433"},{"key":"ref28","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553380"},{"article-title":"Learning to navigate in complex environments","year":"2016","author":"mirowski","key":"ref27"},{"key":"ref3","first-page":"1471","article-title":"Unifying count-based exploration and intrinsic motivation","author":"bellemare","year":"2016","journal-title":"Advances in neural information processing systems"},{"article-title":"Playing doom with slam-augmented deep reinforcement learning","year":"2016","author":"bhatti","key":"ref6"},{"key":"ref29","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1037\/11164-000"},{"key":"ref8","first-page":"5085","article-title":"Arnold: An autonomous agent to play fps games","author":"chaplot","year":"2017","journal-title":"AAAI"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.cognition.2008.08.011"},{"key":"ref2","first-page":"5055","article-title":"Hindsight experience replay","author":"andrychowicz","year":"2017","journal-title":"Advances in neural information processing systems"},{"article-title":"Combining model-based and model-free updates for trajectory-centric reinforcement learning","year":"2017","author":"chebotar","key":"ref9"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CIG.2017.8080408"},{"journal-title":"Training agent for first-person shooter game with actor-critic curriculum learning","year":"2016","author":"wu","key":"ref46"},{"key":"ref20","first-page":"3675","article-title":"Hierarchical deep reinforcement learning: Integrating temporal abstraction and intrinsic motivation","author":"kulkarni","year":"2016","journal-title":"Advances in neural information processing systems"},{"article-title":"Learning to reinforcement learn","year":"2016","author":"wang","key":"ref45"},{"key":"ref22","first-page":"2140","article-title":"Playing FPS games with deep reinforcement learning","author":"lample","year":"2017","journal-title":"AAAI"},{"key":"ref47","first-page":"5285","article-title":"Scalable trust-region method for deep reinforcement learning using kronecker-factored approximation","author":"wu","year":"2017","journal-title":"Advances in neural information processing systems"},{"article-title":"Deep successor reinforcement learning","year":"2016","author":"kulkarni","key":"ref21"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"schulman","key":"ref42"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1162\/EVCO_a_00025"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.3389\/fpsyg.2013.00313"},{"key":"ref23","article-title":"Theory and application of reward shaping in reinforcement learning","author":"laud","year":"2004","journal-title":"Technical Report"},{"article-title":"Starcraft II: a new challenge for reinforcement learning","year":"2017","author":"vinyals","key":"ref44"},{"key":"ref26","first-page":"40","article-title":"Achieving creative behavior using curious learning agents","volume":"8","author":"maher","year":"2008","journal-title":"AAAI Spring Symposium Creative Intelligent Systems"},{"article-title":"Deep neuroevolution: Genetic algorithms are a competitive alternative for training deep neural networks for reinforcement learning","year":"2017","author":"such","key":"ref43"},{"key":"ref25","first-page":"206","article-title":"Exploration in model-based reinforcement learning by empirically estimating learning progress","author":"lopes","year":"2012","journal-title":"Advances in neural information processing systems"}],"event":{"name":"2018 IEEE Conference on Computational Intelligence and Games (CIG)","start":{"date-parts":[[2018,8,14]]},"location":"Maastricht","end":{"date-parts":[[2018,8,17]]}},"container-title":["2018 IEEE Conference on Computational Intelligence and Games (CIG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8473398\/8490359\/08490448.pdf?arnumber=8490448","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,5]],"date-time":"2023-09-05T20:51:43Z","timestamp":1693947103000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8490448\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,8]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/cig.2018.8490448","relation":{},"subject":[],"published":{"date-parts":[[2018,8]]}}}