{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T16:55:08Z","timestamp":1777654508981,"version":"3.51.4"},"reference-count":41,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"9","license":[{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,9,1]],"date-time":"2020-09-01T00:00:00Z","timestamp":1598918400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2020,9]]},"DOI":"10.1109\/tnnls.2019.2934906","type":"journal-article","created":{"date-parts":[[2019,9,10]],"date-time":"2019-09-10T00:15:40Z","timestamp":1568074540000},"page":"3732-3740","source":"Crossref","is-referenced-by-count":181,"title":["Teacher\u2013Student Curriculum Learning"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6815-1580","authenticated-orcid":false,"given":"Tambet","family":"Matiisen","sequence":"first","affiliation":[]},{"given":"Avital","family":"Oliver","sequence":"additional","affiliation":[]},{"given":"Taco","family":"Cohen","sequence":"additional","affiliation":[]},{"given":"John","family":"Schulman","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"arXiv 1707 06347"},{"key":"ref38","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv 1412 6980"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2016.2514360"},{"key":"ref32","article-title":"Incentivizing exploration in reinforcement learning with deep predictive models","author":"stadie","year":"2015","journal-title":"arXiv 1507 00814"},{"key":"ref31","first-page":"1109","article-title":"VIME: Variational information maximizing exploration","author":"houthooft","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref30","first-page":"1471","article-title":"Unifying count-based exploration and intrinsic motivation","author":"bellemare","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref37","author":"chollet","year":"2015","journal-title":"Keras"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2017.2773562"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2016.2563981"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2792062"},{"key":"ref10","first-page":"6","article-title":"What is intrinsic motivation? A typology of computational approaches","volume":"1","author":"oudeyer","year":"2009","journal-title":"Frontiers Neurorobot"},{"key":"ref40","article-title":"Reinforcement learning through asynchronous advantage actor-critic on a GPU","author":"babaeizadeh","year":"2016","journal-title":"arXiv 1611 06256"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(98)00023-X"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1512\/iumj.1957.6.56038"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref15","first-page":"3104","article-title":"Sequence to sequence learning with neural networks","author":"sutskever","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref16","first-page":"4246","article-title":"The malmo platform for artificial intelligence experimentation","author":"johnson","year":"2016","journal-title":"Proc Int Joint Conf Artif Intell (IJCAI)"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.1991.170605"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2006.890271"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2012.05.008"},{"key":"ref28","article-title":"Control of memory, active perception, and action in minecraft","author":"oh","year":"2016","journal-title":"arXiv 1605 09128"},{"key":"ref4","article-title":"End-to-end training of deep visuomotor policies","author":"levine","year":"2015","journal-title":"Arxiv 1504 00702"},{"key":"ref27","article-title":"Neural GPUs learn algorithms","author":"kaiser","year":"2015","journal-title":"arXiv 1511 08228"},{"key":"ref3","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553380"},{"key":"ref29","article-title":"A deep hierarchical approach to lifelong learning in minecraft","author":"tessler","year":"2016","journal-title":"arXiv 1604 07255"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-30164-8_244"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1038\/nature20101"},{"key":"ref7","article-title":"Learning to execute","author":"zaremba","year":"2014","journal-title":"arXiv 1410 4615"},{"key":"ref2","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref9","first-page":"1","article-title":"Training agent for first-person shooter game with actor-critic curriculum learning","author":"wu","year":"2017","journal-title":"Proc Submitted Conf Learn Represent"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref20","first-page":"20","article-title":"Multi-armed bandits for intelligent tutoring systems","volume":"7","author":"clement","year":"2015","journal-title":"J Educ Data Mining"},{"key":"ref22","article-title":"Intrinsic motivation and automatic curricula via asymmetric self-play","author":"sukhbaatar","year":"2017","journal-title":"arXiv 1703 05407"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TAMD.2010.2056368"},{"key":"ref24","article-title":"Automated curriculum learning for neural networks","author":"graves","year":"2017","journal-title":"arXiv 1704 03003"},{"key":"ref41","article-title":"Learning to navigate in complex environments","author":"mirowski","year":"2016","journal-title":"arXiv 1611 03673"},{"key":"ref23","article-title":"Automatic goal generation for reinforcement learning agents","author":"held","year":"2017","journal-title":"arXiv 1705 06366"},{"key":"ref26","article-title":"Neural programmer-interpreters","author":"reed","year":"2015","journal-title":"arXiv 1511 06279"},{"key":"ref25","article-title":"Grid long short-term memory","author":"kalchbrenner","year":"2015","journal-title":"arXiv 1507 01526"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/9184294\/08827566.pdf?arnumber=8827566","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T17:19:50Z","timestamp":1651079990000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8827566\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,9]]},"references-count":41,"journal-issue":{"issue":"9"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2019.2934906","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,9]]}}}