{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:13:01Z","timestamp":1740100381621,"version":"3.37.3"},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100009148","name":"QMUL Research-IT","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100009148","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,8,17]]},"DOI":"10.1109\/cog52621.2021.9619033","type":"proceedings-article","created":{"date-parts":[[2021,12,7]],"date-time":"2021-12-07T20:53:06Z","timestamp":1638910386000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Learning on a Budget via Teacher Imitation"],"prefix":"10.1109","author":[{"given":"Ercument","family":"Ilhan","sequence":"first","affiliation":[{"name":"Queen Mary University of London,School of Electronic Engineering and Computer Science,London,United Kingdom"}]},{"given":"Jeremy","family":"Gow","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,School of Electronic Engineering and Computer Science,London,United Kingdom"}]},{"given":"Diego","family":"Perez-Liebana","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,School of Electronic Engineering and Computer Science,London,United Kingdom"}]}],"member":"263","reference":[{"key":"ref10","first-page":"1","article-title":"Teaching on a budget in multiagent deep reinforcement learning","author":"ilhan","year":"2019","journal-title":"IEEE Conference on Games CoG 2019"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.6036"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2021.3113644"},{"key":"ref13","article-title":"Action advising with advice imitation in deep reinforcement learning","author":"ilhan","year":"0","journal-title":"Proceedings of the 20th Conference on Autonomous Agents and Multi-Agent Systems AAMAS 2021"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1080\/09540091.2014.885279"},{"key":"ref15","article-title":"Teacher-Student Framework: A Reinforcement Learning Approach","author":"zimmer","year":"0","journal-title":"AAMAS Workshop Autonomous Robots and Multirobot Systems"},{"key":"ref16","first-page":"804","article-title":"Interactive teaching strategies for agent training","author":"amir","year":"0","journal-title":"Proceedings of the Twenty-Fifth International Joint Conference on Artificial Intelligence IJCAI 2016"},{"key":"ref17","first-page":"1100","article-title":"Simultaneously learning and advising in multiagent reinforcement learning","author":"da silva","year":"0","journal-title":"Proceedings of the 16th Conference on Autonomous Agents and Multi-Agent Systems AAMAS 2017"},{"key":"ref18","first-page":"1674","article-title":"Learning by reusing previous advice in teacher-student paradigm","author":"zhu","year":"0","journal-title":"Proceedings of the 19th International Conference on Autonomous Agents and MultiAgent Systems AAMAS &#x2018;20"},{"key":"ref19","article-title":"Active deep q-learning with demonstration","volume":"abs 1812 2632","author":"chen","year":"2018","journal-title":"CoRR"},{"key":"ref28","article-title":"UMAP: uniform manifold approximation and projection for dimension reduction","volume":"abs 1802 3426","author":"mcinnes","year":"2018","journal-title":"CoRR"},{"key":"ref4","article-title":"Benchmarking bonus-based exploration methods on the arcade learning environment","volume":"abs 1908 2388","author":"taiga","year":"2019","journal-title":"CoRR"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3912"},{"key":"ref3","article-title":"Solving rubik's cube with a robot hand","volume":"abs 1910 7113","author":"akkaya","year":"2019","journal-title":"CoRR"},{"key":"ref6","first-page":"1040","article-title":"Learning from demonstration","author":"schaal","year":"1996","journal-title":"Advances in Neural Information Processing Systems 9 NIPS"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1991.3.1.88"},{"key":"ref8","first-page":"720","article-title":"Probabilistic policy reuse in a reinforcement learning agent","author":"fern\u00e1ndez","year":"2006","journal-title":"International Joint Conference on Autonomous Agents and Multiagent Systems (AAMAS)"},{"key":"ref7","first-page":"3223","article-title":"Deep q-learning from demonstrations","author":"hester","year":"0","journal-title":"Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence (AAAI-18)"},{"key":"ref2","article-title":"Mastering atari, go, chess and shogi by planning with a learned model","volume":"abs 1911 8265","author":"schrittwieser","year":"2019","journal-title":"CoRR"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"350","DOI":"10.1038\/s41586-019-1724-z","article-title":"Grandmaster level in StarCraft II using multi-agent reinforcement learning","volume":"575","author":"vinyals","year":"2019","journal-title":"Nature"},{"key":"ref9","first-page":"1053","article-title":"Teaching on a budget: Agents advising agents in reinforcement learning","author":"torrey","year":"2013","journal-title":"International conference on Autonomous Agents and Multi-Agent Systems AAMAS &#x2018;13"},{"key":"ref20","article-title":"Exploration by random network distillation","volume":"abs 1810 12894","author":"burda","year":"2018","journal-title":"CoRR"},{"journal-title":"Reinforcement Learning An Introduction","year":"2018","author":"sutton","key":"ref22"},{"key":"ref21","first-page":"1050","article-title":"Dropout as a bayesian approximation: Representing model uncertainty in deep learning","volume":"48","author":"gal","year":"2016","journal-title":"Proceedings of the 33nd International Conference on Machine Learning ICML 2016"},{"key":"ref24","first-page":"3215","article-title":"Rainbow: Combining improvements in deep reinforcement learning","author":"hessel","year":"0","journal-title":"Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence (AAAI-18)"},{"key":"ref23","article-title":"Playing atari with deep reinforcement learning","volume":"abs 1312 5602","author":"mnih","year":"2013","journal-title":"CoRR"},{"key":"ref26","first-page":"2094","article-title":"Deep reinforcement learning with double q-learning","author":"van hasselt","year":"0","journal-title":"Proceedings of the Thirtieth AAAI Conference on Artificial Intelligence"},{"key":"ref25","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","volume":"48","author":"wang","year":"0","journal-title":"Proceedings of the 33nd International Conference on Machine Learning ICML 2016 ser JMLR Workshop and Conference Proceedings"}],"event":{"name":"2021 IEEE Conference on Games (CoG)","start":{"date-parts":[[2021,8,17]]},"location":"Copenhagen, Denmark","end":{"date-parts":[[2021,8,20]]}},"container-title":["2021 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9618888\/9618891\/09619033.pdf?arnumber=9619033","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,3]],"date-time":"2022-08-03T00:14:06Z","timestamp":1659485646000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9619033\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8,17]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/cog52621.2021.9619033","relation":{},"subject":[],"published":{"date-parts":[[2021,8,17]]}}}