{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T13:40:45Z","timestamp":1730209245118,"version":"3.28.0"},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/cog51982.2022.9893666","type":"proceedings-article","created":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T19:33:31Z","timestamp":1663702411000},"page":"496-499","source":"Crossref","is-referenced-by-count":0,"title":["CGAR: Critic Guided Action Redistribution in Reinforcement Leaning"],"prefix":"10.1109","author":[{"given":"Tairan","family":"Huang","sequence":"first","affiliation":[{"name":"SCSE Beihang University,Beijing,China"}]},{"given":"Xu","family":"Li","sequence":"additional","affiliation":[{"name":"Cognitive Computing Lab Baidu Research,Beijing,China"}]},{"given":"Hao","family":"Li","sequence":"additional","affiliation":[{"name":"ECE Peking University,Beijing,China"}]},{"given":"Mingming","family":"Sun","sequence":"additional","affiliation":[{"name":"Cognitive Computing Lab Baidu Research,Seattle,USA"}]},{"given":"Ping","family":"Li","sequence":"additional","affiliation":[{"name":"Cognitive Computing Lab Baidu Research,Seattle,USA"}]}],"member":"263","reference":[{"key":"ref10","first-page":"1352","article-title":"Reinforcement learning with deep energy-based policies","author":"haarnoja","year":"2017","journal-title":"ICML 2017 Sydney NSW Australia 6-11 August 2017"},{"key":"ref11","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","author":"ziebart","year":"0","journal-title":"AAAI 2008 Chicago Illinois USA July 13-17 2008"},{"key":"ref12","article-title":"Deepmind control suite","volume":"abs\/1801.00690","author":"tassa","year":"2018","journal-title":"CoRR"},{"key":"ref13","article-title":"Distilling the knowledge in a neural network","author":"hinton","year":"2015","journal-title":"Deep Learning and Representation Learning Workshop NIPS"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5963"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00489"},{"article-title":"Can students outperform teachers in knowledge distillation based model compression?","year":"0","author":"deng","key":"ref16"},{"article-title":"MNIST handwritten digit database","year":"2010","author":"lecun","key":"ref17"},{"article-title":"Soft actor-critic (sac) implementation in pytorch","year":"0","author":"yarats","key":"ref18"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"article-title":"Reinforcement learning: An introduction","year":"2011","author":"sutton","key":"ref3"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CoG52621.2021.9619146"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CoG52621.2021.9619081"},{"key":"ref8","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"ICLRE"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CoG52621.2021.9619161"},{"key":"ref2","article-title":"Combining deep reinforcement learning and search for imperfect-information games","author":"brown","year":"2020","journal-title":"NeurIPS 2020"},{"key":"ref1","article-title":"Towards playing full MOBA games with deep reinforcement learning","author":"ye","year":"2020","journal-title":"NeurIPS 2020"},{"key":"ref9","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"2018","journal-title":"ICML"}],"event":{"name":"2022 IEEE Conference on Games (CoG)","start":{"date-parts":[[2022,8,21]]},"location":"Beijing, China","end":{"date-parts":[[2022,8,24]]}},"container-title":["2022 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9893561\/9893544\/09893666.pdf?arnumber=9893666","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,14]],"date-time":"2022-10-14T20:53:15Z","timestamp":1665780795000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9893666\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/cog51982.2022.9893666","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}