{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,28]],"date-time":"2026-01-28T12:12:10Z","timestamp":1769602330815,"version":"3.49.0"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,8,17]],"date-time":"2021-08-17T00:00:00Z","timestamp":1629158400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"NSF","doi-asserted-by":"publisher","award":["1724237"],"award-info":[{"award-number":["1724237"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,8,17]]},"DOI":"10.1109\/cog52621.2021.9619008","type":"proceedings-article","created":{"date-parts":[[2021,12,7]],"date-time":"2021-12-07T20:53:06Z","timestamp":1638910386000},"page":"01-08","source":"Crossref","is-referenced-by-count":9,"title":["Honey. I Shrunk The Actor: A Case Study on Preserving Performance with Smaller Actors in Actor-Critic RL"],"prefix":"10.1109","author":[{"given":"Siddharth","family":"Mysore","sequence":"first","affiliation":[{"name":"Boston University,Department of Computer Science,Boston,U.S.A"}]},{"given":"Bassel El","family":"Mabsout","sequence":"additional","affiliation":[{"name":"Boston University,Department of Computer Science,Boston,U.S.A"}]},{"given":"Renato","family":"Mancuso","sequence":"additional","affiliation":[{"name":"Boston University,Department of Computer Science,Boston,U.S.A"}]},{"given":"Kate","family":"Saenko","sequence":"additional","affiliation":[{"name":"Boston University Co-affiliated with MIT-IBM Watson AI Lab,Department of Computer Science,Boston,U.S.A."}]}],"member":"263","reference":[{"key":"ref39","article-title":"Policy distillation","author":"rusu","year":"2015","journal-title":"ArXiv Preprint"},{"key":"ref38","article-title":"Soft weight-sharing for neural network compression","volume":"abs 1702 4008","author":"ullrich","year":"2017","journal-title":"ArXiv"},{"key":"ref33","article-title":"Implementation matters in deep rl: A case study on ppo and trpo","author":"engstrom","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref32","author":"tasfi","year":"2016","journal-title":"Pygame learning environment"},{"key":"ref31","author":"brockman","year":"2016","journal-title":"OpenAI Gym"},{"key":"ref30","author":"guadarrama","year":"2018","journal-title":"TF-Agents A library for reinforcement learning in tensorflow"},{"key":"ref37","article-title":"Distilling the knowledge in a neural network","volume":"abs 1503 2531","author":"hinton","year":"2015","journal-title":"ArXiv"},{"key":"ref36","article-title":"Deep compression: Compressing deep neural network with pruning, trained quantization and huffman coding","author":"han","year":"0","journal-title":"arXiv Computer Vision and Pattern Recognition 2016"},{"key":"ref35","article-title":"Asymmetric actor critic for image-based robot learning","author":"pinto","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref34","article-title":"A closer look at deep policy gradients","author":"ilyas","year":"0","journal-title":"International Conference on Learning Representations 2020"},{"key":"ref10","article-title":"A study on overfitting in deep reinforcement learning","volume":"abs 1804 6893","author":"zhang","year":"2018","journal-title":"CoRR"},{"key":"ref40","year":"0","journal-title":"Python noise library version 1 2 3"},{"key":"ref11","article-title":"A dissection of overfitting and generalization in continuous reinforcement learning","volume":"abs 1806 7937","author":"zhang","year":"2018","journal-title":"CoRR"},{"key":"ref12","article-title":"Reproducibility of benchmarked deep reinforcement learning tasks for continuous control","author":"islam","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref13","article-title":"Bench-marking deep reinforcement learning for continuous control","author":"duan","year":"0","journal-title":"Proceedings of the 33rd International Conference on International Conference on Machine Learning ser ICML'16 JMLR org"},{"key":"ref14","article-title":"Unity: A general platform for intelligent agents","author":"juliani","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref15","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref16","author":"shen","year":"2020","journal-title":"Asynchronous advantage actor critic Non-asymptotic analysis and linear speedup"},{"key":"ref17","author":"schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref18","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"Proceedings of The 32nd International Conference on Machine Learning"},{"key":"ref19","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"0","journal-title":"International Conference on Learning Representations"},{"key":"ref28","author":"hill","year":"2018","journal-title":"Stable Baselines"},{"key":"ref4","article-title":"Dota 2 with large scale deep reinforcement learning","volume":"abs 1912 6680","author":"berner","year":"2019","journal-title":"CoRR"},{"key":"ref27","author":"achiam","year":"2018","journal-title":"Spinning up in deep reinforcement learning"},{"key":"ref3","author":"zhao","year":"2019","journal-title":"On multiagent learning in team sports games"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/294"},{"key":"ref29","author":"raffin","year":"2019","journal-title":"Stable baselines3"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"350","DOI":"10.1038\/s41586-019-1724-z","article-title":"Grand-master level in starcraft ii using multi-agent reinforcement learning","volume":"575","author":"vinyals","year":"2019","journal-title":"Nature"},{"key":"ref8","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"0","journal-title":"Advances in Neural Information Processing Systems 2000"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2020.2990865"},{"key":"ref9","author":"pineau","year":"2020","journal-title":"Improving reproducibility in machine learning research (a report from the neurips 2019 reproducibility program)"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2019.2947597"},{"key":"ref20","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","author":"fujimoto","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref22","article-title":"Deep reinforcement learning that matters","volume":"32","author":"henderson","year":"0","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"ref21","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"0","journal-title":"International Conference on Machine Learning (ICML)"},{"key":"ref24","article-title":"What matters for on-policy deep actor-critic methods? a large-scale study","author":"andrychowicz","year":"0","journal-title":"International Conference on Learning Representations 2021"},{"key":"ref23","article-title":"A self-tuning actor-critic algorithm","volume":"33","author":"zahavy","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref26","author":"dhariwal","year":"2017","journal-title":"OpenAI Baselines"},{"key":"ref25","article-title":"D2rl: Deep dense architectures in reinforcement learning","author":"sinha","year":"2020","journal-title":"ArXiv Preprint"}],"event":{"name":"2021 IEEE Conference on Games (CoG)","location":"Copenhagen, Denmark","start":{"date-parts":[[2021,8,17]]},"end":{"date-parts":[[2021,8,20]]}},"container-title":["2021 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9618888\/9618891\/09619008.pdf?arnumber=9619008","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,5]],"date-time":"2022-12-05T23:53:31Z","timestamp":1670284411000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9619008\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,8,17]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/cog52621.2021.9619008","relation":{},"subject":[],"published":{"date-parts":[[2021,8,17]]}}}