{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T13:44:51Z","timestamp":1730209491890,"version":"3.28.0"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,8,5]],"date-time":"2024-08-05T00:00:00Z","timestamp":1722816000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,8,5]],"date-time":"2024-08-05T00:00:00Z","timestamp":1722816000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,8,5]]},"DOI":"10.1109\/cog60054.2024.10645658","type":"proceedings-article","created":{"date-parts":[[2024,8,28]],"date-time":"2024-08-28T18:44:09Z","timestamp":1724870649000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Higher Replay Ratio Empowers Sample-Efficient Multi-Agent Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Linjie","family":"Xu","sequence":"first","affiliation":[{"name":"Queen Mary University of London,UK"}]},{"given":"Zichuan","family":"Liu","sequence":"additional","affiliation":[{"name":"Nanjing University,China"}]},{"given":"Alexander","family":"Dockhorn","sequence":"additional","affiliation":[{"name":"Leibniz University,Hannover,Germany"}]},{"given":"Diego","family":"Perez-Liebana","sequence":"additional","affiliation":[{"name":"Queen Mary University of London,UK"}]},{"given":"Jinyu","family":"Wang","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia,China"}]},{"given":"Lei","family":"Song","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia,China"}]},{"given":"Jiang","family":"Bian","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3338695"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530058"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3406186"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00042"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1609\/aiide.v19i1.27504"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1609\/aiide.v19i1.27506"},{"key":"ref7","article-title":"Mariogpt: Open-ended text2level generation through large language models","author":"Sudhakaran","year":"2024","journal-title":"Advances in NeurIPS"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11796"},{"key":"ref9","first-page":"2186","article-title":"The starcraft multi-agent challenge","volume-title":"International Conference on AAMAS","author":"Samvelyan"},{"key":"ref10","article-title":"Dota 2 with large scale deep reinforcement learning","author":"Berner","year":"2019","journal-title":"arXiv preprint arXiv:1912.06680"},{"key":"ref11","first-page":"2085","article-title":"Value-decomposition networks for cooperative multiagent learning based on team reward","volume-title":"International Conference on AAMAS","author":"Sunehag"},{"issue":"1","key":"ref12","first-page":"7234","article-title":"Monotonic value function factorisation for deep multi-agent reinforcement learning","volume":"21","author":"Rashid","year":"2020","journal-title":"Journal of Machine Learning Research"},{"key":"ref13","first-page":"1","article-title":"QPLEX: Duplex dueling multi-agent Q-learning","author":"Wang","year":"2021","journal-title":"ICLR"},{"key":"ref14","first-page":"24611","article-title":"The surprising effectiveness of ppo in cooperative multi-agent games","author":"Yu","year":"2022","journal-title":"Advances in NeurIPS"},{"key":"ref15","first-page":"22539","article-title":"NA2Q: Neural attention additive model for interpretable multiagent Q-learning","author":"Liu","year":"2023","journal-title":"ICML"},{"key":"ref16","first-page":"16828","article-title":"The primacy bias in deep reinforcement learning","author":"Nikishin","year":"2022","journal-title":"ICML"},{"key":"ref17","first-page":"1","article-title":"Sample-efficient reinforcement learning by breaking the replay ratio barrier","author":"Pierluca","year":"2023","journal-title":"ICLR"},{"key":"ref18","article-title":"Bigger, better, faster: Human-level atari with human-level efficiency","author":"Schwarzer","year":"2023","journal-title":"ICML"},{"key":"ref19","first-page":"1","article-title":"Randomized ensembled double q-learning: Learning fast without a model","author":"Chen","year":"2021","journal-title":"ICLR"},{"key":"ref20","first-page":"0","article-title":"Small batch deep reinforcement learning","author":"Obando-Ceron","year":"2023","journal-title":"Advances in NeurIPS"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-28929-8"},{"key":"ref22","article-title":"Mixrts: Toward interpretable multi-agent reinforcement learning via mixing recurrent soft decision trees","author":"Liu","year":"2022","journal-title":"arXiv preprint arXiv:2209.07225"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref24","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"Lowe","year":"2017","journal-title":"Advances in NeurIPS"},{"key":"ref25","first-page":"3991","article-title":"Celebrating diversity in shared multi-agent reinforcement learning","author":"Li","year":"2021","journal-title":"Advances in NeurIPS"},{"key":"ref26","article-title":"Benchmarking multi-agent deep reinforcement learning algorithms in cooperative tasks","author":"Papoudakis","year":"2021","journal-title":"NeurIPS Datasets and Benchmarks Track"},{"key":"ref27","first-page":"16691","article-title":"Recurrent model-free RL can be a strong baseline for many POMDPs","author":"Ni","year":"2022","journal-title":"ICML"},{"key":"ref28","first-page":"1","article-title":"Mastering visual reinforcement learning through dormant ratio minimization","author":"Xu","year":"2024","journal-title":"ICLR"},{"key":"ref29","first-page":"32145","article-title":"The dormant neuron phenomenon in deep reinforcement learning","author":"Sokar","year":"2023","journal-title":"ICML"}],"event":{"name":"2024 IEEE Conference on Games (CoG)","start":{"date-parts":[[2024,8,5]]},"location":"Milan, Italy","end":{"date-parts":[[2024,8,8]]}},"container-title":["2024 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10645493\/10645533\/10645658.pdf?arnumber=10645658","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,4]],"date-time":"2024-09-04T10:41:42Z","timestamp":1725446502000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10645658\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,5]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/cog60054.2024.10645658","relation":{},"subject":[],"published":{"date-parts":[[2024,8,5]]}}}