{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T21:18:03Z","timestamp":1768339083132,"version":"3.49.0"},"reference-count":16,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,6,8]],"date-time":"2022-06-08T00:00:00Z","timestamp":1654646400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,6,8]],"date-time":"2022-06-08T00:00:00Z","timestamp":1654646400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,6,8]]},"DOI":"10.23919\/acc53348.2022.9867146","type":"proceedings-article","created":{"date-parts":[[2022,9,5]],"date-time":"2022-09-05T20:24:10Z","timestamp":1662409450000},"page":"3311-3316","source":"Crossref","is-referenced-by-count":4,"title":["Provably Efficient Multi-Agent Reinforcement Learning with Fully Decentralized Communication"],"prefix":"10.23919","author":[{"given":"Justin","family":"Lidard","sequence":"first","affiliation":[{"name":"Princeton University,Department of Mechanical and Aerospace Engineering,Princeton,USA,NJ 08544"}]},{"given":"Udari","family":"Madhushani","sequence":"additional","affiliation":[{"name":"Princeton University,Department of Mechanical and Aerospace Engineering,Princeton,USA,NJ 08544"}]},{"given":"Naomi Ehrich","family":"Leonard","sequence":"additional","affiliation":[{"name":"Princeton University,Department of Mechanical and Aerospace Engineering,Princeton,USA,NJ 08544"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2013.2241057"},{"key":"ref11","first-page":"1","article-title":"Fully decentralized multi-agent reinforcement learning with networked agents","volume":"80","author":"zhang","year":"2018","journal-title":"Proceedings of the 35th International Conference on Machine Learning"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2018.8619581"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1287\/moor.27.4.819.297"},{"key":"ref14","first-page":"1","article-title":"Nearly minimax optimal reinforcement learning for linear mixture Markov decision processes","volume":"134","author":"zhou","year":"2021","journal-title":"Machine Learning Research"},{"key":"ref15","first-page":"1","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume":"31","author":"lowe","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref16","first-page":"1","article-title":"PettingZoo: A standard API for multi-agent reinforcement learning","volume":"35","author":"terry","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref4","article-title":"Provably efficient cooperative multi-agent reinforcement learning with function approximation","author":"dubey","year":"2021"},{"key":"ref3","first-page":"1","article-title":"ROMA: Multi-agent reinforcement learning with emergent roles","volume":"119","author":"wang","year":"2020","journal-title":"Machine Learning Research"},{"key":"ref6","article-title":"MARL with general utilities via decentralized shadow reward actor-critic","author":"zhang","year":"2021"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2021.3049345"},{"key":"ref8","first-page":"1","article-title":"Almost optimal model-free reinforcement learning via reference-advantage decomposition","volume":"34","author":"zhang","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref7","first-page":"1","article-title":"Is Q-learning provably efficient?","volume":"32","author":"jin","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref1","first-page":"1","article-title":"Weighted QMIX: Expanding monotonic value function factorisation for deep multi-agent reinforcement learning","author":"rashid","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref9","first-page":"1","article-title":"Bellman Eluder dimension: New rich classes of RL problems, and sample-efficient algorithms","volume":"35","author":"jin","year":"2021","journal-title":"Advances in neural information processing systems"}],"event":{"name":"2022 American Control Conference (ACC)","location":"Atlanta, GA, USA","start":{"date-parts":[[2022,6,8]]},"end":{"date-parts":[[2022,6,10]]}},"container-title":["2022 American Control Conference (ACC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9866948\/9867142\/09867146.pdf?arnumber=9867146","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,3]],"date-time":"2022-10-03T20:38:15Z","timestamp":1664829495000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9867146\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,8]]},"references-count":16,"URL":"https:\/\/doi.org\/10.23919\/acc53348.2022.9867146","relation":{},"subject":[],"published":{"date-parts":[[2022,6,8]]}}}