{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T22:13:06Z","timestamp":1740175986931,"version":"3.37.3"},"reference-count":23,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2022,4,1]],"date-time":"2022-04-01T00:00:00Z","timestamp":1648771200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,4,1]],"date-time":"2022-04-01T00:00:00Z","timestamp":1648771200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,4,1]],"date-time":"2022-04-01T00:00:00Z","timestamp":1648771200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. 
Lett."],"published-print":{"date-parts":[[2022,4]]},"DOI":"10.1109\/lra.2021.3135930","type":"journal-article","created":{"date-parts":[[2021,12,16]],"date-time":"2021-12-16T20:36:00Z","timestamp":1639686960000},"page":"890-897","source":"Crossref","is-referenced-by-count":3,"title":["Learning Cooperative Multi-Agent Policies With Partial Reward Decoupling"],"prefix":"10.1109","volume":"7","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3741-3709","authenticated-orcid":false,"given":"Benjamin","family":"Freed","sequence":"first","affiliation":[{"name":"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"}]},{"given":"Aditya","family":"Kapoor","sequence":"additional","affiliation":[{"name":"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0299-1760","authenticated-orcid":false,"given":"Ian","family":"Abraham","sequence":"additional","affiliation":[{"name":"Mechanical Engineering Department, Yale University, New Haven, CT, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5080-9073","authenticated-orcid":false,"given":"Jeff","family":"Schneider","sequence":"additional","affiliation":[{"name":"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5434-7945","authenticated-orcid":false,"given":"Howie","family":"Choset","sequence":"additional","affiliation":[{"name":"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"}]}],"member":"263","reference":[{"article-title":"Dota 2 with large scale deep reinforcement learning","year":"2019","author":"Berner","key":"ref1"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1126\/science.aau6249"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/JRPROC.1961.287775"},{"key":"ref5","volume-title":"Introduction to Reinforcement 
Learning","volume":"135","author":"Sutton","year":"1998"},{"key":"ref6","first-page":"1008","article-title":"Actor-critic algorithms","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Konda","year":"2000"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"Schulman","key":"ref7"},{"key":"ref8","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schulman","year":"2015"},{"key":"ref9","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja","year":"2018"},{"key":"ref10","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"99","author":"Ng","year":"1999"},{"key":"ref11","first-page":"195","article-title":"Planning, learning and coordination in multiagent decision processes","volume-title":"Proc. TARK","volume":"96","author":"Boutilier","year":"1996"},{"key":"ref12","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Sutton","year":"2000"},{"key":"ref13","article-title":"Variance reduction for policy gradient with action-dependent factorized baselines","volume-title":"Int. Conf. Learn. Representations","author":"Wu","year":"2018"},{"key":"ref14","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Adv. Neural Inf. Process. Syst.","volume":"30","author":"Lowe","year":"2017"},{"key":"ref15","article-title":"Distributed distributional deterministic policy gradients","volume-title":"Int. Conf. Learn. 
Representations","author":"Barth-Maron","year":"2018"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref17","first-page":"2961","article-title":"Actor-attention-critic for multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Iqbal","year":"2019"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341079"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3077863"},{"key":"ref20","article-title":"Graph attention networks","volume-title":"Int. Conf. Learn. Representations","author":"Veličković","year":"2018"},{"key":"ref21","first-page":"5998","article-title":"Attention is all you need","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Vaswani","year":"2017"},{"article-title":"High-dimensional continuous control using generalized advantage estimation","year":"2015","author":"Schulman","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4612-4380-9_35"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/9647862\/09653841.pdf?arnumber=9653841","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,11]],"date-time":"2024-01-11T23:49:34Z","timestamp":1705016974000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9653841\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,4]]},"references-count":23,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/lra.2021.3135930","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"type":"electronic","value":"2377-3766"},{"type":"electronic","value":"2377-3774"}],"subject":[],"published":{"date-parts":[[2022,4]]}}}