{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:17:27Z","timestamp":1740100647650,"version":"3.37.3"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747852","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"4108-4112","source":"Crossref","is-referenced-by-count":0,"title":["Qrelation: an Agent Relation-Based Approach for Multi-Agent Reinforcement Learning Value Function Factorization"],"prefix":"10.1109","author":[{"given":"Siqi","family":"Shen","sequence":"first","affiliation":[{"name":"Xiamen University,Fujian Key Lab of Sensing and Computing for Smart Cities, School of Informatics,China"}]},{"given":"Jun","family":"Liu","sequence":"additional","affiliation":[{"name":"Xiamen University,Fujian Key Lab of Sensing and Computing for Smart Cities, School of Informatics,China"}]},{"given":"Mengwei","family":"Qiu","sequence":"additional","affiliation":[{"name":"Xiamen University,Fujian Key Lab of Sensing and Computing for Smart Cities, School of Informatics,China"}]},{"given":"Weiquan","family":"Liu","sequence":"additional","affiliation":[{"name":"Xiamen University,Fujian Key Lab of Sensing and Computing for Smart Cities, School of Informatics,China"}]},{"given":"Cheng","family":"Wang","sequence":"additional","affiliation":[{"name":"Xiamen University,Fujian Key Lab of Sensing and Computing for Smart Cities, School of Informatics,China"}]},{"given":"Yongquan","family":"Fu","sequence":"additional","affiliation":[{"name":"National University of Defense Technology,Parallel and Distributed Processing Laboratory,China"}]},{"given":"Qinglin","family":"Wang","sequence":"additional","affiliation":[{"name":"National University of Defense Technology,Parallel and Distributed Processing Laboratory,China"}]},{"given":"Peng","family":"Qiao","sequence":"additional","affiliation":[{"name":"National University of Defense Technology,Parallel and Distributed Processing Laboratory,China"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413716"},{"key":"ref11","article-title":"Actor-attention-critic for multi-agent reinforcement learning","author":"iqbal","year":"2019","journal-title":"ICML"},{"key":"ref12","article-title":"Off-policy multi-agent decomposed policy gradients","author":"wang","year":"2020","journal-title":"ICLR"},{"key":"ref13","article-title":"Value-decomposition networks for cooperative multi-agent learning based on team reward","author":"sunehag","year":"2018","journal-title":"AAMAS"},{"key":"ref14","article-title":"Weighted QMIX: expanding monotonic value function factorisation for deep multi-agent reinforcement learning","author":"rashid","year":"2020","journal-title":"NeurIPS"},{"article-title":"Qtran++: Improved value transformation for cooperative multi-agent reinforcement learning","year":"2020","author":"son","key":"ref15"},{"key":"ref16","article-title":"Qatten: A general framework for cooperative multiagent reinforcement learning","author":"yang","year":"2020","journal-title":"CoRR"},{"key":"ref17","article-title":"QPLEX: duplex dueling multi-agent q-learning","author":"wang","year":"2021","journal-title":"ICLR"},{"key":"ref18","article-title":"DFAC framework: Factorizing the value function via quantile mixture for multi-agent distributional q-learning","author":"sun","year":"2021","journal-title":"ICML"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-28929-8"},{"key":"ref4","first-page":"2681","article-title":"Deep decentralized multi-task multi-agent reinforcement learning under partial observability","author":"omidshafiei","year":"2017","journal-title":"ICML"},{"key":"ref3","article-title":"Is multiagent deep reinforcement learning the answer or the question? A brief survey","author":"hernandez-leal","year":"2018","journal-title":"CoRR"},{"key":"ref6","article-title":"QMIX: monotonic value function factorisation for deep multi-agent reinforcement learning","author":"rashid","year":"2018","journal-title":"ICML"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1613\/jair.2447"},{"key":"ref8","article-title":"QTRAN: learning to factorize with transformation for cooperative multi-agent reinforcement learning","author":"son","year":"2019","journal-title":"ICML"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref2","article-title":"An overview of recent progress in the study of distributed multi-agent coordination","volume":"9","author":"cao","year":"2012","journal-title":"IEEE Transactions on Industrial Informatics"},{"key":"ref1","article-title":"Coordinated multi-robot exploration under communication constraints using decentralized markov decision processes","author":"matignon","year":"2012","journal-title":"AAAI"},{"article-title":"Deep coordination graphs","year":"0","author":"b\u00f6hmer","key":"ref9"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/W14-4012"},{"key":"ref22","article-title":"Graph attention networks","author":"velickovic","year":"2018","journal-title":"ICLR"},{"key":"ref21","article-title":"Modeling relational data with graph convolutional networks","author":"schlichtkrull","year":"2018","journal-title":"ESWC"},{"key":"ref24","first-page":"2186","article-title":"The starcraft multiagent challenge","author":"samvelyan","year":"2019","journal-title":"AAMAS"},{"key":"ref23","article-title":"Graph convolutional reinforcement learning","author":"jiang","year":"2020","journal-title":"ICLRE"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2022,5,23]]},"location":"Singapore, Singapore","end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747852.pdf?arnumber=9747852","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,15]],"date-time":"2022-08-15T20:09:21Z","timestamp":1660594161000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747852\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747852","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}