{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T13:09:20Z","timestamp":1777640960835,"version":"3.51.4"},"reference-count":59,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2018AAA0102401"],"award-info":[{"award-number":["2018AAA0102401"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62373273"],"award-info":[{"award-number":["62373273"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62073234"],"award-info":[{"award-number":["62073234"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62373268"],"award-info":[{"award-number":["62373268"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62103417"],"award-info":[{"award-number":["62103417"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Emerg. Top. Comput. 
Intell."],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1109\/tetci.2024.3360282","type":"journal-article","created":{"date-parts":[[2024,2,14]],"date-time":"2024-02-14T00:07:48Z","timestamp":1707869268000},"page":"2086-2100","source":"Crossref","is-referenced-by-count":36,"title":["Game of Drones: Intelligent Online Decision Making of Multi-UAV Confrontation"],"prefix":"10.1109","volume":"8","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-0533-6677","authenticated-orcid":false,"given":"Da","family":"Liu","sequence":"first","affiliation":[{"name":"School of Electrical and Information Engineering, Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0734-6728","authenticated-orcid":false,"given":"Qun","family":"Zong","sequence":"additional","affiliation":[{"name":"School of Electrical and Information Engineering, Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5309-6147","authenticated-orcid":false,"given":"Xiuyun","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Electrical and Information Engineering, Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1278-0556","authenticated-orcid":false,"given":"Ruilong","family":"Zhang","sequence":"additional","affiliation":[{"name":"Beijing Aerospace Automatic Control Institute, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3979-0420","authenticated-orcid":false,"given":"Liqian","family":"Dou","sequence":"additional","affiliation":[{"name":"School of Electrical and Information Engineering, Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1004-8350","authenticated-orcid":false,"given":"Bailing","family":"Tian","sequence":"additional","affiliation":[{"name":"School of Electrical and Information Engineering, Tianjin University, Tianjin, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2019.2924143"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/LCOMM.2016.2524405"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1126\/science.add4679"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1126\/science.aar6404"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3319619.3321894"},{"key":"ref7","article-title":"The starcraft multi-agent challenge","volume":"abs\/1902.04043","author":"Samvelyan","year":"2019","journal-title":"CoRR"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2018.2823329"},{"key":"ref9","first-page":"2085","article-title":"Value-decomposition networks for cooperative multi-agent learning based on team reward","volume-title":"Proc. 17th Int. Conf. Auton. Agents MultiAgent Syst.","author":"Sunehag","year":"2018"},{"key":"ref10","first-page":"4295","article-title":"QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rashid","year":"2018"},{"key":"ref11","first-page":"5887","article-title":"QTRAN: Learning to factorize with transformation for cooperative multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Son","year":"2019"},{"key":"ref12","article-title":"QPLEX: Duplex dueling multi-agent Q-learning","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Wang","year":"2021"},{"key":"ref13","first-page":"2252","article-title":"Learning multiagent communication with backpropagation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Sukhbaatar","year":"2016"},{"key":"ref14","article-title":"Multiagent bidirectionally-coordinated nets: Emergence of human-level coordination in learning to play starcraft combat games","author":"Peng","year":"2017"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref16","first-page":"2961","article-title":"Actor-attention-critic for multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Iqbal","year":"2019"},{"key":"ref17","first-page":"6382","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Lowe","year":"2017"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-019-09421-1"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICUS52573.2021.9641171"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33014213"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016079"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-019-05864-5"},{"key":"ref23","article-title":"DOP: Off-policy multi-agent decomposed policy gradients","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wang","year":"2021"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3121546"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3170050"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3023711"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2021.3127925"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3146976"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3084685"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2022.3193367"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2020.3044082"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2016.2542134"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/j.jfranklin.2019.06.007"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3004893"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/FUZZ-IEEE.2016.7737744"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2021.3112572"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2020.104112"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.2971780"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2009.03.012"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/2939672.2939694"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2020.3004807"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793661"},{"key":"ref43","article-title":"Starcraft II: A new challenge for reinforcement learning","author":"Vinyals","year":"2017"},{"key":"ref44","first-page":"5571","article-title":"Mean field multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Yang","year":"2018"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11492"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2004.1302409"},{"key":"ref47","article-title":"Unity: A general platform for intelligent agents","author":"Juliani","year":"2020"},{"key":"ref48","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja","year":"2018"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8967811"},{"key":"ref51","first-page":"7265","article-title":"Learning attentional communication for multiagent cooperation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Jiang","year":"2018"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1093\/comnet\/cnad009"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1098\/rspa.2021.0564"},{"key":"ref54","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017"},{"key":"ref55","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schulman","year":"2015"},{"key":"ref56","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih","year":"2016"},{"key":"ref57","first-page":"2944","article-title":"Learning continuous control policies by stochastic value gradients","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Heess","year":"2015"},{"key":"ref58","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto","year":"2018"},{"key":"ref59","first-page":"24611","article-title":"The surprising effectiveness of PPO in cooperative multi-agent games","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yu","year":"2022"}],"container-title":["IEEE Transactions on Emerging Topics in Computational Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7433297\/10480102\/10433866.pdf?arnumber=10433866","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,28]],"date-time":"2024-03-28T20:52:52Z","timestamp":1711659172000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10433866\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4]]},"references-count":59,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tetci.2024.3360282","relation":{},"ISSN":["2471-285X"],"issn-type":[{"value":"2471-285X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4]]}}}