{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T03:41:57Z","timestamp":1769917317312,"version":"3.49.0"},"reference-count":42,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Basic Research Program of China","doi-asserted-by":"publisher","award":["2019YFB1406201"],"award-info":[{"award-number":["2019YFB1406201"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["CUC2019B021"],"award-info":[{"award-number":["CUC2019B021"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2020]]},"DOI":"10.1109\/access.2020.3022638","type":"journal-article","created":{"date-parts":[[2020,9,8]],"date-time":"2020-09-08T19:46:32Z","timestamp":1599594392000},"page":"163334-163343","source":"Crossref","is-referenced-by-count":35,"title":["Recurrent MADDPG for Object Detection and Assignment in Combat 
Tasks"],"prefix":"10.1109","volume":"8","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6905-087X","authenticated-orcid":false,"given":"Xiaolong","family":"Wei","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1275-9244","authenticated-orcid":false,"given":"Lifang","family":"Yang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4549-0125","authenticated-orcid":false,"given":"Gang","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Bing","family":"Wang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Deep decentralized multi-task multi-agent RL under partial observability","author":"omidshafiei","year":"2017"},{"key":"ref38","article-title":"Parameter sharing deep deterministic policy gradient for cooperative multi-agent reinforcement learning","author":"chu","year":"2017","journal-title":"arXiv 1710 00336"},{"key":"ref33","first-page":"1","article-title":"Deterministic policy gradient algorithms","volume":"1","author":"silver","year":"2014","journal-title":"Proc Int Conf Mach Learn (ICML)"},{"key":"ref32","article-title":"Deep reinforcement learning with double Q-learning","author":"van hasselt","year":"2015","journal-title":"arXiv 1509 06461 [cs]"},{"key":"ref31","article-title":"Prioritized experience replay","author":"schaul","year":"2015","journal-title":"arXiv 1511 05952"},{"key":"ref30","article-title":"Playing Atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"arXiv 1312 5602"},{"key":"ref37","first-page":"2137","article-title":"Learning to communicate with deep multi-agent reinforcement learning","author":"foerster","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref36","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"arXiv 1707 
06347"},{"key":"ref35","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"CoRR"},{"key":"ref34","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2009.5160494"},{"key":"ref40","article-title":"Counterfactual multi-agent policy gradients","author":"foerster","year":"2017","journal-title":"arXiv 1705 08926"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2007.4399095"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2009.06.006"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2018.2823329"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0172395"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1049\/iet-gtd.2016.0075"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2004.839380"},{"key":"ref17","doi-asserted-by":"crossref","first-page":"152","DOI":"10.1518\/155534310X522851","article-title":"Adaptive automation based on an object-oriented task model: Implementation and evaluation in a realistic c2 environment","volume":"4","author":"greef","year":"2010","journal-title":"J Cognit Eng Decis Making"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1121\/1.5035588"},{"key":"ref19","first-page":"1","article-title":"Cognitive multistatic AUV networks","author":"braca","year":"2014","journal-title":"Proc 17th Int Conf Inf Fusion (FUSION)"},{"key":"ref28","article-title":"Playing doom with SLAM-augmented deep reinforcement learning","author":"bhatti","year":"2016","journal-title":"arXiv 1612 00380"},{"key":"ref4","first-page":"1008","article-title":"Actor-critic algorithms","author":"konda","year":"2000","journal-title":"Proc Neural Inf Process Syst 
Conf"},{"key":"ref27","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref3","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref6","first-page":"2961","article-title":"Actor-attention-critic for multi-agent reinforcement learning","author":"iqbal","year":"2019","journal-title":"Proceedings 36th Int Conf Mach Learn"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.4258\/hir.2016.22.4.351"},{"key":"ref5","article-title":"R-MADDPG for partially observable environments and limited communication","author":"wang","year":"2020","journal-title":"arXiv 2002 06684"},{"key":"ref8","article-title":"Qatten: A general framework for cooperative multiagent reinforcement learning","author":"yang","year":"2020","journal-title":"arXiv 2002 03939"},{"key":"ref7","article-title":"Q-value path decomposition for deep multiagent reinforcement learning","author":"yang","year":"2020","journal-title":"arXiv 2002 03950"},{"key":"ref2","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"lowe","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref9","first-page":"96","article-title":"Cooperative control of UAV based on multi-agent system","author":"han","year":"2013","journal-title":"Proc IEEE 8th Conf Ind Electron Appl (ICIEA)"},{"key":"ref1","article-title":"Memory-based control with recurrent neural networks","author":"heess","year":"2015","journal-title":"arXiv 1512 04455"},{"key":"ref20","article-title":"Tactical task of operation platform recognition in situation assessment for antiaircraft defending","author":"yuan","year":"2015","journal-title":"Command Contr 
Simul"},{"key":"ref22","article-title":"DSBN used for recognition of tactical intention","author":"shun","year":"2014","journal-title":"Syst Eng Electron"},{"key":"ref21","first-page":"2398","article-title":"Threat assessment of aerial targets based on hybrid Bayesian network","volume":"32","author":"meng","year":"2010","journal-title":"Xi Tong Gong Cheng Yu Dian Zi Ji Shu\/Syst Eng Electron"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.11418"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICRAE48301.2019.9043821"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2018.2890773"},{"key":"ref23","first-page":"2374","article-title":"Tactical intention recognition based on multi-entity Bayesian network","volume":"32","author":"deng","year":"2010","journal-title":"Syst Eng Electron"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-018-0102-6"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2743240"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8948470\/09187817.pdf?arnumber=9187817","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,12,17]],"date-time":"2021-12-17T19:55:46Z","timestamp":1639770946000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9187817\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/access.2020.3022638","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]}}}