{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T04:19:31Z","timestamp":1772338771603,"version":"3.50.1"},"reference-count":32,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,7]]},"DOI":"10.1109\/ijcnn48605.2020.9207088","type":"proceedings-article","created":{"date-parts":[[2020,9,30]],"date-time":"2020-09-30T00:40:33Z","timestamp":1601426433000},"page":"1-8","source":"Crossref","is-referenced-by-count":43,"title":["Beyond-Visual-Range Air Combat Tactics Auto-Generation by Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Haiyin","family":"Piao","sequence":"first","affiliation":[]},{"given":"Zhixiao","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Guanglei","family":"Meng","sequence":"additional","affiliation":[]},{"given":"Hechang","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Bohao","family":"Qu","sequence":"additional","affiliation":[]},{"given":"Kuijun","family":"Lang","sequence":"additional","affiliation":[]},{"given":"Yang","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Shengqi","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Xuanqi","family":"Peng","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref32","author":"shaw","year":"1985","journal-title":"Fighter Combat"},{"key":"ref31","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref30","first-page":"249","article-title":"Understanding the difficulty of training deep feedforward neural networks","author":"glorot","year":"2010","journal-title":"Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2012.6252763"},{"key":"ref11","doi-asserted-by":"crossref","first-page":"354","DOI":"10.1038\/nature24270","article-title":"Mastering the game of go without human knowledge","volume":"550","author":"silver","year":"2017","journal-title":"Nature"},{"key":"ref12","first-page":"2817","article-title":"Cold-start reinforcement learning with softmax policy gradient","author":"ding","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref13","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(01)00129-1"},{"key":"ref15","article-title":"Human-level 
performance in first-person multiplayer games with population-based deep reinforcement learning","author":"jaderberg","year":"2018"},{"key":"ref16","article-title":"Dota 2 with large scale deep reinforcement learning","author":"berner","year":"2019"},{"key":"ref17","doi-asserted-by":"crossref","first-page":"350","DOI":"10.1038\/s41586-019-1724-z","article-title":"Grandmaster level in starcraft ii using multi-agent reinforcement learning","volume":"575","author":"vinyals","year":"2019","journal-title":"Nature"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref19","article-title":"Air combat maneuvering","year":"2019"},{"key":"ref28","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.2514\/1.17168"},{"key":"ref27","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v32i1.11794","article-title":"Counterfactual multi-agent policy gradients","author":"foerster","year":"2018","journal-title":"Thirty-Second AAAI Conference on Artificial Intelligence"},{"key":"ref3","first-page":"859","article-title":"An influence diagram approach to one-on-one air combat","volume":"2","author":"virtanen","year":"2002","journal-title":"Proceedings of the 10th International Symposium on Differential Games and Applications St Petersburg Russia"},{"key":"ref6","author":"vinberg","year":"2009","journal-title":"Guided Reinforcement Learning applied to Air-Combat Simulation"},{"key":"ref29","article-title":"Starcraft ii: A new challenge for reinforcement learning","author":"vinyals","year":"2017"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.4172\/2167-0374.1000144"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-35288-2_5"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.2514\/1.46815"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.ast.2019.105534"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.2514\/6.1989-3312"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ast.2017.11.014"},{"key":"ref20","author":"bonanni","year":"1993","journal-title":"The art of the kill"},{"key":"ref22","first-page":"805","article-title":"Fictitious self-play in extensive-form games","author":"heinrich","year":"2015","journal-title":"International Conference on Machine Learning"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.geb.2005.08.005"},{"key":"ref24","article-title":"Emergent complexity via multi-agent competition","author":"bansal","year":"2017"},{"key":"ref23","article-title":"Deep reinforcement learning from self-play in imperfect-information games","author":"heinrich","year":"2016"},{"key":"ref26","article-title":"Asymmetric actor critic for image-based robot learning","author":"pinto","year":"2017"},{"key":"ref25","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"lowe","year":"2017","journal-title":"Advances in neural information processing systems"}],"event":{"name":"2020 International Joint Conference on Neural Networks (IJCNN)","location":"Glasgow, United Kingdom","start":{"date-parts":[[2020,7,19]]},"end":{"date-parts":[[2020,7,24]]}},"container-title":["2020 International Joint Conference on Neural Networks 
(IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9200848\/9206590\/09207088.pdf?arnumber=9207088","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,21]],"date-time":"2022-11-21T06:23:57Z","timestamp":1669011837000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9207088\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,7]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/ijcnn48605.2020.9207088","relation":{},"subject":[],"published":{"date-parts":[[2020,7]]}}}