{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,20]],"date-time":"2026-01-20T13:38:50Z","timestamp":1768916330935,"version":"3.49.0"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,14]],"date-time":"2025-12-14T00:00:00Z","timestamp":1765670400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,14]],"date-time":"2025-12-14T00:00:00Z","timestamp":1765670400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,14]]},"DOI":"10.1109\/icpads67057.2025.11323124","type":"proceedings-article","created":{"date-parts":[[2026,1,14]],"date-time":"2026-01-14T20:36:54Z","timestamp":1768423014000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Multi-UAV Cooperative Pursuit Scheme via Multi-Agent Reinforcement Learning Approach"],"prefix":"10.1109","author":[{"given":"Jiahong","family":"Liu","sequence":"first","affiliation":[{"name":"College of Computer Science and Engineering, Shandong University of Science and Technology,Qingdao,China"}]},{"given":"Hang","family":"Tao","sequence":"additional","affiliation":[{"name":"College of Computer Science and Engineering, Shandong University of Science and Technology,Qingdao,China"}]},{"given":"Yang","family":"Zhao","sequence":"additional","affiliation":[{"name":"College of Computer Science and Engineering, Shandong University of Science and Technology,Qingdao,China"}]},{"given":"Chao","family":"Liu","sequence":"additional","affiliation":[{"name":"Ocean University of China,Department of Computer Science and Technology,Qingdao,China"}]},{"given":"Hanjiang","family":"Luo","sequence":"additional","affiliation":[{"name":"College of Computer Science and Engineering, Shandong 
University of Science and Technology,Qingdao,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2024.3364230"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2024.3365863"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2025.3562872"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2023.3238040"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IWCMC48107.2020.9148519"},{"key":"ref6","first-page":"78836","article-title":"Beyond single stationary policies: Meta-task players as naturally superior collaborators","volume-title":"Proceedings of the 38th International Conference on Neural Information Processing Systems","author":"Haoming","year":"2024"},{"key":"ref7","first-page":"24611","article-title":"The surprising effectiveness of ppo in cooperative multi-agent games","volume-title":"Proceedings of the 36th International Conference on Neural Information Processing Systems","author":"Yu","year":"2022"},{"key":"ref8","first-page":"6382","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proceedings of the 31st International Conference on Neural Information Processing Systems","author":"Lowe","year":"2017"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2025.3535961"},{"key":"ref11","first-page":"2961","article-title":"Actor-attention-critic for multiagent reinforcement learning","volume-title":"International conference on machine 
learning","author":"Iqbal","year":"2019"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.123018"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2024.124965"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM42981.2021.9488716"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM52122.2024.10621373"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2023.3288379"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2019.2902559"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM52122.2024.10621196"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1810.11363"},{"key":"ref20","first-page":"18932","article-title":"Revisiting deep learning models for tabular data","author":"Gorishniy","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i8.16826"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TCBB.2019.2911071"},{"key":"ref23","first-page":"3146","article-title":"Lightgbm: A highly efficient gradient boosting decision tree","volume-title":"Proceedings of the 31st International Conference on Neural Information Processing Systems","author":"Meng GuolinKe","year":"2017"}],"event":{"name":"2025 IEEE 31st International Conference on Parallel and Distributed Systems (ICPADS)","location":"Hefei, China","start":{"date-parts":[[2025,12,14]]},"end":{"date-parts":[[2025,12,18]]}},"container-title":["2025 IEEE 31st International Conference on Parallel and Distributed Systems 
(ICPADS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11322805\/11322871\/11323124.pdf?arnumber=11323124","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,15]],"date-time":"2026-01-15T07:50:40Z","timestamp":1768463440000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11323124\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,14]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/icpads67057.2025.11323124","relation":{},"subject":[],"published":{"date-parts":[[2025,12,14]]}}}