{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,6]],"date-time":"2026-01-06T05:52:45Z","timestamp":1767678765113,"version":"3.48.0"},"reference-count":27,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"Institute of Information and Communications Technology Planning and Evaluation (IITP) through the Artificial Intelligence Convergence Innovation Human Resources Development"},{"name":"Korean Government [Ministry of Science and ICT (MSIT)]","award":["IITP-2025-RS-2023-00255968"],"award-info":[{"award-number":["IITP-2025-RS-2023-00255968"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2026]]},"DOI":"10.1109\/access.2025.3649082","type":"journal-article","created":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T18:39:29Z","timestamp":1767033569000},"page":"886-899","source":"Crossref","is-referenced-by-count":0,"title":["Enhanced Counter-UAS Capabilities in Close-In Weapon Systems Using Hierarchical Reinforcement Learning"],"prefix":"10.1109","volume":"14","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-0216-1986","authenticated-orcid":false,"given":"Wonhyuk","family":"Yun","sequence":"first","affiliation":[{"name":"LIG Nex1, Seongnam-si, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8569-0236","authenticated-orcid":false,"given":"Seok-Won","family":"Lee","sequence":"additional","affiliation":[{"name":"Graduate School of Information and Communication Technology, Ajou University, Suwon, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.23919\/CSMS.2023.0003"},{"key":"ref2","first-page":"32","article-title":"Counter drone technology: A review","author":"Gonz\u00e1lez-Jorge","year":"2024","journal-title":"Preprints"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.30890\/2567-5273.2023-26-01-052"},{"volume-title":"Reinforcement Learning: An Introduction","year":"2018","author":"Sutton","key":"ref4"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.38105\/spr.360apm5typ"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3298601"},{"key":"ref7","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.12716\/1001.17.02.23"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3023473"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1186\/s40537-024-01004-6"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.2322\/tjsass.58.163"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.5787\/39-2-115"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1002\/aisy.202300151"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.3390\/aerospace12110968"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCIS63642.2024.10779404"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.14429\/dsj.74.19504"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICUS58632.2023.10318249"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2805379"},{"key":"ref19","first-page":"1","article-title":"Modular hierarchical reinforcement learning for robotics: Improving scalability and generalizability","volume-title":"Proc. ICML Workshop New Frontiers Learn., Control, Dyn. Syst.","author":"Anca"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9635857"},{"volume-title":"Fundamentals of Aerodynamics","year":"2010","author":"Anderson","key":"ref21"},{"volume-title":"Design and Analysis of Modern Tracking Systems","year":"1999","author":"Blackman","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1115\/1.3662552"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1002\/0471221279"},{"key":"ref25","article-title":"Hierarchical reinforcement learning via advantage-weighted information maximization","author":"Osa","year":"2019","journal-title":"arXiv:1901.01365"},{"volume-title":"Theory and Application of Reward Shaping in Reinforcement Learning","year":"2004","author":"Laud","key":"ref26"},{"key":"ref27","article-title":"Reinforcement learning with anticipation: A hierarchical approach for long-horizon tasks","author":"Yu","year":"2025","journal-title":"arXiv:2509.05545"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/11323511\/11316625.pdf?arnumber=11316625","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,6]],"date-time":"2026-01-06T05:49:02Z","timestamp":1767678542000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11316625\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/access.2025.3649082","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2026]]}}}