{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T16:58:44Z","timestamp":1770915524248,"version":"3.50.1"},"reference-count":20,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"7","license":[{"start":{"date-parts":[[2023,7,1]],"date-time":"2023-07-01T00:00:00Z","timestamp":1688169600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,7,1]],"date-time":"2023-07-01T00:00:00Z","timestamp":1688169600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,7,1]],"date-time":"2023-07-01T00:00:00Z","timestamp":1688169600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100006751","name":"U.S. Army","doi-asserted-by":"publisher","award":["W15P7T-20-C-0006"],"award-info":[{"award-number":["W15P7T-20-C-0006"]}],"id":[{"id":"10.13039\/100006751","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Commun. Lett."],"published-print":{"date-parts":[[2023,7]]},"DOI":"10.1109\/lcomm.2023.3274594","type":"journal-article","created":{"date-parts":[[2023,5,10]],"date-time":"2023-05-10T23:25:53Z","timestamp":1683761153000},"page":"1789-1793","source":"Crossref","is-referenced-by-count":2,"title":["Generalization of Deep Reinforcement Learning for Jammer-Resilient Frequency and Power Allocation"],"prefix":"10.1109","volume":"27","author":[{"given":"Swatantra","family":"Kafle","sequence":"first","affiliation":[{"name":"Marconi-Rosenblatt AI\/ML Innovation Laboratory, ANDRO Computational Solutions, Rome, LLC, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4059-6481","authenticated-orcid":false,"given":"Jithin","family":"Jagannath","sequence":"additional","affiliation":[{"name":"Marconi-Rosenblatt AI\/ML Innovation Laboratory, ANDRO Computational Solutions, Rome, LLC, USA"}]},{"given":"Zackary","family":"Kane","sequence":"additional","affiliation":[{"name":"Marconi-Rosenblatt AI\/ML Innovation Laboratory, ANDRO Computational Solutions, Rome, LLC, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2267-8520","authenticated-orcid":false,"given":"Noor","family":"Biswas","sequence":"additional","affiliation":[{"name":"Marconi-Rosenblatt AI\/ML Innovation Laboratory, ANDRO Computational Solutions, Rome, LLC, USA"}]},{"given":"Prem Sagar Vasanth","family":"Kumar","sequence":"additional","affiliation":[{"name":"Marconi-Rosenblatt AI\/ML Innovation Laboratory, ANDRO Computational Solutions, Rome, LLC, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4459-5336","authenticated-orcid":false,"given":"Anu","family":"Jagannath","sequence":"additional","affiliation":[{"name":"Marconi-Rosenblatt AI\/ML Innovation Laboratory, ANDRO Computational Solutions, Rome, LLC, USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2021.3108129"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2831240"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2904329"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1002\/9781119562306.ch13"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2018.2879433"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.adhoc.2019.101913"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2017.2654684"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2011.110406"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/IEEECONF51394.2020.9443301"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1186\/s13634-019-0637-1"},{"key":"ref11","article-title":"A deep Q-learning method for downlink power allocation in multi-cell networks","author":"Ahmed","year":"2019","journal-title":"arXiv:1904.13032"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2933973"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2020.3032991"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/GCWkshps52748.2021.9681985"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3522783.3529530"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-60990-0_12"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.20517\/ir.2021.02"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/j.comnet.2022.109489"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3345768.3355908"},{"key":"ref20","volume-title":"Outdoor Demonstration of Deep Reinforcement Learning for Jammer-Resilient Frequency and Power Allocation","year":"2023"}],"container-title":["IEEE Communications Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/4234\/10178697\/10122217.pdf?arnumber=10122217","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,14]],"date-time":"2024-03-14T10:31:28Z","timestamp":1710412288000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10122217\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,7]]},"references-count":20,"journal-issue":{"issue":"7"},"URL":"https:\/\/doi.org\/10.1109\/lcomm.2023.3274594","relation":{},"ISSN":["1089-7798","1558-2558","2373-7891"],"issn-type":[{"value":"1089-7798","type":"print"},{"value":"1558-2558","type":"electronic"},{"value":"2373-7891","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,7]]}}}