{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,13]],"date-time":"2026-03-13T14:59:51Z","timestamp":1773413991305,"version":"3.50.1"},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,12,4]],"date-time":"2023-12-04T00:00:00Z","timestamp":1701648000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,12,4]],"date-time":"2023-12-04T00:00:00Z","timestamp":1701648000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000181","name":"Air Force Office of Scientific Research","doi-asserted-by":"publisher","award":["FA9550-20-1-0090"],"award-info":[{"award-number":["FA9550-20-1-0090"]}],"id":[{"id":"10.13039\/100000181","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["CNS-2232048,CNS-2204445"],"award-info":[{"award-number":["CNS-2232048,CNS-2204445"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,12,4]]},"DOI":"10.1109\/globecom54140.2023.10436850","type":"proceedings-article","created":{"date-parts":[[2024,2,26]],"date-time":"2024-02-26T19:45:36Z","timestamp":1708976736000},"page":"6328-6333","source":"Crossref","is-referenced-by-count":13,"title":["Attention-Based Open RAN Slice Management Using Deep Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Fatemeh","family":"Lotfi","sequence":"first","affiliation":[{"name":"Clemson University,Holcombe Department of Electrical and Computer Engineering,Clemson,SC,USA"}]},{"given":"Fatemeh","family":"Afghah","sequence":"additional","affiliation":[{"name":"Clemson University,Holcombe Department of Electrical and 
Computer Engineering,Clemson,SC,USA"}]},{"given":"Jonathan","family":"Ashdown","sequence":"additional","affiliation":[{"name":"Air Force Research Laboratory,Rome,NY,USA,13441"}]}],"member":"263","reference":[{"key":"ref1","article-title":"O-RAN use cases and deployment scenarios. towards open and smart ran","author":"Li","year":"2020","journal-title":"White paper"},{"issue":"3","key":"ref2","volume-title":"5G; system architecture for the 5G system (5GS)","year":"2020"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2872781"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TBC.2020.3031742"},{"issue":"3","key":"ref5","volume-title":"Study on new radio access technology: Radio access architecture and interfaces","year":"2017"},{"key":"ref6","article-title":"Study on o-ran slicing-v2.00","year":"2020","journal-title":"O-RAN. RAN.WG1.Study-on-O-RAN-Slicing-v02.00 Technical Specification"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/comst.2023.3239220"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/MCOMSTD.0001.2200017"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CCNC51644.2023.10059966"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICC45855.2022.9838763"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/GLOBECOM48099.2022.10001658"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/GCWkshps56602.2022.10008614"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/LCN58197.2023.10223373"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2022.3188013"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/WCNC51071.2022.9771605"},{"key":"ref16","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"International conference on machine 
learning","author":"Haarnoja","year":"2018"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref18","first-page":"2961","article-title":"Actor-attention-critic for multi-agent reinforcement learning","volume-title":"International conference on machine learning","author":"Iqbal","year":"2019"}],"event":{"name":"GLOBECOM 2023 - 2023 IEEE Global Communications Conference","location":"Kuala Lumpur, Malaysia","start":{"date-parts":[[2023,12,4]]},"end":{"date-parts":[[2023,12,8]]}},"container-title":["GLOBECOM 2023 - 2023 IEEE Global Communications Conference"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10436708\/10436716\/10436850.pdf?arnumber=10436850","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:51:28Z","timestamp":1709254288000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10436850\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,4]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/globecom54140.2023.10436850","relation":{},"subject":[],"published":{"date-parts":[[2023,12,4]]}}}