{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,15]],"date-time":"2026-04-15T17:55:38Z","timestamp":1776275738274,"version":"3.50.1"},"reference-count":55,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2022,9,1]],"date-time":"2022-09-01T00:00:00Z","timestamp":1661990400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Netw. Serv. Manage."],"published-print":{"date-parts":[[2022,9]]},"DOI":"10.1109\/tnsm.2022.3191746","type":"journal-article","created":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T20:27:48Z","timestamp":1658176068000},"page":"2751-2766","source":"Crossref","is-referenced-by-count":8,"title":["ICRAN: Intelligent Control for Self-Driving RAN Based on Deep Reinforcement Learning"],"prefix":"10.1109","volume":"19","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9605-4043","authenticated-orcid":false,"given":"Azza H.","family":"Ahmed","sequence":"first","affiliation":[{"name":"Center for Resilient Networks and Applications, Simula Metropolitan Center for Digital Engineering, Oslo, Norway"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9964-214X","authenticated-orcid":false,"given":"Ahmed","family":"Elmokashfi","sequence":"additional","affiliation":[{"name":"Center for Resilient Networks and Applications, Oslo Metropolitan University, Oslo, Norway"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TNET.2013.2256431"},{"key":"ref38","year":"2021","journal-title":"5G-Lena 
Module"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TNSM.2021.3096673"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/MNET.2018.1800109"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2019.2954595"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2021.3053601"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ISWCS.2016.7600929"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2021.3063188"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3088450"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/WCNC.2018.8377343"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2021.3063822"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2019.2916583"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2020.2988367"},{"key":"ref2","year":"2021","journal-title":"Network Automation Efficiency Resilience and the Pathway to 5G"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.2017.1600951"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2016.2525342"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/VTCSpring.2018.8417638"},{"key":"ref21","year":"2018","journal-title":"O-RAN Towards an Open and Smart RAN"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TNSM.2021.3098193"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/GLOCOM.2018.8647358"},{"key":"ref26","article-title":"Federated learning for RAN slicing in beyond 5G networks","author":"abouaomar","year":"2022","journal-title":"arXiv 2206 11328"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/GLOBECOM42002.2020.9322106"},{"key":"ref50","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"lowe","year":"2017","journal-title":"arXiv 1706 02275"},{"key":"ref51","article-title":"Learning to communicate with 
deep multi-agent reinforcement learning","author":"foerster","year":"2016","journal-title":"arXiv 1605 06676"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1016\/j.comnet.2020.107763"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/MCOMSTD.101.2000014"},{"key":"ref53","year":"2018"},{"key":"ref52","first-page":"113","article-title":"ns-3 meets OpenAI gym: The playground for machine learning in networking research","author":"gawłowicz","year":"2019","journal-title":"Proc 22nd Int ACM Conf Model Anal Simul Wireless Mobile Syst"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3117811.3117813"},{"key":"ref11","article-title":"OpenAI gym","author":"brockman","year":"2016","journal-title":"arXiv 1606 01540 [cs]"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/WINCOM.2015.7381300"},{"key":"ref12","year":"2021","journal-title":"NS-3 Network Simulator"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/VTCSpring.2015.7145953"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TNSM.2016.2597295"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2018.2815638"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2021.3067807"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2022.3158270"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2014.2371999"},{"key":"ref19","year":"2012","journal-title":"Network Functions Virtualization An Introduction Benefits Enablers Challenges & Call for Action"},{"key":"ref4","article-title":"Why (and how) networks should run themselves","author":"feamster","year":"2017","journal-title":"arXiv 1710 
11583"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1049\/ecej:20000307"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/WCNC49053.2021.9417363"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.101.2001120"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2021.3090423"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2019.2933973"},{"key":"ref49","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","author":"sutton","year":"2000","journal-title":"Advances in neural information processing systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3072435"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2015.2477945"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref48","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref47","first-page":"1","article-title":"Half field offense: An environment for multiagent learning and ad hoc teamwork","author":"hausknecht","year":"2016","journal-title":"Proc AAMAS Adaptive Learn Agents (ALA) Workshop"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1201\/9780429019777-68"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/WD.2018.8361696"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/SURV.2012.060912.00100"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-021-09996-w"}],"container-title":["IEEE Transactions on Network and Service 
Management"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/4275028\/9917435\/09831432.pdf?arnumber=9831432","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,14]],"date-time":"2022-10-14T18:00:22Z","timestamp":1665770422000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9831432\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,9]]},"references-count":55,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tnsm.2022.3191746","relation":{},"ISSN":["1932-4537","2373-7379"],"issn-type":[{"value":"1932-4537","type":"electronic"},{"value":"2373-7379","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,9]]}}}