{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,11]],"date-time":"2025-12-11T07:44:24Z","timestamp":1765439064223,"version":"3.44.0"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,7,8]],"date-time":"2025-07-08T00:00:00Z","timestamp":1751932800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,7,8]],"date-time":"2025-07-08T00:00:00Z","timestamp":1751932800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,7,8]]},"DOI":"10.23919\/acc63710.2025.11107606","type":"proceedings-article","created":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T18:17:51Z","timestamp":1755800271000},"page":"1145-1152","source":"Crossref","is-referenced-by-count":1,"title":["Deep Reinforcement Learning for Intervention of Partially Observable Regulatory Networks"],"prefix":"10.23919","author":[{"given":"Seyed Hamid","family":"Hosseini","sequence":"first","affiliation":[{"name":"Northeastern University,Department of Electrical and Computer Engineering"}]},{"given":"Mahdi","family":"Imani","sequence":"additional","affiliation":[{"name":"Northeastern University,Department of Electrical and Computer Engineering"}]}],
"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TCBB.2024.3383814"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCBB.2020.2973636"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TCBB.2024.3402220"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/LCSYS.2022.3229054"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1002\/rnc.5909"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2007.908964"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1177\/1176935118790247"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TCNS.2022.3232527"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2014.02.034"},{"key":"ref10","article-title":"Qmdp-net: Deep learning for planning under partial observability","volume":"30","author":"Karkus","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CoDIT62066.2024.10708136"},{"article-title":"Maximum a posteriori policy optimisation","year":"2018","author":"Abdolmaleki","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CDC56724.2024.10886018"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2024.3515939"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1080\/21642583.2024.2329260"},{"key":"ref16","first-page":"486","article-title":"A theoretical analysis of deep q-learning","volume-title":"Learning for dynamics and control","author":"Fan","year":"2020"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1049\/2024\/7966713"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2014.02.011"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2024.3358261"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1117\/12.281504"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1142\/S0218339012400049"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4612-4054-9"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1002\/SERIES1345"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref25","first-page":"1167","article-title":"Classes of multiagent q-learning dynamics with epsilon-greedy exploration","volume-title":"Proceedings of the 27th International Conference on Machine Learning (ICML-10)","author":"Wunder"},{"key":"ref26","article-title":"Reinforcement learning: An introduction","author":"Sutton","year":"2018","journal-title":"A Bradford Book"},{"article-title":"Adam: A method for stochastic optimization","year":"2014","author":"Kingma","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CCTA60707.2024.10666558"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/S1535-6108(02)00045-4"}],"event":{"name":"2025 American Control Conference (ACC)","start":{"date-parts":[[2025,7,8]]},"location":"Denver, CO, USA","end":{"date-parts":[[2025,7,10]]}},"container-title":["2025 American Control Conference (ACC)"],
"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11107441\/11107442\/11107606.pdf?arnumber=11107606","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T05:41:06Z","timestamp":1755841266000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11107606\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,8]]},"references-count":29,"URL":"https:\/\/doi.org\/10.23919\/acc63710.2025.11107606","relation":{},"subject":[],"published":{"date-parts":[[2025,7,8]]}}}