{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T18:04:09Z","timestamp":1774721049658,"version":"3.50.1"},"reference-count":71,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,10,1]],"date-time":"2023-10-01T00:00:00Z","timestamp":1696118400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000266","name":"Engineering and Physical Sciences Research Council","doi-asserted-by":"publisher","award":["EP\/P00993X\/1"],"award-info":[{"award-number":["EP\/P00993X\/1"]}],"id":[{"id":"10.13039\/501100000266","id-type":"DOI","asserted-by":"publisher"}]},{"name":"President&#x0027;s Ph.D. Scholarship at Imperial College London"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Biomed. Health Inform."],"published-print":{"date-parts":[[2023,10]]},"DOI":"10.1109\/jbhi.2023.3303367","type":"journal-article","created":{"date-parts":[[2023,8,22]],"date-time":"2023-08-22T17:45:36Z","timestamp":1692726336000},"page":"5087-5098","source":"Crossref","is-referenced-by-count":19,"title":["Offline Deep Reinforcement Learning and Off-Policy Evaluation for Personalized Basal Insulin Control in Type 1 Diabetes"],"prefix":"10.1109","volume":"27","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9782-3470","authenticated-orcid":false,"given":"Taiyu","family":"Zhu","sequence":"first","affiliation":[{"name":"Centre for Bio-Inspired Technology, Department of Electrical and Electronic Engineering, Imperial College London, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3073-3128","authenticated-orcid":false,"given":"Kezhi","family":"Li","sequence":"additional","affiliation":[{"name":"Institute of Health Informatics, University College London, London, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2476-3857","authenticated-orcid":false,"given":"Pantelis","family":"Georgiou","sequence":"additional","affiliation":[{"name":"Centre for Bio-Inspired Technology, Department of Electrical and Electronic Engineering, Imperial College London, London, U.K."}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1136\/bmj.k1310"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.3389\/fendo.2021.795895"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/s00125-016-4022-4"},{"key":"ref56","first-page":"71","article-title":"The OhioT1DM dataset for blood glucose level prediction: Update 2020","author":"marling","year":"0","journal-title":"Proc 5th KDH Workshop"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.2016.2535241"},{"key":"ref59","first-page":"1","article-title":"d3rlpy: An offline deep reinforcement library","author":"seno","year":"0","journal-title":"Proc Offline Reinforcement Learn 
Workshop"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.2016.2590498"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2023.01.037"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1177\/1932296819851135"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1016\/j.cmpb.2017.05.010"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/S2213-8587(16)30193-0"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1089\/dia.2011.0084"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1001\/jama.2013.277818"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1177\/1932296814532906"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.artmed.2020.101836"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.artmed.2020.101964"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/JBHI.2020.3040225"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.2196\/10775"},{"key":"ref51","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","author":"fujimoto","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.2337\/dci19-0028"},{"key":"ref46","first-page":"1","article-title":"Autoregressive dynamics models for offline policy evaluation and optimization","author":"zhang","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref45","first-page":"3703","article-title":"Batch policy learning under constraints","author":"le","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref48","first-page":"1","article-title":"Benchmarks for deep off-policy evaluation","author":"fu","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref47","first-page":"1","article-title":"Hyperparameter selection for offline reinforcement learning","author":"paine","year":"0","journal-title":"Proc Offline Reinforcement Learn Workshop"},{"key":"ref42","first-page":"1179","article-title":"Conservative Q-learning for offline reinforcement learning","volume":"33","author":"kumar","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref41","first-page":"1","article-title":"Stabilizing off-policy Q-learning via bootstrapping error reduction","volume":"32","author":"kumar","year":"0","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref44","first-page":"1","article-title":"Off-policy evaluation and learning from logged bandit feedback: Error reduction via surrogate policy","author":"xie","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref43","first-page":"5774","article-title":"Offline reinforcement learning with fisher divergence critic regularization","author":"kostrikov","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref49","first-page":"2","article-title":"Model selection for offline reinforcement learning: Practical considerations for healthcare settings","author":"tang","year":"0","journal-title":"Proc Mach Learn Healthcare Conf"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.4093\/dmj.2019.0121"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1177\/1932296816672689"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/S0140-6736(18)30297-6"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1056\/NEJMoa1003795"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1186\/2251-6581-11-17"},{"key":"ref6","author":"hussain","year":"2015","journal-title":"Insulin Pumps and 
Continuous Glucose Monitoring Made Easy"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1111\/j.1464-5491.2008.02486.x"},{"key":"ref40","first-page":"20132","article-title":"A minimalist approach to offline reinforcement learning","volume":"34","author":"fujimoto","year":"2021","journal-title":"Adv Neural Inf Process Syst"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1177\/193229680900300106"},{"key":"ref34","article-title":"In-silico evaluation of glucose regulation using policy gradient reinforcement learning for patients with type 1 diabetes mellitus","volume":"10","author":"myhre","year":"2020","journal-title":"Appl Sci"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1089\/dia.2013.0377"},{"key":"ref36","doi-asserted-by":"crossref","first-page":"26","DOI":"10.1177\/1932296813514502","article-title":"The UVA\/PADOVA type 1 diabetes simulator: New features","volume":"8","author":"man","year":"2014","journal-title":"J Diabetes Sci Technol"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/JBHI.2020.3014556"},{"key":"ref30","first-page":"508","article-title":"Deep reinforcement learning for closed-loop blood glucose control","author":"fox","year":"0","journal-title":"Proc Mach Learn Healthcare Conf"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.3390\/s20185058"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-53352-6_5"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.2337\/diaclin.26.2.77"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nrdp.2017.16"},{"key":"ref39","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","author":"fujimoto","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1088\/0967-3334\/25\/4\/010"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.3390\/s19245386"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0217301"},{"key":"ref24","first-page":"1334","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"levine","year":"2016","journal-title":"J Mach Learn Res"},{"key":"ref68","first-page":"1","article-title":"Off-policy evaluation via off-policy classification","volume":"32","author":"irpan","year":"2019","journal-title":"Adv Neural Inf Process Syst"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2022.3143375"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1177\/19322968211060074"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1126\/sciadv.aap7885"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.2022.3187703"},{"key":"ref20","first-page":"74","article-title":"A deep learning algorithm for personalized blood glucose prediction","author":"zhu","year":"0","journal-title":"Proc 3rd Int Workshop Knowl Discov Healthcare Data"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/EMBC.2019.8856846"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1038\/s42255-020-0212-y"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s41666-020-00068-2"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1007\/s13300-020-00823-z"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/JBHI.2019.2931842"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-019-0202-1"},{"key":"ref28","first-page":"1","article-title":"Reinforcement learning for 
blood glucose control: Challenges and opportunities","author":"fox","year":"0","journal-title":"Proc Reinforcement Learn Real Life Workshop 36th Int Conf Mach Learn"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-021-04301-9"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/JBHI.2020.3002022"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1038\/s41591-018-0213-5"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/TBME.2020.3004031"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1177\/1932296818759558"}],"container-title":["IEEE Journal of Biomedical and Health Informatics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221020\/10272722\/10226213.pdf?arnumber=10226213","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,30]],"date-time":"2023-10-30T19:20:14Z","timestamp":1698693614000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10226213\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10]]},"references-count":71,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/jbhi.2023.3303367","relation":{},"ISSN":["2168-2194","2168-2208"],"issn-type":[{"value":"2168-2194","type":"print"},{"value":"2168-2208","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10]]}}}
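The record above is a standard Crossref REST API "work" envelope (status / message-type / message). As a minimal sketch of how such a record can be re-fetched and its headline fields read, assuming network access to the public api.crossref.org endpoint (the `fetch_work` helper name and the User-Agent string are illustrative, not part of any library):

```python
# Minimal sketch: re-fetch this Crossref "work" record and print the
# headline bibliographic fields. Assumes the public Crossref REST API
# at https://api.crossref.org is reachable; `fetch_work` is a
# hypothetical helper, not an existing library function.
import requests

def fetch_work(doi: str) -> dict:
    # Crossref etiquette: identify your client with a mailto in the User-Agent.
    resp = requests.get(
        f"https://api.crossref.org/works/{doi}",
        headers={"User-Agent": "example-client/0.1 (mailto:you@example.org)"},
        timeout=10,
    )
    resp.raise_for_status()
    payload = resp.json()
    # Envelope check: a single-work response carries message-type "work".
    assert payload["message-type"] == "work"
    return payload["message"]

if __name__ == "__main__":
    work = fetch_work("10.1109/jbhi.2023.3303367")
    print(work["title"][0])             # article title
    print(work["container-title"][0])   # journal name
    print(", ".join(f"{a['given']} {a['family']}" for a in work["author"]))
    print(f"vol. {work['volume']}, no. {work['journal-issue']['issue']}, "
          f"pp. {work['page']}, cited by {work['is-referenced-by-count']}")
```

Note that, as in the record above, fields such as "title" and "container-title" are JSON arrays even when they hold a single value, so the sketch indexes the first element.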