{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,8]],"date-time":"2026-02-08T04:21:05Z","timestamp":1770524465364,"version":"3.49.0"},"reference-count":55,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE J. Biomed. Health Inform."],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1109\/jbhi.2024.3415115","type":"journal-article","created":{"date-parts":[[2024,6,17]],"date-time":"2024-06-17T18:20:31Z","timestamp":1718648431000},"page":"6268-6279","source":"Crossref","is-referenced-by-count":5,"title":["Pruning the Way to Reliable Policies: A Multi-Objective Deep Q-Learning Approach to Critical Care"],"prefix":"10.1109","volume":"28","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3750-0159","authenticated-orcid":false,"given":"Ali","family":"Shirali","sequence":"first","affiliation":[{"name":"University of California, Berkeley, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-4514-4430","authenticated-orcid":false,"given":"Alexander","family":"Schubert","sequence":"additional","affiliation":[{"name":"University of California, Berkeley, CA, USA"}]},{"given":"Ahmed","family":"Alaa","sequence":"additional","affiliation":[{"name":"University of California, Berkeley, CA, USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1126\/scitranslmed.aab3719"},{"key":"ref2","article-title":"A reinforcement learning approach to weaning of mechanical ventilation in intensive care units","volume-title":"Proc. 33rd Conf. Uncertainty Artif. Intell.","author":"Prasad","year":"2017"},{"key":"ref3","first-page":"147","article-title":"Continuous state-space models for optimal sepsis treatment: A deep reinforcement learning approach","volume-title":"Proc. Mach. Learn. Healthcare Conf.","author":"Raghu","year":"2017"},{"key":"ref4","article-title":"Model-based reinforcement learning for sepsis treatment","author":"Raghu","year":"2018"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1142\/9789813279827_0029"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1038\/s41591-018-0213-5"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/embc.2018.8513203"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-021-00388-6"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/EMBC48229.2022.9871055"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pdig.0000012"},{"key":"ref11","first-page":"4856","article-title":"Medical dead-ends and learning to identify high-risk states and treatments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Fatemi","year":"2021"},{"key":"ref12","first-page":"34272","article-title":"Leveraging factored action spaces for efficient offline reinforcement learning in healthcare","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Tang","year":"2022"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1056\/AIoa2300032"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2022.105585"},{"key":"ref15","first-page":"887","article-title":"Improving sepsis treatment strategies by combining deep and kernel-based reinforcement learning","volume-title":"Proc. Amer. Med. Inform. Assoc. Annu. Symp.","author":"Peng","year":"2018"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.artmed.2023.102726"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-023-00755-5"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1186\/s12911-023-02126-2"},{"key":"ref19","article-title":"Openai gym","author":"Brockman","year":"2016"},{"key":"ref20","first-page":"4881","article-title":"Counterfactual off-policy evaluation with gumbel-max structural causal models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Oberst","year":"2019"},{"key":"ref21","first-page":"1179","article-title":"Conservative q-learning for offline reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Kumar","year":"2020"},{"key":"ref22","article-title":"Benchmarking batch deep reinforcement learning algorithms","author":"Fujimoto","year":"2019"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3987"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-022-09552-y"},{"key":"ref25","first-page":"1563","article-title":"The steering approach for multi-criteria reinforcement learning","volume":"14","author":"Mannor","year":"2001","journal-title":"Neural Inf. Process. Syst."},{"key":"ref26","first-page":"1497","article-title":"Managing power consumption and performance of computing systems using reinforcement learning","volume":"20","author":"Tesauro","year":"2007","journal-title":"Neural Inf. Process. Syst."},{"key":"ref27","first-page":"601","article-title":"Dynamic preferences in multi-criteria reinforcement learning","volume-title":"Proc. 22nd Int. Conf. Mach. Learn.","author":"Natarajan","year":"2005"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/adprl.2013.6615007"},{"key":"ref29","article-title":"Multi-Objective deep reinforcement learning","volume-title":"Artif. Intell.","author":"Mossalam","year":"2016"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1142\/9789813279827_0029"},{"key":"ref31","first-page":"10607","article-title":"Prediction-guided multi-objective reinforcement learning for continuous robot control","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"1","author":"Xu","year":"2020"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390162"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/s11571-008-9066-9"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/smc.2014.6974022"},{"issue":"1","key":"ref35","first-page":"7378","article-title":"Multi-objective Markov decision processes for data-driven decision support","volume":"17","author":"Lizotte","year":"2016","journal-title":"J. Mach. Learn. Res."},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3175"},{"key":"ref37","first-page":"9387","article-title":"Clinician-in-the-loop decision making: Reinforcement learning with near-optimal set-valued policies","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Tang","year":"2020"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.2196\/18477"},{"key":"ref39","first-page":"243","article-title":"An improved multi-output Gaussian process RNN with real-time validation for early sepsis detection","volume-title":"Proc. Mach. Learn. Healthcare Conf.","author":"Futoma","year":"2017"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1038\/s41591-018-0253-x"},{"key":"ref41","first-page":"9387","article-title":"Clinician-in-the-loop decision making: Reinforcement learning with near-optimal set-valued policies","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"1","author":"Tang","year":"2020"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1001\/jamanetworkopen.2021.13891"},{"key":"ref43","first-page":"5437","article-title":"Off-policy evaluation via off-policy classification","volume-title":"Neural Inf. Process. Syst.","volume":"32","author":"Irpan","year":"2018"},{"key":"ref44","first-page":"1873","article-title":"Dead-ends and secure exploration in reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"97","author":"Fatemi","year":"2019"},{"key":"ref45","article-title":"Risk sensitive dead-end identification in safety-critical offline reinforcement learning","volume-title":"Trans. Mach. Learn. Res.","author":"Killian","year":"2022"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref47","first-page":"5916","article-title":"Revisiting the softmax bellman operator: New benefits and new perspective","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Song","year":"2019"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/bf01709751"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1038\/sdata.2016.35"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1097\/00005650-199801000-00004"},{"key":"ref51","article-title":"Efficient estimation of word representations in vector space","volume-title":"Proc. 1st Int Conf. Learn. Representations: Workshop Track","author":"Mikolov","year":"2013"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1186\/s13054-019-2663-7"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1007\/BF01728326"},{"key":"ref54","first-page":"2","article-title":"Model selection for offline reinforcement learning: Practical considerations for healthcare settings","volume-title":"Proc. Mach. Learn. Healthcare Conf.","author":"Tang","year":"2021"},{"key":"ref55","article-title":"Evaluating reinforcement learning algorithms in observational health settings","author":"Gottesman","year":"2018"}],"container-title":["IEEE Journal of Biomedical and Health Informatics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6221020\/10704792\/10559219.pdf?arnumber=10559219","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,15]],"date-time":"2025-01-15T20:05:45Z","timestamp":1736971545000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10559219\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10]]},"references-count":55,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/jbhi.2024.3415115","relation":{},"ISSN":["2168-2194","2168-2208"],"issn-type":[{"value":"2168-2194","type":"print"},{"value":"2168-2208","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10]]}}}