{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T15:27:34Z","timestamp":1759332454057,"version":"3.37.3"},"reference-count":20,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"European Union in the Framework of the Horizon 2020 EU-Korea Project 5G-ALLSTAR","award":["815323"],"award-info":[{"award-number":["815323"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Control Syst. Lett."],"published-print":{"date-parts":[[2020,7]]},"DOI":"10.1109\/lcsys.2020.2979635","type":"journal-article","created":{"date-parts":[[2020,3,10]],"date-time":"2020-03-10T20:32:17Z","timestamp":1583872337000},"page":"755-760","source":"Crossref","is-referenced-by-count":8,"title":["Chance-Constrained Control With Lexicographic Deep Reinforcement Learning"],"prefix":"10.1109","volume":"4","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5503-8506","authenticated-orcid":false,"given":"Alessandro","family":"Giuseppi","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0188-3346","authenticated-orcid":false,"given":"Antonio","family":"Pietrabissa","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"2094","article-title":"Deep reinforcement learning with double Q-learning","author":"van hasselt","year":"2016","journal-title":"Proc 13th AAAI Conf Artif Intell"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCC.2012.2218595"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.5019\/j.ijcir.2007.78"},{"journal-title":"Constrained Markov Decision Processes","year":"1999","author":"altman","key":"ref13"},{"key":"ref14","first-page":"14636","article-title":"A generalized algorithm for multi-objective reinforcement learning and policy adaptation","author":"yang","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref15","first-page":"197","article-title":"Multi-criteria reinforcement learning","author":"g\u00e1bor","year":"1998","journal-title":"Proc Int Conf Machine Learn (ICML)"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1080\/00207179.2015.1068955"},{"key":"ref17","first-page":"160","article-title":"Reinforcement learning for robots using neural networks","author":"lin","year":"1993"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.1983.6313077"},{"journal-title":"OpenAI Gym","year":"2016","author":"brockman","key":"ref19"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TCST.2007.899741"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.jfranklin.2017.11.040"},{"key":"ref6","first-page":"199","article-title":"Markov decision processes","volume":"54","author":"puterman","year":"2015","journal-title":"Comput Vis Pattern Recognit"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4615-0805-2_16"},{"journal-title":"Playing atari with deep reinforcement learning","year":"2013","author":"mnih","key":"ref8"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1137\/130910312"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2015.2511587"},{"key":"ref20","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"Proc 4th Int Conf Learn Represent (ICLR)"}],"container-title":["IEEE Control Systems Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7782633\/8995821\/09031720.pdf?arnumber=9031720","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T12:51:13Z","timestamp":1651063873000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9031720\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,7]]},"references-count":20,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/lcsys.2020.2979635","relation":{},"ISSN":["2475-1456"],"issn-type":[{"type":"electronic","value":"2475-1456"}],"subject":[],"published":{"date-parts":[[2020,7]]}}}