{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T07:16:26Z","timestamp":1760080586107,"version":"3.37.3"},"reference-count":38,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,14]],"date-time":"2021-12-14T00:00:00Z","timestamp":1639440000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000006","name":"Office of Naval Research","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000006","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,14]]},"DOI":"10.1109\/cdc45484.2021.9683261","type":"proceedings-article","created":{"date-parts":[[2022,2,1]],"date-time":"2022-02-01T20:50:18Z","timestamp":1643748618000},"page":"1528-1535","source":"Crossref","is-referenced-by-count":5,"title":["Reinforcement Learning Beyond Expectation"],"prefix":"10.1109","author":[{"given":"Bhaskar","family":"Ramasubramanian","sequence":"first","affiliation":[{"name":"University of Washington,Network Security Lab,Department of Electrical and Computer Engineering,Seattle,WA,USA,98195"}]},{"given":"Luyao","family":"Niu","sequence":"additional","affiliation":[{"name":"Worcester Polytechnic Institute,Department of Electrical and Computer Engineering,Worcester,MA,USA,01609"}]},{"given":"Andrew","family":"Clark","sequence":"additional","affiliation":[{"name":"Worcester Polytechnic Institute,Department of Electrical and Computer Engineering,Worcester,MA,USA,01609"}]},{"given":"Radha","family":"Poovendran","sequence":"additional","affiliation":[{"name":"University of Washington,Network Security Lab,Department of Electrical and Computer Engineering,Seattle,WA,USA,98195"}]}],"member":"263","reference":[{"key":"ref38","article-title":"Is Q-learning provably efficient?","author":"jin","year":"2018","journal-title":"Neural Information Processing Systems"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.2307\/2998573"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1257\/jep.27.1.173"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-28619-4_10"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/s11081-015-9294-x"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1137\/S0363012997331639"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2009.4927542"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1994.6.6.1185"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.sysconle.2021.105009"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2019.01.003"},{"journal-title":"The Economics of Risk and Time","year":"2004","author":"gollier","key":"ref11"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511840203"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1137\/120899005"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1162\/NECO_a_00600"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.2307\/1914185"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/BF00122574"},{"key":"ref17","first-page":"77","article-title":"Portfolio 
selection","volume":"7","author":"markowitz","year":"1952","journal-title":"The Journal of Finance"},{"key":"ref18","first-page":"1651","article-title":"Policy gradients with variance related risk criteria","author":"tamar","year":"2012","journal-title":"International Coference on Machine Learning"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.ejor.2013.06.019"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2018.07.028"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-011-5235-x"},{"key":"ref27","first-page":"6778","article-title":"Dynamic programming with non-convex risk-sensitive measures","author":"lin","year":"2013","journal-title":"American Control Conference"},{"journal-title":"Markov Decision Processes Discrete Stochastic Dynamic Programming","year":"2014","author":"puterman","key":"ref3"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2018.8430905"},{"key":"ref5","doi-asserted-by":"crossref","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2016.XII.029"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"2224","DOI":"10.1109\/COMST.2019.2904897","article-title":"Deep learning in mobile and wireless networking: A survey","volume":"21","author":"zhang","year":"2019","journal-title":"IEEE Communications Surveys & Tutorials"},{"key":"ref2","volume":"1","author":"bertsekas","year":"2017","journal-title":"Dynamic Programming and Optimal Control"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TPWRS.2018.2881359"},{"journal-title":"Reinforcement Learning An Introduction","year":"2018","author":"sutton","key":"ref1"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1287\/mnsc.18.7.356"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1287\/moor.27.2.294.324"},{"journal-title":"Risk-Sensitive Optimal Control","year":"1990","author":"whittle","key":"ref21"},{"article-title":"Risk-sensitive reinforcement learning: A constrained optimization viewpoint","year":"2018","author":"prashanth","key":"ref24"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/S0378-4266(02)00271-6"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2018.2822658"},{"key":"ref25","article-title":"Cumulative prospect theory meets reinforcement learning: Prediction and control","author":"prashanth","year":"2016","journal-title":"International Conference on Machine Learning"}],"event":{"name":"2021 60th IEEE Conference on Decision and Control (CDC)","start":{"date-parts":[[2021,12,14]]},"location":"Austin, TX, USA","end":{"date-parts":[[2021,12,17]]}},"container-title":["2021 60th IEEE Conference on Decision and Control 
(CDC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9682670\/9682776\/09683261.pdf?arnumber=9683261","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,6]],"date-time":"2022-06-06T20:25:10Z","timestamp":1654547110000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9683261\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,14]]},"references-count":38,"URL":"https:\/\/doi.org\/10.1109\/cdc45484.2021.9683261","relation":{},"subject":[],"published":{"date-parts":[[2021,12,14]]}}}