{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,20]],"date-time":"2025-06-20T19:43:21Z","timestamp":1750448601690,"version":"3.28.0"},"reference-count":32,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2014,12]]},"DOI":"10.1109\/adprl.2014.7010645","type":"proceedings-article","created":{"date-parts":[[2015,1,20]],"date-time":"2015-01-20T02:48:03Z","timestamp":1421722083000},"page":"1-8","source":"Crossref","is-referenced-by-count":2,"title":["Beyond exponential utility functions: A variance-adjusted approach for risk-averse reinforcement learning"],"prefix":"10.1109","author":[{"given":"Abhijit A.","family":"Gosavi","sequence":"first","affiliation":[]},{"given":"Sajal K.","family":"Das","sequence":"additional","affiliation":[]},{"given":"Susan L.","family":"Murray","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"journal-title":"Risk-Sensitive Optimal Control","year":"1990","author":"whittle","key":"ref32"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1002\/bdm.414"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/j.ejor.2008.04.002"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1287\/moor.14.1.147"},{"key":"ref11","first-page":"162","article-title":"Reinforcement learning via bounded risk","author":"geibel","year":"2001","journal-title":"ICML01"},{"key":"ref12","doi-asserted-by":"crossref","DOI":"10.1007\/978-1-4757-3766-0","article-title":"Simulation-Based Optimization: Parametric Optimization Techniques and Reinforcement Learning","author":"gosavi","year":"2003"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/S0377-2217(02)00874-3"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.sysconle.2005.08.011"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2006.02.006"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/WSC.2009.5429344"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/s12555-011-0515-6"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1080\/03081079.2014.883387"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/WSC.2011.6147989"},{"journal-title":"Reinforcement Learning","year":"1998","author":"sutton","key":"ref28"},{"journal-title":"Neuro-Dynamic Programming","year":"1996","author":"bertsekas","key":"ref4"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1287\/opre.42.1.175"},{"article-title":"Dynamic Programming and Optimal Control: Volume II","year":"2012","author":"bertsekas","key":"ref3"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1287\/moor.27.2.294.324"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/S0005-1098(99)00099-0"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/s00186-005-0045-1"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.jebo.2010.02.012"},{"key":"ref7","doi-asserted-by":"crossref","DOI":"10.1201\/9781439821091","article-title":"Reinforcement Learning and Dynamic Programming Using Function Approximators","author":"busoniu","year":"2010"},{"key":"ref2","doi-asserted-by":"crossref","first-page":"319","DOI":"10.1613\/jair.806","article-title":"Infinite-horizon policy-gradient estimation","volume":"15","author":"baxter","year":"2001","journal-title":"Journal of Artificial Intelligence"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1080\/714044439"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s00186-006-0135-8"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50021-0"},{"key":"ref22","first-page":"77","article-title":"Portfolio selection","volume":"7","author":"markowitz","year":"1952","journal-title":"Journal of Finance"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1287\/mnsc.18.7.356"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1111\/1468-0262.00158"},{"key":"ref23","first-page":"267","article-title":"Risk-sensitive reinforcement learning","volume":"49","author":"mihatsch","year":"2002"},{"key":"ref26","first-page":"473","article-title":"Average-reward reinforcement learning for variance penalized markov decision problems","author":"sato","year":"2001","journal-title":"ICML '01 Proceedings of the Eighteenth International Conference on Machine Learning"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-010-0393-3"}],"event":{"name":"2014 IEEE Symposium on Adaptive Dynamic Programming and Reinforcement Learning (ADPRL)","start":{"date-parts":[[2014,12,9]]},"location":"Orlando, FL, USA","end":{"date-parts":[[2014,12,12]]}},"container-title":["2014 IEEE Symposium on Adaptive Dynamic Programming and Reinforcement Learning (ADPRL)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7000183\/7010603\/07010645.pdf?arnumber=7010645","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,8,19]],"date-time":"2019-08-19T21:42:41Z","timestamp":1566250961000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/7010645\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2014,12]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/adprl.2014.7010645","relation":{},"subject":[],"published":{"date-parts":[[2014,12]]}}}