{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T10:58:30Z","timestamp":1761649110477,"version":"3.37.3"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,12,6]],"date-time":"2022-12-06T00:00:00Z","timestamp":1670284800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,12,6]],"date-time":"2022-12-06T00:00:00Z","timestamp":1670284800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100007270","name":"University of Michigan","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100007270","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,12,6]]},"DOI":"10.1109\/cdc51059.2022.9992450","type":"proceedings-article","created":{"date-parts":[[2023,1,10]],"date-time":"2023-01-10T19:26:56Z","timestamp":1673378816000},"page":"2307-2312","source":"Crossref","is-referenced-by-count":1,"title":["Risk-Averse Reinforcement Learning via Dynamic Time-Consistent Risk Measures"],"prefix":"10.1109","author":[{"given":"Xian","family":"Yu","sequence":"first","affiliation":[{"name":"The Ohio State University,Department of Integrated Systems Engineering,Columbus,Ohio,USA"}]},{"given":"Siqian","family":"Shen","sequence":"additional","affiliation":[{"name":"University of Michigan,Department of Industrial and Operations Engineering,Ann Arbor,Michigan,USA"}]}],"member":"263","reference":[{"volume-title":"Markov Decision Processes: Discrete Stochastic Dynamic Programming.","year":"2014","author":"Puterman","key":"ref1"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1287\/opre.1050.0216"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1287\/moor.1040.0129"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1287\/moor.1120.0566"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1287\/moor.2016.0779"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1287\/moor.1120.0540"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2015.2495174"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1137\/19M1268410"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50021-0"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/S0005-1098(98)00153-8"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/9.847737"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1287\/mnsc.18.7.356"},{"key":"ref13","volume-title":"Mean-variance analysis in portfolio choice and capital markets.","volume":"66","author":"Markowitz","year":"2000"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1287\/moor.27.2.294.324"},{"key":"ref15","first-page":"1651","article-title":"Policy gradients with variance related risk criteria","volume-title":"Proceedings of the 29th International Coference on International Conference on Machine Learning","author":"Tamar"},{"key":"ref16","article-title":"Actor-critic algorithms for risk-sensitive MDPs","volume":"26","author":"La","year":"2013","journal-title":"Advances in neural information processing systems"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1111\/1467-9965.00068"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21314\/JOR.2000.038"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/S0378-4266(02)00271-6"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1287\/moor.1050.0186"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1137\/1.9780898718751"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2017.8264389"},{"key":"ref23","article-title":"Algorithms for CVaR optimization in MDPs","volume":"27","author":"Chow","year":"2014","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v29i1.9561"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.ejor.2013.11.037"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-010-0393-3"},{"article-title":"An approximate solution method for large risk-averse Markov decision processes","year":"2012","author":"Petrik","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2013.6579868"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2014.6859437"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/j.ejor.2015.05.048"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/BF00115009"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/tnn.1998.712192"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1126\/science.153.3731.34"},{"article-title":"Playing atari with deep reinforcement learning","year":"2013","author":"Mnih","key":"ref34"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"article-title":"Adam: A method for stochastic optimization","year":"2014","author":"Kingma","key":"ref36"}],"event":{"name":"2022 IEEE 61st Conference on Decision and Control (CDC)","start":{"date-parts":[[2022,12,6]]},"location":"Cancun, Mexico","end":{"date-parts":[[2022,12,9]]}},"container-title":["2022 IEEE 61st Conference on Decision and Control (CDC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9992315\/9992317\/09992450.pdf?arnumber=9992450","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T11:49:55Z","timestamp":1706788195000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9992450\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,12,6]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/cdc51059.2022.9992450","relation":{},"subject":[],"published":{"date-parts":[[2022,12,6]]}}}