{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,23]],"date-time":"2024-10-23T08:28:24Z","timestamp":1729672104217,"version":"3.28.0"},"reference-count":28,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2014,12]]},"DOI":"10.1109\/adprl.2014.7010613","type":"proceedings-article","created":{"date-parts":[[2015,1,20]],"date-time":"2015-01-20T02:48:03Z","timestamp":1421722083000},"page":"1-8","source":"Crossref","is-referenced-by-count":5,"title":["Convergence of value iterations for total-cost MDPs and POMDPs with general state and action sets"],"prefix":"10.1109","author":[{"given":"Eugene A.","family":"Feinberg","sequence":"first","affiliation":[]},{"given":"Pavlo O.","family":"Kasyanov","sequence":"additional","affiliation":[]},{"given":"Michael Z.","family":"Zgurovsky","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"crossref","DOI":"10.1007\/978-1-4615-6746-2","author":"dynkin","year":"1979","journal-title":"Controlled Markov processes"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.orl.2013.12.011"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.jmaa.2012.07.051"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1287\/moor.1120.0555"},{"article-title":"Partially observable total-cost Markov decision processes with weakly continuous transition probabilities","year":"2014","author":"feinberg","key":"ref14"},{"article-title":"Convergence of probability measures and Markov decision models with incomplete information","year":"2014","author":"feinberg","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1287\/moor.1070.0269"},{"key":"ref17","doi-asserted-by":"crossref","DOI":"10.1007\/978-1-4419-8714-3","author":"hern\u00e1ndez-lerma","year":"1989","journal-title":"Adaptive Markov Control Processes"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4612-0729-0"},{"key":"ref19","first-page":"21","article-title":"Finite state and action MDPs","author":"kallenberg","year":"2002","journal-title":"Handbook of Markov Decision Processes"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1137\/1121014"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1137\/040620321"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1287\/moor.1110.0516"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1137\/070688663"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s10957-010-9678-1"},{"key":"ref5","first-page":"500","article-title":"Filtering for discrete-time Markov processes and applications to inventory control with incomplete information","author":"bensoussan","year":"2011","journal-title":"The Oxford Handbook of Nonlinear Filtering"},{"journal-title":"1978 Stochastic Optimal Control The Discrete-Time Case Academic Press New York","year":"1996","author":"bertsekas","key":"ref8"},{"journal-title":"Dynamic Programming and Stochastic Control","year":"1976","author":"bertsekas","key":"ref7"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511526503"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1137\/1110001"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/0016-0032(65)90528-4"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1214\/aos\/1176342886"},{"article-title":"Quantized stationary control policies in Markov decision processes","year":"2013","author":"saldi","key":"ref22"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.2307\/1426080"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1287\/opre.21.5.1071"},{"key":"ref23","first-page":"131","article-title":"Some new results in the theory of controlled random processes","volume":"8","author":"shiryaev","year":"1965","journal-title":"Trans IV Prague Conf Information Theory Statistical Decision Functions Random Processes"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/0167-6377(90)90022-W"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-45470-7"}],"event":{"name":"2014 IEEE Symposium on Adaptive Dynamic Programming and Reinforcement Learning (ADPRL)","start":{"date-parts":[[2014,12,9]]},"location":"Orlando, FL, USA","end":{"date-parts":[[2014,12,12]]}},"container-title":["2014 IEEE Symposium on Adaptive Dynamic Programming and Reinforcement Learning (ADPRL)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7000183\/7010603\/07010613.pdf?arnumber=7010613","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2017,6,23]],"date-time":"2017-06-23T03:55:01Z","timestamp":1498190101000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/7010613\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2014,12]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/adprl.2014.7010613","relation":{},"subject":[],"published":{"date-parts":[[2014,12]]}}}