{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,23]],"date-time":"2024-10-23T06:53:19Z","timestamp":1729666399893,"version":"3.28.0"},"reference-count":27,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2010,7]]},"DOI":"10.1109\/ijcnn.2010.5596811","type":"proceedings-article","created":{"date-parts":[[2010,10,19]],"date-time":"2010-10-19T14:58:15Z","timestamp":1287500295000},"page":"1-8","source":"Crossref","is-referenced-by-count":3,"title":["Region enhanced neural Q-learning for solving model-based POMDPs"],"prefix":"10.1109","author":[{"given":"Marco A.","family":"Wiering","sequence":"first","affiliation":[]},{"given":"Thijs","family":"Kooi","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1023\/A:1022628806385"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-377-6.50052-9"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/BF02055574"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1287\/mnsc.28.1.1"},{"key":"ref14","doi-asserted-by":"crossref","first-page":"335","DOI":"10.1613\/jair.2078","article-title":"Anytime point-based approximations for large POMDPs","volume":"27","author":"pineau","year":"2006","journal-title":"Journal of Artificial Intelligence Research (JAIR)"},{"key":"ref15","first-page":"2329","article-title":"Point-based value iteration for continuous POMDPs","volume":"7","author":"porta","year":"2006","journal-title":"Journal of Machine Learning Research"},{"key":"ref16","doi-asserted-by":"crossref","DOI":"10.1002\/9780470316887","author":"puterman","year":"1994","journal-title":"Markov Decision Processes-Discrete Stochastic Dynamic Programming"},{"key":"ref17","doi-asserted-by":"crossref","first-page":"317","DOI":"10.1007\/11564096_32","article-title":"Neural fitted Q iteration ? 
first experiences with a data efficient neural reinforcement learning method","author":"riedmiller","year":"2005","journal-title":"Machine Learning ECML 2005 16th European Conference on Machine Learning"},{"journal-title":"On-line Q-learning using connectionist systems","year":"1994","author":"rummery","key":"ref18"},{"journal-title":"Artificial Intelligence A Modern Approach","year":"1994","author":"russel","key":"ref19"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1080\/08839510802170538"},{"key":"ref27","doi-asserted-by":"crossref","first-page":"123","DOI":"10.1613\/jair.1379","article-title":"Restricted value iteration: Theory and algorithms","volume":"23","author":"zhang","year":"2005","journal-title":"Journal of Artificial Intelligence Research (JAIR)"},{"journal-title":"Dynamic Programming","year":"1957","author":"bellman","key":"ref3"},{"key":"ref6","article-title":"Automated handwashing assistance for persons with dementia using video and a partially observable Markov decision process","author":"hoey","year":"2009","journal-title":"Computer Vision and Image Understanding"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.omega.2006.02.002"},{"key":"ref8","article-title":"SARSOP: Efficient point-based POMDP planning by approximating optimally reachable belief spaces","author":"kurniawati","year":"2008","journal-title":"Proc Robotics Science and Systems"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"237","DOI":"10.1613\/jair.301","article-title":"Reinforcement learning: A survey","volume":"4","author":"kaelbling","year":"1996","journal-title":"Journal of Artificial Intelligence Research"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/0022-247X(65)90154-X"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2006.68"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2009.5178743"},{"journal-title":"The optimal control of partially observable Markov Decision Processes","year":"1971","author":"sondik","key":"ref20"},{"key":"ref22","doi-asserted-by":"crossref","first-page":"195","DOI":"10.1613\/jair.1659","article-title":"Perseus: Randomized point-based value iteration for POMDPs","volume":"24","author":"spaan","year":"2005","journal-title":"Journal of Artificial Intelligence Research"},{"journal-title":"Approximate planning under uncertainty in partially observable domains","year":"2006","author":"spaan","key":"ref21"},{"journal-title":"Learning from delayed rewards","year":"1989","author":"watkins","key":"ref24"},{"journal-title":"Reinforcement Learning An Introduction","year":"1998","author":"sutton","key":"ref23"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2008.920231"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1023\/A:1022676722315"}],"event":{"name":"2010 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2010,7,18]]},"location":"Barcelona, Spain","end":{"date-parts":[[2010,7,23]]}},"container-title":["The 2010 International Joint Conference on Neural Networks 
(IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx5\/5581822\/5595732\/05596811.pdf?arnumber=5596811","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,11,11]],"date-time":"2021-11-11T22:15:34Z","timestamp":1636668934000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/5596811\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2010,7]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/ijcnn.2010.5596811","relation":{},"subject":[],"published":{"date-parts":[[2010,7]]}}}