{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,12]],"date-time":"2025-07-12T01:14:43Z","timestamp":1752282883306,"version":"3.28.0"},"reference-count":13,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2015,9]]},"DOI":"10.1109\/iros.2015.7354106","type":"proceedings-article","created":{"date-parts":[[2015,12,17]],"date-time":"2015-12-17T21:52:55Z","timestamp":1450389175000},"page":"5174-5180","source":"Crossref","is-referenced-by-count":10,"title":["Robot action plans that form and maintain expectations"],"prefix":"10.1109","author":[{"given":"Jan","family":"Winkler","sequence":"first","affiliation":[]},{"given":"Michael","family":"Beetz","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"crossref","first-page":"237","DOI":"10.1613\/jair.301","article-title":"Reinforcement learning: A survey","author":"kaelbling","year":"1996","journal-title":"Journal of Artificial Intelligence Research"},{"key":"ref11","article-title":"The nsf workshop on reinforcement learning: Summary and observations","author":"mahadevan","year":"1996","journal-title":"AI Magazine"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/7902.7906"},{"journal-title":"SPARC","year":"0","key":"ref13"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2009.05.001"},{"article-title":"C4.5: Programs for Machine Learning","year":"1993","author":"quinlan","key":"ref3"},{"article-title":"Efficient memory-based learning for robot control","year":"1990","author":"moore","key":"ref6"},{"key":"ref5","article-title":"Probabilistic hybrid action models for predicting concurrent percept-driven robot behavior","author":"beetz","year":"2000","journal-title":"Proceedings of the Sixth International Conference on AI Planning Systems"},{"key":"ref8","first-page":"118","article-title":"The maxq method for hierarchical reinforcement learning","author":"dietterich","year":"1998","journal-title":"ICML Citeseer"},{"key":"ref7","first-page":"119","article-title":"State abstraction for programmable reinforcement learning agents","author":"andre","year":"2002","journal-title":"AAAI\/IAAI"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ANZIIS.1994.396988"},{"key":"ref1","first-page":"47","article-title":"CRAMm - memories for robots performing everyday manipulation activities","volume":"3","author":"winkler","year":"2014","journal-title":"Advances in Cognitive Systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-79629-6_7"}],"event":{"name":"2015 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2015,9,28]]},"location":"Hamburg, Germany","end":{"date-parts":[[2015,10,2]]}},"container-title":["2015 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7347169\/7353104\/07354106.pdf?arnumber=7354106","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,9,2]],"date-time":"2019-09-02T17:10:28Z","timestamp":1567444228000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/7354106\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2015,9]]},"references-count":13,"URL":"https:\/\/doi.org\/10.1109\/iros.2015.7354106","relation":{},"subject":[],"published":{"date-parts":[[2015,9]]}}}