{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T02:03:19Z","timestamp":1760061799651,"version":"3.28.0"},"reference-count":16,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2011,7]]},"DOI":"10.1109\/roman.2011.6005277","type":"proceedings-article","created":{"date-parts":[[2011,9,12]],"date-time":"2011-09-12T19:33:05Z","timestamp":1315855985000},"page":"455-460","source":"Crossref","is-referenced-by-count":3,"title":["Human-like action segmentation for option learning"],"prefix":"10.1109","author":[{"given":"Jaeeun","family":"Shim","sequence":"first","affiliation":[]},{"given":"Andrea L.","family":"Thomaz","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"15","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2007.09.009"},{"key":"16","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553529"},{"key":"13","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"14","doi-asserted-by":"publisher","DOI":"10.1109\/ICHR.2006.321307"},{"key":"11","doi-asserted-by":"crossref","first-page":"212","DOI":"10.1007\/3-540-45622-8_16","article-title":"Learning options in reinforcement learning","author":"stolle","year":"2002","journal-title":"Abstraction Reformulation and Approximation 6th International Symposium"},{"journal-title":"Reinforcement Learning An Introduction","year":"1998","author":"sutton","key":"12"},{"key":"3","doi-asserted-by":"publisher","DOI":"10.1016\/j.cognition.2007.07.005"},{"key":"2","doi-asserted-by":"publisher","DOI":"10.1109\/ICHR.2006.321361"},{"key":"1","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2008.10.024"},{"key":"10","first-page":"361","article-title":"Automatic discovery of subgoals in reinforcement learning using diverse density","author":"mcgovern","year":"2001","journal-title":"ICML '01 Proceedings of the Eighteenth International Conference on Machine Learning"},{"key":"7","doi-asserted-by":"publisher","DOI":"10.1177\/0278364904042199"},{"key":"6","first-page":"709","article-title":"Optimal policy switching algorithms for reinforcement learning","volume":"1","author":"comanici","year":"2010","journal-title":"Proc 1st Int Conf Autonomous Agents Multiagent Syst"},{"key":"5","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2004.833816"},{"key":"4","doi-asserted-by":"publisher","DOI":"10.1016\/S1364-6613(00)01615-6"},{"key":"9","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2009.2026508"},{"key":"8","doi-asserted-by":"publisher","DOI":"10.1177\/0278364908091153"}],"event":{"name":"2011 RO-MAN: The 20th IEEE International Symposium on Robot and Human Interactive Communication","start":{"date-parts":[[2011,7,31]]},"location":"Atlanta, GA, USA","end":{"date-parts":[[2011,8,3]]}},"container-title":["2011 RO-MAN"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx5\/5992813\/6005194\/06005277.pdf?arnumber=6005277","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2017,6,20]],"date-time":"2017-06-20T06:12:06Z","timestamp":1497939126000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/6005277\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2011,7]]},"references-count":16,"URL":"https:\/\/doi.org\/10.1109\/roman.2011.6005277","relation":{},"subject":[],"published":{"date-parts":[[2011,7]]}}}