{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,23]],"date-time":"2024-10-23T07:56:27Z","timestamp":1729670187060,"version":"3.28.0"},"reference-count":18,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2012,9]]},"DOI":"10.1109\/roman.2012.6343863","type":"proceedings-article","created":{"date-parts":[[2012,11,15]],"date-time":"2012-11-15T17:07:06Z","timestamp":1352999226000},"page":"886-891","source":"Crossref","is-referenced-by-count":2,"title":["Monte Carlo preference elicitation for learning additive reward functions"],"prefix":"10.1109","author":[{"given":"Stephanie","family":"Rosenthal","sequence":"first","affiliation":[]},{"given":"Manuela","family":"Veloso","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"journal-title":"Psychology themes and variations","year":"2011","author":"weiten","key":"17"},{"journal-title":"Linear Programming and Extensions","year":"1963","author":"dantzig","key":"18"},{"key":"15","doi-asserted-by":"publisher","DOI":"10.1086\/269282"},{"key":"16","doi-asserted-by":"publisher","DOI":"10.2307\/2094771"},{"key":"13","doi-asserted-by":"publisher","DOI":"10.1037\/h0044655"},{"key":"14","doi-asserted-by":"publisher","DOI":"10.1086\/269460"},{"key":"11","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","author":"ng","year":"2000","journal-title":"ICML"},{"key":"12","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2008.4651020"},{"journal-title":"Decisions with Multiple Objectives Preferences and Value Trade-Offs","year":"1976","author":"keeney","key":"3"},{"key":"2","doi-asserted-by":"publisher","DOI":"10.2307\/2525541"},{"key":"1","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1613\/jair.575","article-title":"Decision theoretic planning: Structural assumptions and computational leverage","volume":"11","author":"boutilier","year":"1999","journal-title":"JAIR"},{"key":"10","doi-asserted-by":"publisher","DOI":"10.2307\/2280232"},{"key":"7","article-title":"Eliciting additive reward functions for markov decision processes","author":"regan","year":"2011","journal-title":"Twenty-Second Joint Conference on Artificial Intelligence (IJCAI 2011)"},{"journal-title":"A Study in Preference Elicitation under Uncertainty","year":"2011","author":"hines","key":"6"},{"key":"5","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","author":"ziebart","year":"2008","journal-title":"23rd National Conference on Artificial Intelligence"},{"key":"4","article-title":"Preference elicitation and generalized additive utility","author":"braziunas","year":"2006","journal-title":"Twenty-First Conference on Artificial Intelligence (AAAI-06)"},{"key":"9","first-page":"224","article-title":"Gai networks for utility elicitation","author":"gonzales","year":"2004","journal-title":"9th Intl Conference on Principles of Knowledge Representation and Reasoning (KR-04)"},{"key":"8","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"}],"event":{"name":"2012 RO-MAN: The 21st IEEE International Symposium on Robot and Human Interactive Communication","start":{"date-parts":[[2012,9,9]]},"location":"Paris, France","end":{"date-parts":[[2012,9,13]]}},"container-title":["2012 IEEE RO-MAN: The 21st IEEE International Symposium on Robot and Human Interactive Communication"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx5\/6331794\/6343721\/06343863.pdf?arnumber=6343863","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,7,5]],"date-time":"2019-07-05T17:28:57Z","timestamp":1562347737000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/6343863\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2012,9]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/roman.2012.6343863","relation":{},"subject":[],"published":{"date-parts":[[2012,9]]}}}