{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,24]],"date-time":"2025-12-24T12:27:03Z","timestamp":1766579223978},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,3,1]],"date-time":"2019-03-01T00:00:00Z","timestamp":1551398400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,3,1]],"date-time":"2019-03-01T00:00:00Z","timestamp":1551398400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,3,1]],"date-time":"2019-03-01T00:00:00Z","timestamp":1551398400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,3]]},"DOI":"10.1109\/hri.2019.8673256","type":"proceedings-article","created":{"date-parts":[[2019,3,25]],"date-time":"2019-03-25T22:51:27Z","timestamp":1553554287000},"source":"Crossref","is-referenced-by-count":28,"title":["On the Utility of Model Learning in HRI"],"prefix":"10.1109","author":[{"given":"Rohan","family":"Choudhury","sequence":"first","affiliation":[]},{"given":"Gokul","family":"Swamy","sequence":"additional","affiliation":[]},{"given":"Dylan","family":"Hadfield-Menell","sequence":"additional","affiliation":[]},{"given":"Anca D.","family":"Dragan","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref30","author":"dhariwal","year":"2017","journal-title":"OpenAI 
Baselines"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/S1364-6613(03)00128-1"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.infbeh.2003.05.006"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.cognition.2009.07.005"},{"key":"ref13","author":"becker","year":"2013","journal-title":"The Economic Approach to Human Behavior"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/HRI.2013.6483499"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/s12369-017-0400-4"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.005"},{"key":"ref17","author":"schmerling","year":"2017","journal-title":"Multimodal probabilistic model-based planning for human-robot interaction"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2009.5354147"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/HRI.2013.6483603"},{"key":"ref28","author":"kingma","year":"2014","journal-title":"Adam A method for stochastic optimization"},{"key":"ref4","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"International Conference on Machine Learning"},{"key":"ref27","doi-asserted-by":"crossref","first-page":"33","DOI":"10.1145\/1273496.1273501","article-title":"Scalable training of l1-regularized log-linear models","author":"andrew","year":"2007","journal-title":"Proceedings of the 24th International Conference on Machine Learning (ICML)"},{"key":"ref3","author":"mnih","year":"2013","journal-title":"Playing atari with deep reinforcement learning"},{"key":"ref6","first-page":"64","article-title":"Unsupervised learning for physical interaction through video prediction","author":"finn","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref29","first-page":"627","article-title":"A reduction of imitation learning and structured prediction to no-regret online 
learning","author":"ross","year":"2011","journal-title":"Proceedings of the Fourteenth International Conference on Artificial Intelligence and Statistics"},{"key":"ref5","author":"schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511752902.011"},{"key":"ref7","author":"perez","year":"0","journal-title":"Predictive learning is the new buzzword in deep learning"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.neuron.2013.11.028"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511597985"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neuron.2010.04.016"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2015.7139219"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2016.XII.029"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2015.XI.032"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"key":"ref23","author":"tu","year":"2017","journal-title":"Least-squares temporal difference learning for the linear quadratic regulator"},{"key":"ref26","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","author":"ziebart","year":"2008","journal-title":"AAAI'08 Proceedings of the 23rd National Conference on Artificial Intelligence"},{"key":"ref25","author":"levine","year":"2012","journal-title":"Continuous inverse optimal control with locally optimal examples"}],"event":{"name":"2019 14th ACM\/IEEE International Conference on Human-Robot Interaction (HRI)","location":"Daegu, Korea (South)","start":{"date-parts":[[2019,3,11]]},"end":{"date-parts":[[2019,3,14]]}},"container-title":["2019 14th ACM\/IEEE International Conference on Human-Robot Interaction 
(HRI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8666012\/8673065\/08673256.pdf?arnumber=8673256","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,14]],"date-time":"2022-09-14T10:17:53Z","timestamp":1663150673000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8673256\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,3]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/hri.2019.8673256","relation":{},"subject":[],"published":{"date-parts":[[2019,3]]}}}