{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,7]],"date-time":"2024-09-07T06:48:48Z","timestamp":1725691728803},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,3,1]],"date-time":"2019-03-01T00:00:00Z","timestamp":1551398400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,3,1]],"date-time":"2019-03-01T00:00:00Z","timestamp":1551398400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,3,1]],"date-time":"2019-03-01T00:00:00Z","timestamp":1551398400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,3]]},"DOI":"10.1109\/hri.2019.8673019","type":"proceedings-article","created":{"date-parts":[[2019,3,25]],"date-time":"2019-03-25T22:51:27Z","timestamp":1553554287000},"page":"468-477","source":"Crossref","is-referenced-by-count":4,"title":["SAIL: Simulation-Informed Active In-the-Wild Learning"],"prefix":"10.1109","author":[{"given":"Elaine Schaertl","family":"Short","sequence":"first","affiliation":[]},{"given":"Adam","family":"Allevato","sequence":"additional","affiliation":[]},{"given":"Andrea L.","family":"Thomaz","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Learning hand-eye coordination for robotic grasping with deep learning and large-scale data collection","author":"levine","year":"2017","journal-title":"The International Journal of Robotics Research"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1613\/jair.5242"},{"key":"ref12","doi-asserted-by":"crossref","first-page":"1127","DOI":"10.1609\/aaai.v24i1.7740","article-title":"Robust Policy Computation in Reward-Uncertain MDPs Using Nondominated Policies","author":"regan","year":"2010","journal-title":"Twenty-fourth AAAI Conference on Artificial Intelligence (AAAI-10)"},{"key":"ref13","first-page":"439","author":"freire da silva","year":"2011","journal-title":"A Geometric Approach to Find Nondominated Policies to Imprecise Reward MDPs"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1177\/0278364917713116"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3171221.3171267"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2014.6943191"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/2157689.2157693"},{"journal-title":"Leveraging demonstrations for deep reinforcement learning on robotics problems with sparse rewards","year":"2017","author":"ve\u010der\u00edk","key":"ref18"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2011.04.005"},{"key":"ref4","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v32i1.12118","article-title":"An Experimental Study of Advice in Sequential Decision-Making under Uncertainty","author":"benavent","year":"2018","journal-title":"The 32nd AAAI Conference on Artificial Intelligence (AAAI-18)"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/HRI.2016.7451780"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2016.7727695"},{"journal-title":"Trial without Error Towards Safe Reinforcement Learning via Human Intervention","year":"2018","author":"saunders","key":"ref5"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2011.5979757"},{"key":"ref7","first-page":"1815","article-title":"Repeated Inverse Reinforcement Learning","author":"amin","year":"2017","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref2","first-page":"23","article-title":"Robots Asking for Directions: The Willingness of Passers-by to Support Robots","author":"weiss","year":"2010","journal-title":"Proceedings of the 5th ACM\/IEEE International Conference on Human-robot Interaction"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2012.05.005"},{"key":"ref1","first-page":"2625","article-title":"Policy shaping: Integrating human feedback with reinforcement learning","author":"griffith","year":"2013","journal-title":"Advances in Neural Information Processing Systems 26"},{"key":"ref20","article-title":"Deep reinforcement learning from human preferences","author":"christiano","year":"2017","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/1597735.1597738"},{"key":"ref21","first-page":"475","article-title":"Reinforcement learning from simultaneous human and MDP reward","author":"knox","year":"2012","journal-title":"Proceedings of the 11th International Conference on Autonomous Agents and Multiagent Systems-Volume 1"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2009.07.008"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/s10514-015-9448-x"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/2157689.2157815"},{"key":"ref25","article-title":"Actor-Critic Algorithms","author":"konda","year":"0","journal-title":"Advances in Neural Information Processing Systems (NIPS)"}],"event":{"name":"2019 14th ACM\/IEEE International Conference on Human-Robot Interaction (HRI)","start":{"date-parts":[[2019,3,11]]},"location":"Daegu, Korea (South)","end":{"date-parts":[[2019,3,14]]}},"container-title":["2019 14th ACM\/IEEE International Conference on Human-Robot Interaction (HRI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8666012\/8673065\/08673019.pdf?arnumber=8673019","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,14]],"date-time":"2022-09-14T10:17:46Z","timestamp":1663150666000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8673019\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,3]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/hri.2019.8673019","relation":{},"subject":[],"published":{"date-parts":[[2019,3]]}}}