{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,4]],"date-time":"2025-11-04T10:52:32Z","timestamp":1762253552750,"version":"3.28.0"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,8,1]],"date-time":"2020-08-01T00:00:00Z","timestamp":1596240000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,8,1]],"date-time":"2020-08-01T00:00:00Z","timestamp":1596240000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,8,1]],"date-time":"2020-08-01T00:00:00Z","timestamp":1596240000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,8]]},"DOI":"10.1109\/ro-man47096.2020.9223516","type":"proceedings-article","created":{"date-parts":[[2020,10,14]],"date-time":"2020-10-14T15:56:21Z","timestamp":1602690981000},"page":"706-712","source":"Crossref","is-referenced-by-count":6,"title":["Human Social Feedback for Efficient Interactive Reinforcement Agent Learning"],"prefix":"10.1109","author":[{"given":"Jinying","family":"Lin","sequence":"first","affiliation":[]},{"given":"Qilei","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Randy","family":"Gomez","sequence":"additional","affiliation":[]},{"given":"Keisuke","family":"Nakamura","sequence":"additional","affiliation":[]},{"given":"Bo","family":"He","sequence":"additional","affiliation":[]},{"given":"Guangliang","family":"Li","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"article-title":"Incorporating advice into agents that learn from reinforcements","year":"1994","author":"Maclin","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-7908-1772-0_16"},{"key":"ref4","first-page":"187","article-title":"Towards instructable connectionist systems","volume-title":"Computational Architectures Integrating Neural and Symbolic Processes","author":"Noelle","year":"1995"},{"article-title":"Guiding a reinforcement learner with natural language advice: Initial results in robocup soccer","volume-title":"The AAAI 2004 workshop on supervisory control of learning and adaptive systems","author":"Kuhlmann","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/1597735.1597738"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2015.03.009"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/2449396.2449422"},{"key":"ref9","first-page":"1","article-title":"Face valuing: training user interfaces with facial expressions and reinforcement learning","volume-title":"Interactive Machine Learning Workshop at IJCAI 2016","author":"Veeriah"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/s12369-012-0163-x"},{"key":"ref11","first-page":"909","article-title":"Using informative behavior to increase engagement in the TAMER framework","volume-title":"Proceedings of the 12th International Conference on Autonomous Agents and Multi-Agent Systems","author":"Li"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-020-09447-w"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/S0921-8890(02)00168-9"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref15","first-page":"1000","article-title":"Reinforcement learning with human teachers: evidence of feedback and guidance with implications for learning performance","volume-title":"Proceedings of the 20th AAAI Conference on Artificial Intelligence","volume":"6","author":"Thomaz"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2007.09.009"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11485"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v28i1.8839"},{"key":"ref19","first-page":"2285","article-title":"Interactive learning from policy-dependent human feedback","volume-title":"Proceedings of the 34th International Conference on Machine Learning","volume":"70","author":"MacGlashan"},{"article-title":"Deep reinforcement learning from policy-dependent human feedback","year":"2019","author":"Arumugam","key":"ref20"},{"article-title":"Dqn-tamer: Human-in-the-loop reinforcement learning with intractable feedback","year":"2018","author":"Arakawa","key":"ref21"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/THMS.2019.2912447"},{"key":"ref23","article-title":"Learning from human-generated reward","volume-title":"Ph.D. dissertation","author":"Knox","year":"2012"},{"article-title":"Real-time convolutional neural networks for emotion and gender classification","year":"2017","author":"Arriaga","key":"ref24"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1609\/aimag.v35i3.2548"}],"event":{"name":"2020 29th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)","start":{"date-parts":[[2020,8,31]]},"location":"Naples, Italy","end":{"date-parts":[[2020,9,4]]}},"container-title":["2020 29th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9219088\/9223329\/09223516.pdf?arnumber=9223516","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,23]],"date-time":"2024-01-23T20:05:53Z","timestamp":1706040353000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9223516\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,8]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/ro-man47096.2020.9223516","relation":{},"subject":[],"published":{"date-parts":[[2020,8]]}}}