{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T03:42:10Z","timestamp":1774064530726,"version":"3.50.1"},"reference-count":32,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,1]],"date-time":"2021-12-01T00:00:00Z","timestamp":1638316800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"ARL","award":["W911NF-19-2-0146"],"award-info":[{"award-number":["W911NF-19-2-0146"]}]},{"name":"AFOSR\/AFRL Center of Excellence","award":["FA9550-18-1-0251"],"award-info":[{"award-number":["FA9550-18-1-0251"]}]},{"name":"AFOSR Trust & Influence","award":["FA9550-18-1-0097"],"award-info":[{"award-number":["FA9550-18-1-0097"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Human-Mach. Syst."],"published-print":{"date-parts":[[2021,12]]},"DOI":"10.1109\/thms.2021.3107675","type":"journal-article","created":{"date-parts":[[2021,9,17]],"date-time":"2021-09-17T19:58:26Z","timestamp":1631908706000},"page":"706-714","source":"Crossref","is-referenced-by-count":20,"title":["Individualized Mutual Adaptation in Human-Agent Teams"],"prefix":"10.1109","volume":"51","author":[{"given":"Huao","family":"Li","sequence":"first","affiliation":[{"name":"School of Information Science, University of Pittsburgh, Pittsburgh, PA, USA"}]},{"given":"Tianwei","family":"Ni","sequence":"additional","affiliation":[{"name":"University of Montreal, Quebec, Canada"}]},{"given":"Siddharth","family":"Agrawal","sequence":"additional","affiliation":[{"name":"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"}]},{"given":"Fan","family":"Jia","sequence":"additional","affiliation":[{"name":"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8453-5106","authenticated-orcid":false,"given":"Suhas","family":"Raja","sequence":"additional","affiliation":[{"name":"University of Texas, Austin, USA"}]},{"given":"Yikang","family":"Gui","sequence":"additional","affiliation":[{"name":"University of Georgia, Athens, GA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4493-959X","authenticated-orcid":false,"given":"Dana","family":"Hughes","sequence":"additional","affiliation":[{"name":"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1013-9482","authenticated-orcid":false,"given":"Michael","family":"Lewis","sequence":"additional","affiliation":[{"name":"School of Information Science, University of Pittsburgh, Pittsburgh, PA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5635-1406","authenticated-orcid":false,"given":"Katia","family":"Sycara","sequence":"additional","affiliation":[{"name":"Robotics Institute, Carnegie Mellon University, Pittsburgh, PA, USA"}]}],"member":"263","reference":[{"key":"ref32","first-page":"5923","article-title":"Effective diversity in population based reinforcement learning","author":"parker-holder","year":"2020","journal-title":"Proc 29th Int Joint Conf Artif Intell Int Joint Conf Artif Intell Org"},{"key":"ref31","article-title":"Reinforcement learning from imperfect demonstrations","author":"gao","year":"2018","journal-title":"Proc 35th Int Conf Machine Learning"},{"key":"ref30","first-page":"2094","article-title":"Deep reinforcement learning with double Q-learning","author":"hasselt","year":"2016","journal-title":"Proc 30th AAAI Conf Artificial Intell"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794007"},{"key":"ref11","first-page":"397","article-title":"Modeling uncertainty in leading ad hoc teams","author":"agmon","year":"2014","journal-title":"Proc 13th Int Conf Auton Agents Multiagent Syst"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/36"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1177\/0278364918772017"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/RO-MAN47096.2020.9223520"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1177\/0278364917690593"},{"key":"ref16","first-page":"5670","article-title":"On the feasibility of learning, rather than assuming, human biases for reward inference","author":"shah","year":"2019","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref17","first-page":"2010","article-title":"Cooperating with unknown teammates in complex domains: A robot soccer case study of ad hoc teamwork","author":"barrett","year":"2015","journal-title":"Proc 29th AAAI Conf Artif Intell"},{"key":"ref18","first-page":"567","article-title":"Empirical evaluation of ad hoc teamwork in the pursuit domain","author":"barrett","year":"2011","journal-title":"Proc of 10th Int Conf Auton Agents Multiagent Syst"},{"key":"ref19","first-page":"189","article-title":"Efficient Model Learning from Joint-Action Demonstrations for Human-Robot Collaborative Tasks","author":"nikolaidis","year":"2015","journal-title":"2007 2nd ACM\/IEEE International Conference on Human-Robot Interaction (HRI) HRI"},{"key":"ref28","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/78"},{"key":"ref27","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1111\/cogs.12009"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1518\/001872007779598037"},{"key":"ref29","article-title":"SQIL: Imitation learning via reinforcement learning with sparse rewards","author":"reddy","year":"2020","journal-title":"Proc 8th Int Conf Learning Representations"},{"key":"ref5","article-title":"Electric elves: What went wrong and why","volume":"29","author":"tambe","year":"2008","journal-title":"AI Mag"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1561\/2300000049"},{"key":"ref7","first-page":"77","article-title":"Anticipatory Bayesian policy selection for online adaptation of collaborative robots to unknown human types","author":"g\u00f6r\u00fcr","year":"2019","journal-title":"Proc 18th Int Conf Auton Agents MultiAgent Syst"},{"key":"ref2","article-title":"Enhancing human-agent teaming with individualized, adaptive technologies: A discussion of critical scientific questions","author":"decostanza","year":"2018"},{"key":"ref9","first-page":"1454","author":"reddy","year":"2018","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref1","first-page":"117","author":"stone","year":"2010","journal-title":"Proc 9th Int Conf Auton Agents Multiagent Syst Int Found Auton Agents Multiagent Syst"},{"key":"ref20","first-page":"1155","article-title":"A game-theoretic model and best-response learning method for ad hoc coordination in multiagent systems","author":"albrecht","year":"2013","journal-title":"Proc Int Conf Autonomous Agents and Multiagent Systems"},{"key":"ref22","article-title":"Challenges of context and time in reinforcement learning: Introducing space fortress as a benchmark","author":"agarwal","year":"2019","journal-title":"Proc AAAI Reinforcement Learning in Games Workshop"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2016.02.004"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1177\/1071181320641023"},{"key":"ref23","article-title":"Individual adaptation in teamwork","author":"li","year":"2020","journal-title":"Proc 42nd Annu Conf Cogn Sci Soc"},{"key":"ref26","first-page":"1008","article-title":"Actor-critic algorithms","author":"konda","year":"2000","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref25","first-page":"192","article-title":"Semi-supervised learning of decision-making models for human-robot collaboration","author":"unhelkar","year":"2019","journal-title":"Proc Conf Robot Learn"}],"container-title":["IEEE Transactions on Human-Machine Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221037\/9614265\/09540646.pdf?arnumber=9540646","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T19:07:35Z","timestamp":1710788855000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9540646\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12]]},"references-count":32,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/thms.2021.3107675","relation":{},"ISSN":["2168-2291","2168-2305"],"issn-type":[{"value":"2168-2291","type":"print"},{"value":"2168-2305","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,12]]}}}