{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,7]],"date-time":"2026-05-07T02:42:47Z","timestamp":1778121767562,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":57,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,3,13]],"date-time":"2023-03-13T00:00:00Z","timestamp":1678665600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,3,13]]},"DOI":"10.1145\/3568294.3580049","type":"proceedings-article","created":{"date-parts":[[2023,3,8]],"date-time":"2023-03-08T18:51:31Z","timestamp":1678301491000},"page":"91-96","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":3,"title":["TEAM3 Challenge"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9168-0923","authenticated-orcid":false,"given":"Michael J.","family":"Munje","sequence":"first","affiliation":[{"name":"Georgia Institute of Technology, Atlanta, GA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6107-3893","authenticated-orcid":false,"given":"Lylybell K.","family":"Teran","sequence":"additional","affiliation":[{"name":"Columbia University, New York, NY, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3754-404X","authenticated-orcid":false,"given":"Bradon","family":"Thymes","sequence":"additional","affiliation":[{"name":"Cornell University, Ithaca, NY, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9448-9024","authenticated-orcid":false,"given":"Joseph P.","family":"Salisbury","sequence":"additional","affiliation":[{"name":"Riverside Research, Lexington, MA, USA"}]}],"member":"320","published-online":{"date-parts":[[2023,3,13]]},"reference":[{"key":"e_1_3_2_1_1_1","first-page":"1176","volume-title":"Sapporo","author":"Schneiders 
E.","year":"2022","unstructured":"E. Schneiders, \"Non-Dyadic Human-Robot Interaction: Concepts and Interaction Techniques,\" in Proceedings of the 2022 ACM\/IEEE International Conference on Human-Robot Interaction, Sapporo, Hokkaido, Japan, Mar. 2022, pp. 1176--1178."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/9407.001.0001"},{"key":"e_1_3_2_1_3_1","volume-title":"Accessed","author":"Rabinowitz N. C.","year":"2022","unstructured":"N. C. Rabinowitz, F. Perbet, H. F. Song, C. Zhang, S. M. A. Eslami, and M. Botvinick, \"Machine Theory of Mind,\" ArXiv180207740 Cs, Mar. 2018, Accessed: Feb. 07, 2022. [Online]. Available: http:\/\/arxiv.org\/abs\/1802.07740"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.3389\/frai.2022.750763"},{"key":"e_1_3_2_1_5_1","first-page":"10","article-title":"Human-Aware Planning Revisited: A Tale of Three Models","author":"Chakraborti T.","year":"2018","unstructured":"T. Chakraborti, S. Sreedharan, and S. Kambhampati, \"Human-Aware Planning Revisited: A Tale of Three Models,\" IJCAI-ECAI XAIICAPS XAIP Workshop, p. 10, 2018.","journal-title":"IJCAI-ECAI XAIICAPS XAIP Workshop"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3434074.3446365"},{"key":"e_1_3_2_1_7_1","first-page":"69 10","volume-title":"Uppsala","author":"Janarthanam S.","year":"2010","unstructured":"S. Janarthanam and O. Lemon, \"Learning to Adapt to Unknown Users: Referring Expression Generation in Spoken Dialogue Systems,\" in Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics, Uppsala, Sweden, Jul. 2010, pp. 69--78. Accessed: Nov. 18, 2022. [Online]. 
Available: https:\/\/aclanthology.org\/P10-1008"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2017.06.002"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989108"},{"key":"e_1_3_2_1_10_1","first-page":"1","volume-title":"Towards Collaborative Robots as Intelligent Co-workers in Human-Robot Joint Tasks: what to do and who does it?,\" in ISR 2020","author":"Cunha A.","year":"2020","unstructured":"A. Cunha et al., \"Towards Collaborative Robots as Intelligent Co-workers in Human-Robot Joint Tasks: what to do and who does it?,\" in ISR 2020; 52th International Symposium on Robotics, Dec. 2020, pp. 1--8."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1109\/RO-MAN47096.2020.9223461"},{"key":"e_1_3_2_1_12_1","volume-title":"https:\/\/www","year":"2022","unstructured":"\"TEAM3,\" braingames. https:\/\/www.publishing.brain-games.com\/team3 (accessed Nov. 22, 2022)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/9082.001.0001"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1016\/0364-0213(95)90018-7"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1613\/jair.5477"},{"key":"e_1_3_2_1_16_1","volume-title":"May 21, 2018. Accessed","author":"Ferreira T. C.","year":"2022","unstructured":"T. C. Ferreira, D. Moussallem, \u00c1. K\u00e1d\u00e1r, S. Wubben, and E. Krahmer, \"NeuralREG: An end-to-end approach to referring expression generation.\" arXiv, May 21, 2018. Accessed: Nov. 17, 2022. [Online]. Available: http:\/\/arxiv.org\/abs\/1805.08093"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCSE.2019.8845336"},{"key":"e_1_3_2_1_18_1","volume-title":"Understanding intentions in human teaching to design interactive task learning robots,\" in RSS 2020 Workshop: AI & Its Alternatives in Assistive & Collaborative Robotics: Decoding Intent","author":"Ramaraj P.","year":"2020","unstructured":"P. Ramaraj, M. 
Klenk, and S. Mohan, \"Understanding intentions in human teaching to design interactive task learning robots,\" in RSS 2020 Workshop: AI & Its Alternatives in Assistive & Collaborative Robotics: Decoding Intent, 2020."},{"key":"e_1_3_2_1_19_1","volume-title":"Establishing common ground for learning robots,\" in RSS 2018: Workshop on Models and Representations for Natural Human-Robot Communication","author":"Ramaraj P.","year":"2018","unstructured":"P. Ramaraj and J. E. Laird, \"Establishing common ground for learning robots,\" in RSS 2018: Workshop on Models and Representations for Natural Human-Robot Communication, 2018."},{"key":"e_1_3_2_1_20_1","volume-title":"Apr. 19, 2021. Accessed","author":"Do\u011fan F. I.","year":"2022","unstructured":"F. I. Do\u011fan and I. Leite, \"Open Challenges on Generating Referring Expressions for Human-Robot Interaction.\" arXiv, Apr. 19, 2021. Accessed: Nov. 18, 2022. [Online]. Available: http:\/\/arxiv.org\/abs\/2104.09193"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/SMARTCOMP.2016.7501708"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.470"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v28i1.8934"},{"key":"e_1_3_2_1_24_1","volume-title":"Aug. 19, 2022. Accessed","author":"Akgun S. A.","year":"2022","unstructured":"S. A. Akgun, M. Ghafurian, M. Crowley, and K. Dautenhahn, \"Using Affect as a Communication Modality to Improve Human-Robot Communication in Robot-Assisted Search and Rescue Scenarios.\" arXiv, Aug. 19, 2022. Accessed: Nov. 20, 2022. [Online]. Available: http:\/\/arxiv.org\/abs\/2208.09580"},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2022.3161588"},{"key":"e_1_3_2_1_26_1","first-page":"7","volume-title":"An Organizing Principle for Learning in Human-Robot Interaction","author":"Berlin M.","unstructured":"M. 
Berlin, \"Perspective Taking: An Organizing Principle for Learning in Human-Robot Interaction,\" p. 7."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1080\/01690965.2013.855802"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636375"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2018.8625022"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1109\/HRI53351.2022.9889368"},{"key":"e_1_3_2_1_31_1","volume-title":"Jun. 11, 2018. Accessed","author":"Shridhar M.","year":"2022","unstructured":"M. Shridhar and D. Hsu, \"Interactive Visual Grounding of Referring Expressions for Human-Robot Interaction.\" arXiv, Jun. 11, 2018. Accessed: Nov. 18, 2022. [Online]. Available: http:\/\/arxiv.org\/abs\/1806.03831"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3139667"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2209.11302"},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1109\/Humanoids.2011.6100810"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2005.1545011"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2015.7363561"},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-11015-4_17"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2013.6696343"},{"key":"e_1_3_2_1_39_1","volume-title":"Learning Gaze Behaviors for Balancing Participation in Group Human-Robot Interactions,\" in Proceedings of the 2022 ACM\/IEEE International Conference on Human-Robot Interaction","author":"Gillet S.","year":"2022","unstructured":"S. Gillet, M. T. Parreira, M. Vazquez, and I. Leite, \"Learning Gaze Behaviors for Balancing Participation in Group Human-Robot Interactions,\" in Proceedings of the 2022 ACM\/IEEE International Conference on Human-Robot Interaction, 2022, p. 
10."},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1109\/HRI53351.2022.9889641"},{"key":"e_1_3_2_1_41_1","first-page":"634","volume-title":"Republic of Korea","author":"de Wit J.","year":"2019","unstructured":"J. de Wit et al., \"Playing charades with a robot: collecting a large dataset of human gestures through HRI,\" in Proceedings of the 14th ACM\/IEEE International Conference on Human-Robot Interaction, Daegu, Republic of Korea, Mar. 2019, pp. 634--635."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636105"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.procs.2018.01.089"},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-70022-9_45"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.3389\/fnbot.2018.00036"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394287"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1109\/RO-MAN47096.2020.9223524"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/HRI53351.2022.9889512"},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1155\/2022\/2341898"},{"key":"e_1_3_2_1_50_1","first-page":"14","article-title":"Sawyer Robot","year":"2022","unstructured":"\"Sawyer Robot.\" Rethink Robotics GmbH, Sep. 03, 2022. Accessed: Nov. 14, 2022. [Online]. Available: https:\/\/github.com\/RethinkRobotics\/sawyer_robot","journal-title":"Rethink Robotics GmbH"},{"key":"e_1_3_2_1_51_1","unstructured":"\"noetic - ROS Wiki.\" http:\/\/wiki.ros.org\/noetic (accessed Nov. 09 2022)."},{"key":"e_1_3_2_1_52_1","unstructured":"\"Text to Speech Software -- Amazon Polly -- Amazon Web Services \" Amazon Web Services Inc. https:\/\/aws.amazon.com\/polly\/ (accessed Nov. 
09 2022)."},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICMT53429.2021.9687192"},{"key":"e_1_3_2_1_54_1","unstructured":"\"Speech Recognition: Library for performing speech recognition with support for several engines and APIs online and offline.\" [Online]. Available: https:\/\/github.com\/Uberi\/speech_recognition#readme"},{"key":"e_1_3_2_1_55_1","volume-title":"Cross-platform audio I\/O with Port Audio.\" Accessed","year":"2022","unstructured":"\"PyAudio: Cross-platform audio I\/O with Port Audio.\" Accessed: Nov. 09, 2022. [Online]. Available: https:\/\/people.csail.mit.edu\/hubert\/pyaudio\/"},{"key":"e_1_3_2_1_56_1","first-page":"09","article-title":"cwru-robotics\/baxter_facial_animation","year":"2021","unstructured":"\"cwru-robotics\/baxter_facial_animation.\" CWRU Robotics, Dec. 24, 2021. Accessed: Nov. 09, 2022. [Online]. Available: https:\/\/github.com\/cwru-robotics\/baxter_facial_animation","journal-title":"CWRU Robotics"},{"key":"e_1_3_2_1_57_1","unstructured":"\"turtle - Turtle graphics - Python 3.11.0 documentation.\" https:\/\/docs.python.org\/3\/library\/turtle.html (accessed Nov. 
23 2022)."}],"event":{"name":"HRI '23: ACM\/IEEE International Conference on Human-Robot Interaction","location":"Stockholm Sweden","acronym":"HRI '23","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence","SIGCHI ACM Special Interest Group on Computer-Human Interaction"]},"container-title":["Companion of the 2023 ACM\/IEEE International Conference on Human-Robot Interaction"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3568294.3580049","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3568294.3580049","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T22:58:17Z","timestamp":1755817097000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3568294.3580049"}},"subtitle":["Tasks for Multi-Human and Multi-Robot Collaboration with Voice and Gestures"],"short-title":[],"issued":{"date-parts":[[2023,3,13]]},"references-count":57,"alternative-id":["10.1145\/3568294.3580049","10.1145\/3568294"],"URL":"https:\/\/doi.org\/10.1145\/3568294.3580049","relation":{},"subject":[],"published":{"date-parts":[[2023,3,13]]},"assertion":[{"value":"2023-03-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}