{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T14:30:49Z","timestamp":1776090649231,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":70,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,3,11]],"date-time":"2025-03-11T00:00:00Z","timestamp":1741651200000},"content-version":"vor","delay-in-days":365,"URL":"http:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"ONR","award":["N00014-18-1-2442, N14-18-1-2840, N00014-23-1-2409"],"award-info":[{"award-number":["N00014-18-1-2442, N14-18-1-2840, N00014-23-1-2409"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,3,11]]},"DOI":"10.1145\/3610978.3640767","type":"proceedings-article","created":{"date-parts":[[2024,3,10]],"date-time":"2024-03-10T22:55:43Z","timestamp":1710111343000},"page":"36-45","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":17,"title":["Theory of Mind Abilities of Large Language Models in Human-Robot Interaction: An Illusion?"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4524-2102","authenticated-orcid":false,"given":"Mudit","family":"Verma","sequence":"first","affiliation":[{"name":"School of Computing and Augmented Intelligence, Arizona State University, Tempe, AZ, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1182-4999","authenticated-orcid":false,"given":"Siddhant","family":"Bhambri","sequence":"additional","affiliation":[{"name":"School of Computing and Augmented Intelligence, Arizona State University, Tempe, AZ, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9069-0265","authenticated-orcid":false,"given":"Subbarao","family":"Kambhampati","sequence":"additional","affiliation":[{"name":"School of Computing and Augmented Intelligence, Arizona State 
University, Tempe, AZ, USA"}]}],"member":"320","published-online":{"date-parts":[[2024,3,11]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"Rohan Anil Andrew M Dai Orhan Firat Melvin Johnson Dmitry Lepikhin Alexandre Passos Siamak Shakeri Emanuel Taropa Paige Bailey Zhifeng Chen et al. 2023. Palm 2 technical report. arXiv preprint arXiv:2305.10403 (2023)."},{"key":"e_1_3_2_1_2_1","volume-title":"Why language matters for theory of mind","author":"Astington Janet Wilde","unstructured":"Janet Wilde Astington and Jodie A Baird. 2005. Why language matters for theory of mind. Oxford University Press."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1080\/02699939508409006"},{"key":"e_1_3_2_1_4_1","volume-title":"Proceedings of the annual meeting of the cognitive science society","volume":"33","author":"Baker Chris","year":"2011","unstructured":"Chris Baker, Rebecca Saxe, and Joshua Tenenbaum. 2011. Bayesian theory of mind: Modeling joint belief-desire attribution. In Proceedings of the annual meeting of the cognitive science society, Vol. 33."},{"key":"e_1_3_2_1_5_1","volume-title":"A categorical archive of chatgpt failures. arXiv preprint arXiv:2302.03494","author":"Borji Ali","year":"2023","unstructured":"Ali Borji. 2023. A categorical archive of chatgpt failures. arXiv preprint arXiv:2302.03494 (2023)."},{"key":"e_1_3_2_1_6_1","volume-title":"Yuanzhi Li, Scott Lundberg, et al. 2023 a. Sparks of artificial general intelligence: Early experiments with gpt-4. arXiv preprint arXiv:2303.12712","author":"Bubeck S\u00e9bastien","year":"2023","unstructured":"S\u00e9bastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, et al. 2023 a. Sparks of artificial general intelligence: Early experiments with gpt-4. arXiv preprint arXiv:2303.12712 (2023)."},{"key":"e_1_3_2_1_7_1","volume-title":"Yuanzhi Li, Scott Lundberg, et al. 2023 b. 
Sparks of artificial general intelligence: Early experiments with gpt-4. arXiv preprint arXiv:2303.12712","author":"Bubeck S\u00e9bastien","year":"2023","unstructured":"S\u00e9bastien Bubeck, Varun Chandrasekaran, Ronen Eldan, Johannes Gehrke, Eric Horvitz, Ece Kamar, Peter Lee, Yin Tat Lee, Yuanzhi Li, Scott Lundberg, et al. 2023 b. Sparks of artificial general intelligence: Early experiments with gpt-4. arXiv preprint arXiv:2303.12712 (2023)."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2015.7354125"},{"key":"e_1_3_2_1_9_1","volume-title":"arxiv","author":"Chakraborti Tathagata","year":"1811","unstructured":"Tathagata Chakraborti, Anagha Kulkarni, Sarath Sreedharan, David E. Smith, and Subbarao Kambhampati. 2018. Explicability? Legibility? Predictability? Transparency? Privacy? Security? The Emerging Landscape of Interpretable Agent Behavior. arxiv: 1811.09722 [cs.AI]"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1609\/icaps.v29i1.3463"},{"key":"e_1_3_2_1_11_1","volume-title":"Plan explanations as model reconciliation: Moving beyond explanation as soliloquy. arXiv preprint arXiv:1701.08317","author":"Chakraborti Tathagata","year":"2017","unstructured":"Tathagata Chakraborti, Sarath Sreedharan, Yu Zhang, and Subbarao Kambhampati. 2017. Plan explanations as model reconciliation: Moving beyond explanation as soliloquy. arXiv preprint arXiv:1701.08317 (2017)."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1109\/HRI.2016.7451768"},{"key":"e_1_3_2_1_13_1","unstructured":"Danny Driess Fei Xia Mehdi S. M. Sajjadi Corey Lynch Aakanksha Chowdhery Brian Ichter Ayzaan Wahid Jonathan Tompson Quan Vuong Tianhe Yu Wenlong Huang Yevgen Chebotar Pierre Sermanet Daniel Duckworth Sergey Levine Vincent Vanhoucke Karol Hausman Marc Toussaint Klaus Greff Andy Zeng Igor Mordatch and Pete Florence. 2023. PaLM-E: An Embodied Multimodal Language Model. 
In arXiv preprint arXiv:2303.03379."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-023-37032-0"},{"key":"e_1_3_2_1_15_1","volume-title":"Theory of mind. Current biology","author":"Frith Chris","year":"2005","unstructured":"Chris Frith and Uta Frith. 2005. Theory of mind. Current biology, Vol. 15, 17 (2005), R644--R645."},{"key":"e_1_3_2_1_16_1","first-page":"21885","article-title":"Widening the pipeline in human-guided reinforcement learning with explanation and context-aware data augmentation","volume":"34","author":"Guan Lin","year":"2021","unstructured":"Lin Guan, Mudit Verma, Suna Sihang Guo, Ruohan Zhang, and Subbarao Kambhampati. 2021. Widening the pipeline in human-guided reinforcement learning with explanation and context-aware data augmentation. Advances in Neural Information Processing Systems, Vol. 34 (2021), 21885--21897.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_17_1","volume-title":"Machine Common Sense Concept Paper. arxiv","author":"Gunning David","year":"1810","unstructured":"David Gunning. 2018. Machine Common Sense Concept Paper. arxiv: 1810.07528 [cs.AI]"},{"key":"e_1_3_2_1_18_1","volume-title":"Deception abilities emerged in large language models. arXiv preprint arXiv:2307.16513","author":"Hagendorff Thilo","year":"2023","unstructured":"Thilo Hagendorff. 2023. Deception abilities emerged in large language models. arXiv preprint arXiv:2307.16513 (2023)."},{"key":"e_1_3_2_1_19_1","volume-title":"Fledgling theories of mind: Deception as a marker of three-year-olds' understanding of false belief. Child development","author":"Hala Suzanne","year":"1991","unstructured":"Suzanne Hala, Michael Chandler, and Anna S Fritz. 1991. Fledgling theories of mind: Deception as a marker of three-year-olds' understanding of false belief. Child development, Vol. 
62, 1 (1991), 83--97."},{"key":"e_1_3_2_1_20_1","volume-title":"Twenty-Second International Joint Conference on Artificial Intelligence.","author":"Hiatt Laura M","year":"2011","unstructured":"Laura M Hiatt, Anthony M Harrison, and J Gregory Trafton. 2011. Accommodating human variability in human-robot teams through theory of mind. In Twenty-Second International Joint Conference on Artificial Intelligence."},{"key":"e_1_3_2_1_21_1","unstructured":"Wenlong Huang Fei Xia Ted Xiao Harris Chan Jacky Liang Pete Florence Andy Zeng Jonathan Tompson Igor Mordatch Yevgen Chebotar et al. 2022. Inner monologue: Embodied reasoning through planning with language models. arXiv preprint arXiv:2207.05608 (2022)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"crossref","unstructured":"Jan Koco'n Igor Cichecki Oliwier Kaszyca Mateusz Kochanek Dominika Szyd\u0142o Joanna Baran Julita Bielaniewicz Marcin Gruza Arkadiusz Janz Kamil Kanclerz et al. 2023. ChatGPT: Jack of all trades master of none. Information Fusion (2023) 101861.","DOI":"10.1016\/j.inffus.2023.101861"},{"key":"e_1_3_2_1_23_1","volume-title":"Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa.","author":"Kojima Takeshi","year":"2022","unstructured":"Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022. Large language models are zero-shot reasoners. Advances in neural information processing systems, Vol. 35 (2022), 22199--22213."},{"key":"e_1_3_2_1_24_1","volume-title":"2023 a. Theory of mind may have spontaneously emerged in large language models. arXiv preprint arXiv:2302.02083","author":"Kosinski Michal","year":"2023","unstructured":"Michal Kosinski. 2023 a. Theory of mind may have spontaneously emerged in large language models. arXiv preprint arXiv:2302.02083 (2023)."},{"key":"e_1_3_2_1_25_1","volume-title":"2023 b. Theory of mind may have spontaneously emerged in large language models. 
arXiv preprint arXiv:2302.02083","author":"Kosinski Michal","year":"2023","unstructured":"Michal Kosinski. 2023 b. Theory of mind may have spontaneously emerged in large language models. arXiv preprint arXiv:2302.02083 (2023)."},{"key":"e_1_3_2_1_26_1","volume-title":"Signaling friends and head-faking enemies simultaneously: Balancing goal obfuscation and goal legibility. arXiv preprint arXiv:1905.10672","author":"Kulkarni Anagha","year":"2019","unstructured":"Anagha Kulkarni, Siddharth Srivastava, and Subbarao Kambhampati. 2019a. Signaling friends and head-faking enemies simultaneously: Balancing goal obfuscation and goal legibility. arXiv preprint arXiv:1905.10672 (2019)."},{"key":"e_1_3_2_1_27_1","volume-title":"Planning for proactive assistance in environments with partial observability. arXiv preprint arXiv:2105.00525","author":"Kulkarni Anagha","year":"2021","unstructured":"Anagha Kulkarni, Siddharth Srivastava, and Subbarao Kambhampati. 2021. Planning for proactive assistance in environments with partial observability. arXiv preprint arXiv:2105.00525 (2021)."},{"key":"e_1_3_2_1_28_1","volume-title":"AAMAS Conference proceedings.","author":"Kulkarni Anagha","year":"2019","unstructured":"Anagha Kulkarni, Yantian Zha, Tathagata Chakraborti, Satya Gautam Vadlamudi, Yu Zhang, and Subbarao Kambhampati. 2019b. Explicable planning as minimizing distance from expected behavior. In AAMAS Conference proceedings."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/HRI.2019.8673023"},{"key":"e_1_3_2_1_30_1","volume-title":"2023 a. Llm p: Empowering large language models with optimal planning proficiency. arXiv preprint arXiv:2304.11477","author":"Liu Bo","year":"2023","unstructured":"Bo Liu, Yuqian Jiang, Xiaohan Zhang, Qiang Liu, Shiqi Zhang, Joydeep Biswas, and Peter Stone. 2023 a. Llm p: Empowering large language models with optimal planning proficiency. 
arXiv preprint arXiv:2304.11477 (2023)."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"e_1_3_2_1_32_1","volume-title":"The role of language in the development of false belief understanding: A training study. Child development","author":"Lohmann Heidemarie","year":"2003","unstructured":"Heidemarie Lohmann and Michael Tomasello. 2003. The role of language in the development of false belief understanding: A training study. Child development, Vol. 74, 4 (2003), 1130--1144."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1145\/3610171"},{"key":"e_1_3_2_1_34_1","volume-title":"Proceedings of the international conference on cognitive modeling","volume":"36","author":"Marsella Stacy C","year":"2004","unstructured":"Stacy C Marsella, David V Pynadath, and Stephen J Read. 2004. PsychSim: Agent-based modeling of social interactions and influence. In Proceedings of the international conference on cognitive modeling, Vol. 36. 243--248."},{"key":"e_1_3_2_1_35_1","volume-title":"Griffiths","author":"McCoy R. Thomas","year":"2023","unstructured":"R. Thomas McCoy, Shunyu Yao, Dan Friedman, Matthew Hardy, and Thomas L. Griffiths. 2023. Embers of Autoregression: Understanding Large Language Models Through the Problem They are Trained to Solve. arxiv: 2309.13638 [cs.CL]"},{"key":"e_1_3_2_1_36_1","unstructured":"OpenAI. 2023 a. ChatGPT can now see hear and speak. https:\/\/openai.com\/blog\/ chatgpt-can-now-see-hear-and-speak. [Accessed 16-01--2024]."},{"key":"e_1_3_2_1_37_1","unstructured":"OpenAI. 2023 b. GPT-4 Technical Report. arxiv: 2303.08774 [cs.CL]"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.jbef.2017.12.004"},{"key":"e_1_3_2_1_39_1","first-page":"1181","article-title":"PsychSim: Modeling theory of mind with decision-theoretic agents","volume":"5","author":"Pynadath David V","year":"2005","unstructured":"David V Pynadath and Stacy C Marsella. 2005. 
PsychSim: Modeling theory of mind with decision-theoretic agents. In IJCAI, Vol. 5. 1181--1186.","journal-title":"IJCAI"},{"key":"e_1_3_2_1_40_1","volume-title":"Event2mind: Commonsense inference on events, intents, and reactions. arXiv preprint arXiv:1805.06939","author":"Rashkin Hannah","year":"2018","unstructured":"Hannah Rashkin, Maarten Sap, Emily Allaway, Noah A Smith, and Yejin Choi. 2018. Event2mind: Commonsense inference on events, intents, and reactions. arXiv preprint arXiv:1805.06939 (2018)."},{"key":"e_1_3_2_1_41_1","volume-title":"Neural theory-of-mind? on the limits of social intelligence in large lms. arXiv preprint arXiv:2210.13312","author":"Sap Maarten","year":"2022","unstructured":"Maarten Sap, Ronan LeBras, Daniel Fried, and Yejin Choi. 2022. Neural theory-of-mind? on the limits of social intelligence in large lms. arXiv preprint arXiv:2210.13312 (2022)."},{"key":"e_1_3_2_1_42_1","volume-title":"Socialiqa: Commonsense reasoning about social interactions. arXiv preprint arXiv:1904.09728","author":"Sap Maarten","year":"2019","unstructured":"Maarten Sap, Hannah Rashkin, Derek Chen, Ronan LeBras, and Yejin Choi. 2019. Socialiqa: Commonsense reasoning about social interactions. arXiv preprint arXiv:1904.09728 (2019)."},{"key":"e_1_3_2_1_43_1","volume-title":"Foundations for a Theory of Mind for a Humanoid Robot. Ph.,D. Dissertation","author":"Scassellati Brian M","unstructured":"Brian M Scassellati. 2001. Foundations for a Theory of Mind for a Humanoid Robot. Ph.,D. Dissertation. Massachusetts Institute of Technology."},{"key":"e_1_3_2_1_44_1","volume-title":"Xuhui Zhou, Yejin Choi, Yoav Goldberg, Maarten Sap, and Vered Shwartz.","author":"Shapira Natalie","year":"2023","unstructured":"Natalie Shapira, Mosh Levy, Seyed Hossein Alavi, Xuhui Zhou, Yejin Choi, Yoav Goldberg, Maarten Sap, and Vered Shwartz. 2023. Clever hans or neural theory of mind? stress testing social reasoning in large language models. 
arXiv preprint arXiv:2305.14763 (2023)."},{"key":"e_1_3_2_1_45_1","volume-title":"Proceedings of the 40th International Conference on Machine Learning (Proceedings of Machine Learning Research","volume":"31227","author":"Shi Freda","year":"2023","unstructured":"Freda Shi, Xinyun Chen, Kanishka Misra, Nathan Scales, David Dohan, Ed H. Chi, Nathanael Scharli, and Denny Zhou. 2023. Large Language Models Can Be Easily Distracted by Irrelevant Context. In Proceedings of the 40th International Conference on Machine Learning (Proceedings of Machine Learning Research, Vol. 202), Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (Eds.). PMLR, 31210--31227. https:\/\/proceedings.mlr.press\/v202\/shi23a.html"},{"key":"e_1_3_2_1_46_1","volume-title":"Llm-planner: Few-shot grounded planning for embodied agents with large language models. arXiv preprint arXiv:2212.04088","author":"Song Chan Hee","year":"2022","unstructured":"Chan Hee Song, Jiaman Wu, Clayton Washington, Brian M Sadler, Wei-Lun Chao, and Yu Su. 2022. Llm-planner: Few-shot grounded planning for embodied agents with large language models. arXiv preprint arXiv:2212.04088 (2022)."},{"key":"e_1_3_2_1_47_1","volume-title":"2017 AAAI Fall Symposium Series.","author":"Sreedharan Sarath","year":"2017","unstructured":"Sarath Sreedharan, Subbarao Kambhampati, et al. 2017. Balancing explicability and explanation in human-aware planning. In 2017 AAAI Fall Symposium Series."},{"key":"e_1_3_2_1_48_1","volume-title":"Explainable Human-AI Interaction: A Planning Perspective","author":"Sreedharan Sarath","unstructured":"Sarath Sreedharan, Anagha Kulkarni, and Subbarao Kambhampati. 2022. Explainable Human-AI Interaction: A Planning Perspective. Springer Nature."},{"key":"e_1_3_2_1_49_1","volume-title":"Bridging the Gap: Providing Post-Hoc Symbolic Explanations for Sequential Decision-Making Problems with Inscrutable Representations. 
arXiv preprint arXiv:2002.01080","author":"Sreedharan Sarath","year":"2020","unstructured":"Sarath Sreedharan, Utkarsh Soni, Mudit Verma, Siddharth Srivastava, and Subbarao Kambhampati. 2020. Bridging the Gap: Providing Post-Hoc Symbolic Explanations for Sequential Decision-Making Problems with Inscrutable Representations. arXiv preprint arXiv:2002.01080 (2020)."},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACII52823.2021.9597390"},{"key":"e_1_3_2_1_51_1","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_1_52_1","unstructured":"Tomer Ullman. 2023 a. Large Language Models Fail on Trivial Alterations to Theory-of-Mind Tasks. arxiv: 2302.08399 [cs.AI]"},{"key":"e_1_3_2_1_53_1","volume-title":"2023 b. Large language models fail on trivial alterations to theory-of-mind tasks. arXiv preprint arXiv:2302.08399","author":"Ullman Tomer","year":"2023","unstructured":"Tomer Ullman. 2023 b. Large language models fail on trivial alterations to theory-of-mind tasks. arXiv preprint arXiv:2302.08399 (2023)."},{"key":"e_1_3_2_1_54_1","unstructured":"Karthik Valmeekam Alberto Olmo Sarath Sreedharan and Subbarao Kambhampati. 2023. Large Language Models Still Can't Plan (A Benchmark for LLMs on Planning and Reasoning about Change). arxiv: 2206.10498 [cs.CL]"},{"key":"e_1_3_2_1_55_1","volume-title":"Exploiting Unlabeled Data for Feedback Efficient Human Preference based Reinforcement Learning. arXiv preprint arXiv:2302.08738","author":"Verma Mudit","year":"2023","unstructured":"Mudit Verma, Siddhant Bhambri, and Subbarao Kambhampati. 2023. Exploiting Unlabeled Data for Feedback Efficient Human Preference based Reinforcement Learning. 
arXiv preprint arXiv:2302.08738 (2023)."},{"key":"e_1_3_2_1_56_1","volume-title":"Symbol Guided Hindsight Priors for Reward Learning from Human Preferences. arXiv preprint arXiv:2210.09151","author":"Verma Mudit","year":"2022","unstructured":"Mudit Verma and Katherine Metcalf. 2022. Symbol Guided Hindsight Priors for Reward Learning from Human Preferences. arXiv preprint arXiv:2210.09151 (2022)."},{"key":"e_1_3_2_1_57_1","doi-asserted-by":"publisher","DOI":"10.1098\/rstb.2018.0032"},{"key":"e_1_3_2_1_58_1","volume-title":"Aakanksha Chowdhery, and Denny Zhou.","author":"Wang Xuezhi","year":"2022","unstructured":"Xuezhi Wang, Jason Wei, Dale Schuurmans, Quoc Le, Ed Chi, Sharan Narang, Aakanksha Chowdhery, and Denny Zhou. 2022. Self-consistency improves chain of thought reasoning in language models. arXiv preprint arXiv:2203.11171 (2022)."},{"key":"e_1_3_2_1_59_1","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume":"35","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems, Vol. 35 (2022), 24824--24837.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_60_1","volume-title":"The child's theory of mind","author":"Wellman Henry M","unstructured":"Henry M Wellman. 1992. The child's theory of mind. The MIT Press."},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1016\/0010-0277(83)90004-5"},{"key":"e_1_3_2_1_62_1","volume-title":"Workshop, :, and Teven Le Scao et al.","year":"2023","unstructured":"BigScience Workshop, :, and Teven Le Scao et al. 2023. BLOOM: A 176B-Parameter Open-Access Multilingual Language Model. 
arxiv: 2211.05100 [cs.CL]"},{"key":"e_1_3_2_1_63_1","volume-title":"Emotionx-ku: Bert-max based contextual emotion classifier. arXiv preprint arXiv:1906.11565","author":"Yang Kisu","year":"2019","unstructured":"Kisu Yang, Dongyub Lee, Taesun Whang, Seolhwa Lee, and Heuiseok Lim. 2019. Emotionx-ku: Bert-max based contextual emotion classifier. arXiv preprint arXiv:1906.11565 (2019)."},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"crossref","unstructured":"Xin Ye Lionel Robert et al. 2023. Human Security Robot Interaction and Anthropomorphism: An Examination of Pepper RAMSEE and Knightscope Robots. (2023).","DOI":"10.1109\/RO-MAN57019.2023.10309400"},{"key":"e_1_3_2_1_65_1","unstructured":"Zahra Zahedi Sarath Sreedharan and Subbarao Kambhampati. 2022a. A Mental-Model Centric Landscape of Human-AI Symbiosis. arxiv: 2202.09447 [cs.AI]"},{"key":"e_1_3_2_1_66_1","unstructured":"Zahra Zahedi Sarath Sreedharan and Subbarao Kambhampati. 2023. A Mental Model Based Theory of Trust. arxiv: 2301.12569 [cs.AI]"},{"key":"e_1_3_2_1_67_1","doi-asserted-by":"publisher","DOI":"10.1109\/HRI53351.2022.9889475"},{"key":"e_1_3_2_1_68_1","volume-title":"Trust-Aware Planning: Modeling Trust Evolution in Longitudinal Human-Robot Interaction. CoRR","author":"Zahedi Zahra","year":"2021","unstructured":"Zahra Zahedi, Mudit Verma, Sarath Sreedharan, and Subbarao Kambhampati. 2021. Trust-Aware Planning: Modeling Trust Evolution in Longitudinal Human-Robot Interaction. CoRR, Vol. abs\/2105.01220 (2021). showeprint[arXiv]2105.01220 https:\/\/arxiv.org\/abs\/2105.01220"},{"key":"e_1_3_2_1_69_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989155"},{"key":"e_1_3_2_1_70_1","unstructured":"Wayne Xin Zhao Kun Zhou Junyi Li Tianyi Tang Xiaolei Wang Yupeng Hou Yingqian Min Beichen Zhang Junjie Zhang Zican Dong Yifan Du Chen Yang Yushuo Chen Zhipeng Chen Jinhao Jiang Ruiyang Ren Yifan Li Xinyu Tang Zikang Liu Peiyu Liu Jian-Yun Nie and Ji-Rong Wen. 2023. A Survey of Large Language Models. 
arxiv: 2303.18223 [cs.CL]"}],"event":{"name":"HRI '24: ACM\/IEEE International Conference on Human-Robot Interaction","location":"Boulder CO USA","acronym":"HRI '24","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence","SIGCHI ACM Special Interest Group on Computer-Human Interaction"]},"container-title":["Companion of the 2024 ACM\/IEEE International Conference on Human-Robot Interaction"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3610978.3640767","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3610978.3640767","content-type":"application\/pdf","content-version":"vor","intended-application":"syndication"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3610978.3640767","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T01:27:01Z","timestamp":1755826021000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3610978.3640767"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,3,11]]},"references-count":70,"alternative-id":["10.1145\/3610978.3640767","10.1145\/3610978"],"URL":"https:\/\/doi.org\/10.1145\/3610978.3640767","relation":{},"subject":[],"published":{"date-parts":[[2024,3,11]]},"assertion":[{"value":"2024-03-11","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}