{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T13:10:04Z","timestamp":1755868204828,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":32,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,11,24]],"date-time":"2024-11-24T00:00:00Z","timestamp":1732406400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100006374","name":"Air Force Office of Scientific Research","doi-asserted-by":"publisher","award":["FA8655-23-1-7257"],"award-info":[{"award-number":["FA8655-23-1-7257"]}],"id":[{"id":"10.13039\/501100006374","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,11,24]]},"DOI":"10.1145\/3687272.3688325","type":"proceedings-article","created":{"date-parts":[[2024,11,20]],"date-time":"2024-11-20T00:24:28Z","timestamp":1732062268000},"page":"195-203","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Modeling a Trust Factor in Composite Tasks for Multi-Agent Reinforcement Learning"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-9034-5307","authenticated-orcid":false,"given":"Giuseppe","family":"Contino","sequence":"first","affiliation":[{"name":"DIAG, Sapienza university of Rome, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0421-5792","authenticated-orcid":false,"given":"Roberto","family":"Cipollone","sequence":"additional","affiliation":[{"name":"DIAG, Sapienza University of Rome, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2040-3355","authenticated-orcid":false,"given":"Francesco","family":"Frattolillo","sequence":"additional","affiliation":[{"name":"DIAG, Sapienza university of Rome, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-0764-3965","authenticated-orcid":false,"given":"Andrea","family":"Fanti","sequence":"additional","affiliation":[{"name":"DIAG, Sapienza University of Rome, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3191-6623","authenticated-orcid":false,"given":"Nicolo'","family":"Brandizzi","sequence":"additional","affiliation":[{"name":"rococo, DIAG, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9057-8946","authenticated-orcid":false,"given":"Luca","family":"Iocchi","sequence":"additional","affiliation":[{"name":"Department of Computer, Control, and Management Engineering Antonio Ruberti, University of Rome, La Sapienza, Italy"}]}],"member":"320","published-online":{"date-parts":[[2024,11,24]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1109\/Trustcom.2015.436"},{"key":"e_1_3_2_1_2_1","first-page":"679","article-title":"A Markovian decision process","volume":"6","author":"Bellman Richard","year":"1957","unstructured":"Richard Bellman. 1957. A Markovian decision process. Journal of Mathematics and Mechanics 6, 5 (1957), 679\u2013684. http:\/\/www.jstor.org\/stable\/24900506","journal-title":"Journal of Mathematics and Mechanics"},{"key":"e_1_3_2_1_3_1","volume-title":"LTLf \/ LDLf Non-Markovian Rewards. 32nd AAAI Conference on Artificial Intelligence, AAAI 2018","author":"Brafman I.","year":"2018","unstructured":"Ronen\u00a0I. Brafman, Giuseppe De\u00a0Giacomo, and Fabio Patrizi. 2018. LTLf \/ LDLf Non-Markovian Rewards. 32nd AAAI Conference on Artificial Intelligence, AAAI 2018 (2018), 1771\u20131778."},{"key":"e_1_3_2_1_4_1","volume-title":"Proceedings of the Tenth International Symposium on Combinatorial Search, SOCS 2017","author":"Camacho Alberto","year":"2017","unstructured":"Alberto Camacho, Oscar Chen, Scott Sanner, and Sheila\u00a0A. McIlraith. 2017. Non-Markovian Rewards Expressed in LTL: Guiding Search via Reward Shaping. In Proceedings of the Tenth International Symposium on Combinatorial Search, SOCS 2017, 16-17 June 2017, Pittsburgh, Pennsylvania, USA, Alex Fukunaga and Akihiro Kishimoto (Eds.). AAAI Press, 159\u2013160. https:\/\/aaai.org\/ocs\/index.php\/SOCS\/SOCS17\/paper\/view\/15811"},{"key":"e_1_3_2_1_5_1","volume-title":"Framing Factors: The Importance of Context and the Individual in Understanding Trust in Human-Robot Interaction.","author":"Cameron Dave","year":"2015","unstructured":"Dave Cameron, Jonathan Aitken, Emily Collins, Luke Boorman, Adriel Chua, Samuel Fernando, Owen McAree, Uriel Martinez-Hernandez, and James Law. 2015. Framing Factors: The Importance of Context and the Individual in Understanding Trust in Human-Robot Interaction."},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2831228"},{"key":"e_1_3_2_1_7_1","volume-title":"Proceedings of the 2nd International Workshop on Multidisciplinary Perspectives on Human-AI Team Trust, Vol.\u00a03634","author":"Frattolillo Francesco","year":"2023","unstructured":"Francesco Frattolillo, Nicolo\u2019 Brandizzi, Roberto Cipollone, and Luca Iocchi. 2023. Towards Computational Models for Reinforcement Learning in Human-AI Teams. In Proceedings of the 2nd International Workshop on Multidisciplinary Perspectives on Human-AI Team Trust, Vol.\u00a03634. CEUR Workshop Proceedings, Gothenburg, Sweden, 41\u201351. https:\/\/ceur-ws.org\/Vol-3634\/"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cogsys.2023.101157"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","unstructured":"Ho\u00a0Long Fung Victor-Alexandru Darvariu Stephen Hailes and Mirco Musolesi. 2022. Trust-based Consensus in Multi-Agent Reinforcement Learning Systems. https:\/\/doi.org\/10.48550\/arXiv.2205.12880 arXiv:2205.12880 [cs].","DOI":"10.48550\/arXiv.2205.12880"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1177\/0018720811417254"},{"key":"e_1_3_2_1_11_1","volume-title":"International Conference on Machine Learning. PMLR, 2107\u20132116","author":"Icarte Rodrigo\u00a0Toro","year":"2018","unstructured":"Rodrigo\u00a0Toro Icarte, Toryn Klassen, Richard Valenzano, and Sheila McIlraith. 2018. Using reward machines for high-level task specification and decomposition in reinforcement learning. In International Conference on Machine Learning. PMLR, 2107\u20132116."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.12440"},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-62056-1_44"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-64816-3_8"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.5555\/3091574.3091594"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/COASE.2016.7743428"},{"key":"e_1_3_2_1_17_1","volume-title":"Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602","author":"Mnih Volodymyr","year":"2013","unstructured":"Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Alex Graves, Ioannis Antonoglou, Daan Wierstra, and Martin Riedmiller. 2013. Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602 (2013)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1504\/IJISTA.2015.074078"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/THMS.2019.2896845"},{"key":"e_1_3_2_1_20_1","volume-title":"Reward machines for cooperative multi-agent reinforcement learning. arXiv preprint arXiv:2007.01962","author":"Neary Cyrus","year":"2020","unstructured":"Cyrus Neary, Zhe Xu, Bo Wu, and Ufuk Topcu. 2020. Reward machines for cooperative multi-agent reinforcement learning. arXiv preprint arXiv:2007.01962 (2020)."},{"key":"e_1_3_2_1_21_1","unstructured":"Andrew\u00a0Y Ng Stuart Russell 2000. Algorithms for inverse reinforcement learning.. In Icml Vol.\u00a01. 2."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.48550\/arxiv.1912.06680"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1518\/001872097778543886"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4899-7668-0_10"},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01177-2_27\/TABLES\/4"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","unstructured":"David Silver Aja Huang Chris\u00a0J. Maddison Arthur Guez Laurent Sifre George Van Den Driessche Julian Schrittwieser Ioannis Antonoglou Veda Panneershelvam Marc Lanctot Sander Dieleman Dominik Grewe John Nham Nal Kalchbrenner Ilya Sutskever Timothy Lillicrap Madeleine Leach Koray Kavukcuoglu Thore Graepel and Demis Hassabis. 2016. Mastering the game of Go with deep neural networks and tree search. Nature 2016 529:7587 529 7587 (Jan. 2016) 484\u2013489. https:\/\/doi.org\/10.1038\/nature16961","DOI":"10.1038\/nature16961"},{"key":"e_1_3_2_1_27_1","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton S.","year":"2018","unstructured":"Richard\u00a0S. Sutton and Andrew\u00a0G. Barto. 2018. Reinforcement Learning: An Introduction (second edition ed.). The MIT Press."},{"volume-title":"Multi-Agent systems: Simulation and applications","author":"Uhrmacher M","key":"e_1_3_2_1_28_1","unstructured":"Adelinde\u00a0M Uhrmacher and Danny Weyns. 2009. Multi-Agent systems: Simulation and applications. CRC press."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3145957"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/2696454.2696492"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1037\/e578652012-008"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1007\/s12369-012-0144-0"}],"event":{"name":"HAI '24: International Conference on Human-Agent Interaction","sponsor":["SIGCHI ACM Special Interest Group on Computer-Human Interaction"],"location":"Swansea United Kingdom","acronym":"HAI '24"},"container-title":["Proceedings of the 12th International Conference on Human-Agent Interaction"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3687272.3688325","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3687272.3688325","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T12:40:53Z","timestamp":1755866453000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3687272.3688325"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,24]]},"references-count":32,"alternative-id":["10.1145\/3687272.3688325","10.1145\/3687272"],"URL":"https:\/\/doi.org\/10.1145\/3687272.3688325","relation":{},"subject":[],"published":{"date-parts":[[2024,11,24]]},"assertion":[{"value":"2024-11-24","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}