{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T04:44:19Z","timestamp":1776055459947,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":33,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2026,4,13]]},"DOI":"10.1145\/3772363.3798613","type":"proceedings-article","created":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T01:55:28Z","timestamp":1776045328000},"page":"1-7","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Seeing the Reasoning: How LLM Rationales Influence User Trust and Decision-Making in Factual Verification Tasks"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8188-7576","authenticated-orcid":false,"given":"Xin","family":"Sun","sequence":"first","affiliation":[{"name":"Digital Content and Media Sciences Research Division, National Institute of Informatics (NII), Tokyo, Japan and Social and Behavioural Science, University of Amsterdam, Amsterdam, Netherlands"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6854-2488","authenticated-orcid":false,"given":"Shu","family":"Wei","sequence":"additional","affiliation":[{"name":"XR Pediatrics, Yale School of Medicine, New Haven, Connecticut, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7780-4806","authenticated-orcid":false,"given":"Jos A","family":"Bosch","sequence":"additional","affiliation":[{"name":"Psychology, University of Amsterdam, Amsterdam, Netherlands"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4908-1860","authenticated-orcid":false,"given":"Isao","family":"Echizen","sequence":"additional","affiliation":[{"name":"Information and Society Research Division, National Institute of Informatics, Tokyo, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0061-0680","authenticated-orcid":false,"given":"Saku","family":"Sugawara","sequence":"additional","affiliation":[{"name":"Digital Content and Media Sciences Research Division, National Institute of Informatics (NII), Tokyo, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9954-4088","authenticated-orcid":false,"given":"Abdallah","family":"El Ali","sequence":"additional","affiliation":[{"name":"Centrum Wiskunde &amp; Informatica (CWI), Amsterdam, Netherlands and Utrecht University, Utrecht, Netherlands"}]}],"member":"320","published-online":{"date-parts":[[2026,4,13]]},"reference":[{"key":"e_1_3_3_1_2_2","unstructured":"Alejandro\u00a0Barredo Arrieta Natalia D\u00edaz-Rodr\u00edguez Javier\u00a0Del Ser Adrien Bennetot Siham Tabik Alberto Barbado Salvador Garc\u00eda Sergio Gil-L\u00f3pez Daniel Molina Richard Benjamins Raja Chatila and Francisco Herrera. 2019. Explainable Artificial Intelligence (XAI): Concepts Taxonomies Opportunities and Challenges toward Responsible AI. arxiv:https:\/\/arXiv.org\/abs\/1910.10045\u00a0[cs.AI] https:\/\/arxiv.org\/abs\/1910.10045"},{"key":"e_1_3_3_1_3_2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-04898-2_132"},{"key":"e_1_3_3_1_4_2","doi-asserted-by":"publisher","unstructured":"Severin Bachmann. 2025. Efficient XAI: A Low-Cost Data Reduction Approach to SHAP Interpretability. J. Artif. Int. Res. (2025) 21\u00a0pages. 10.1613\/jair.1.18325","DOI":"10.1613\/jair.1.18325"},{"key":"e_1_3_3_1_5_2","doi-asserted-by":"publisher","unstructured":"Kevin Bauer Moritz von Zahn and Oliver Hinz. 2023. Expl(AI)ned: The Impact of Explainable Artificial Intelligence on Users\u2019 Information Processing. Information Systems Research 34 4 (2023) 1582\u20131602. arXiv:10.1287\/isre.2023.1199","DOI":"10.1287\/isre.2023.1199"},{"key":"e_1_3_3_1_6_2","unstructured":"Angelica Chen Jason Phang Alicia Parrish Vishakh Padmakumar Chen Zhao Samuel\u00a0R. Bowman and Kyunghyun Cho. 2024. Two Failures of Self-Consistency in the Multi-Step Reasoning of LLMs. arxiv:https:\/\/arXiv.org\/abs\/2305.14279\u00a0[cs.CL] https:\/\/arxiv.org\/abs\/2305.14279"},{"key":"e_1_3_3_1_7_2","doi-asserted-by":"publisher","unstructured":"Cheng Chen and Xinyan Zhao. 2025. Let\u2019s Think Step by Step: Effects of Chain-of-Thought Prompt Coaching on Users\u2019 Perceptions and Trust in Image Generative AI Tools. International Journal of Human\u2013Computer Interaction (2025). 10.1080\/10447318.2025.2530762","DOI":"10.1080\/10447318.2025.2530762"},{"key":"e_1_3_3_1_8_2","doi-asserted-by":"publisher","unstructured":"Jing Chen Scott Mishler and Bin Hu. 2021. Automation Error Type and Methods of Communicating Automation Reliability Affect Trust and Performance: An Empirical Study in the Cyber Domain. IEEE Transactions on Human-Machine Systems 51 5 (2021) 463\u2013473. 10.1109\/THMS.2021.3051137","DOI":"10.1109\/THMS.2021.3051137"},{"key":"e_1_3_3_1_9_2","doi-asserted-by":"publisher","unstructured":"Carolin Ebermann Matthias Selisky and Stephan Weibelzahl. 2023. Explainable AI: The Effect of Contradictory Decisions and Explanations on Users\u2019 Acceptance of AI Systems. International Journal of Human\u2013Computer Interaction 39 9 (2023) 1807\u20131826. arXiv:10.1080\/10447318.2022.2126812","DOI":"10.1080\/10447318.2022.2126812"},{"key":"e_1_3_3_1_10_2","doi-asserted-by":"crossref","unstructured":"Satu Elo and Helvi Kyng\u00e4s. 2008. The qualitative content analysis process. Journal of advanced nursing 62 1 (2008) 107\u2013115.","DOI":"10.1111\/j.1365-2648.2007.04569.x"},{"key":"e_1_3_3_1_11_2","doi-asserted-by":"crossref","unstructured":"Franz Faul Edgar Erdfelder Albert-Georg Lang and Axel Buchner. 2007. G*Power 3: a flexible statistical power analysis program for the social behavioral and biomedical sciences. Behav. Res. Methods 39 2 (May 2007) 175\u2013191.","DOI":"10.3758\/BF03193146"},{"key":"e_1_3_3_1_12_2","doi-asserted-by":"crossref","unstructured":"Mor Geva Daniel Khashabi Elad Segal Tushar Khot Dan Roth and Jonathan Berant. 2021. Did Aristotle Use a Laptop? A Question Answering Benchmark with Implicit Reasoning Strategies. arxiv:https:\/\/arXiv.org\/abs\/2101.02235\u00a0[cs.CL] https:\/\/arxiv.org\/abs\/2101.02235","DOI":"10.1162\/tacl_a_00370"},{"key":"e_1_3_3_1_13_2","unstructured":"Himabindu Lakkaraju and Osbert Bastani. 2019. \"How do I fool you?\": Manipulating User Trust via Misleading Black Box Explanations. arxiv:https:\/\/arXiv.org\/abs\/1911.06473\u00a0[cs.AI] https:\/\/arxiv.org\/abs\/1911.06473"},{"key":"e_1_3_3_1_14_2","doi-asserted-by":"publisher","DOI":"10.1145\/3706598.3713336"},{"key":"e_1_3_3_1_15_2","doi-asserted-by":"publisher","DOI":"10.1145\/3576840.3578331"},{"key":"e_1_3_3_1_16_2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.19"},{"key":"e_1_3_3_1_17_2","doi-asserted-by":"publisher","unstructured":"Katelyn Morrison Philipp Spitzer Violet Turri Michelle Feng Niklas K\u00fchl and Adam Perer. 2024. The Impact of Imperfect XAI on Human-AI Decision-Making. Proc. ACM Hum.-Comput. Interact. 8 CSCW1 Article 183 (April 2024) 39\u00a0pages. 10.1145\/3641022","DOI":"10.1145\/3641022"},{"key":"e_1_3_3_1_18_2","doi-asserted-by":"crossref","unstructured":"Saugat Pandey Oen\u00a0G. McKinley R.\u00a0Jordan Crouser and Alvitta Ottley. 2023. Do You Trust What You See? Toward A Multidimensional Measure of Trust in Visualization. arxiv:https:\/\/arXiv.org\/abs\/2308.04727\u00a0[cs.HC] https:\/\/arxiv.org\/abs\/2308.04727","DOI":"10.1109\/VIS54172.2023.00014"},{"key":"e_1_3_3_1_19_2","unstructured":"Andrea Papenmeier Gwenn Englebienne and Christin Seifert. 2019. How model accuracy and explanation fidelity influence user trust. arxiv:https:\/\/arXiv.org\/abs\/1907.12652\u00a0[cs.CY] https:\/\/arxiv.org\/abs\/1907.12652"},{"key":"e_1_3_3_1_20_2","doi-asserted-by":"publisher","DOI":"10.1145\/3544549.3585808"},{"key":"e_1_3_3_1_21_2","unstructured":"Prolific. 2014. https:\/\/www.prolific.com"},{"key":"e_1_3_3_1_22_2","doi-asserted-by":"publisher","unstructured":"Ahmed\u00a0M. Salih Zahra Raisi\u2010Estabragh Ilaria\u00a0Boscolo Galazzo Petia Radeva Steffen\u00a0E. Petersen Karim Lekadir and Gloria Menegaz. 2024. A Perspective on Explainable Artificial Intelligence Methods: SHAP and LIME. Advanced Intelligent Systems 7 1 (June 2024). 10.1002\/aisy.202400304","DOI":"10.1002\/aisy.202400304"},{"key":"e_1_3_3_1_23_2","doi-asserted-by":"publisher","unstructured":"S.\u00a0S. SHAPIRO and M.\u00a0B. WILK. 1965. An analysis of variance test for normality (complete samples). Biometrika 52 3-4 (dec 1965) 591\u2013611. 10.1093\/biomet\/52.3-4.591","DOI":"10.1093\/biomet\/52.3-4.591"},{"key":"e_1_3_3_1_24_2","doi-asserted-by":"publisher","unstructured":"Aaron Springer and Steve Whittaker. 2020. Progressive Disclosure: When Why and How Do Users Want Algorithmic Transparency Information? ACM Trans. Interact. Intell. Syst. 10 4 Article 29 (Oct. 2020) 32\u00a0pages. 10.1145\/3374218","DOI":"10.1145\/3374218"},{"key":"e_1_3_3_1_25_2","doi-asserted-by":"publisher","unstructured":"Anna Taudien Andreas F\u00fcgener Alok Gupta and Wolfgang Ketter. 2022. The Effect of AI Advice on Human Confidence in Decision-Making. 10.24251\/HICSS.2022.029","DOI":"10.24251\/HICSS.2022.029"},{"key":"e_1_3_3_1_26_2","series-title":"(NIPS \u201923)","volume-title":"Proceedings of the 37th International Conference on Neural Information Processing Systems","author":"Turpin Miles","year":"2023","unstructured":"Miles Turpin, Julian Michael, Ethan Perez, and Samuel\u00a0R. Bowman. 2023. Language models don\u2019t always say what they think: unfaithful explanations in chain-of-thought prompting. In Proceedings of the 37th International Conference on Neural Information Processing Systems (New Orleans, LA, USA) (NIPS \u201923). Red Hook, NY, USA, Article 3275, 14\u00a0pages."},{"key":"e_1_3_3_1_27_2","unstructured":"Helena Vasconcelos Matthew J\u00f6rke Madeleine Grunde-McLaughlin Tobias Gerstenberg Michael Bernstein and Ranjay Krishna. 2023. Explanations Can Reduce Overreliance on AI Systems During Decision-Making. arxiv:https:\/\/arXiv.org\/abs\/2212.06823\u00a0[cs.HC] https:\/\/arxiv.org\/abs\/2212.06823"},{"key":"e_1_3_3_1_28_2","doi-asserted-by":"publisher","unstructured":"Oleksandra Vereschak Gilles Bailly and Baptiste Caramiaux. 2021. How to Evaluate Trust in AI-Assisted Decision Making? A Survey of Empirical Methodologies. Proc. ACM Hum.-Comput. Interact. 5 CSCW2 Article 327 (Oct. 2021) 39\u00a0pages. 10.1145\/3476068","DOI":"10.1145\/3476068"},{"key":"e_1_3_3_1_29_2","unstructured":"William Walden. 2026. Reasoning Models Will Blatantly Lie About Their Reasoning. arxiv:https:\/\/arXiv.org\/abs\/2601.07663\u00a0[cs.AI] https:\/\/arxiv.org\/abs\/2601.07663"},{"key":"e_1_3_3_1_30_2","unstructured":"Jason Wei Xuezhi Wang Dale Schuurmans Maarten Bosma Brian Ichter Fei Xia Ed Chi Quoc Le and Denny Zhou. 2023. Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. arxiv:https:\/\/arXiv.org\/abs\/2201.11903\u00a0[cs.CL] https:\/\/arxiv.org\/abs\/2201.11903"},{"key":"e_1_3_3_1_31_2","unstructured":"Fengli Xu Qianyue Hao Zefang Zong Jingwei Wang Yunke Zhang Jingyi Wang Xiaochong Lan Jiahui Gong Tianjian Ouyang Fanjin Meng Chenyang Shao Yuwei Yan Qinglong Yang Yiwen Song Sijian Ren Xinyuan Hu Yu Li Jie Feng Chen Gao and Yong Li. 2025. Towards Large Reasoning Models: A Survey of Reinforced Reasoning with Large Language Models. arxiv:https:\/\/arXiv.org\/abs\/2501.09686\u00a0[cs.AI] https:\/\/arxiv.org\/abs\/2501.09686"},{"key":"e_1_3_3_1_32_2","doi-asserted-by":"publisher","unstructured":"Zhengtao Xu Tianqi Song and Yi-Chieh Lee. 2025. Confronting verbalized uncertainty: Understanding how LLM\u2019s verbalized uncertainty influences users in AI-assisted decision-making. Int. J. Hum.-Comput. Stud. 197 C (March 2025) 17\u00a0pages. 10.1016\/j.ijhcs.2025.103455","DOI":"10.1016\/j.ijhcs.2025.103455"},{"key":"e_1_3_3_1_33_2","doi-asserted-by":"publisher","unstructured":"Yunfeng Zhang Q.\u00a0Vera Liao and Rachel K.\u00a0E. Bellamy. 2020. Effect of Confidence and Explanation on Accuracy and Trust Calibration in AI-Assisted Decision Making(FAT* \u201920). ACM New York NY USA 295\u2013305. 10.1145\/3351095.3372852","DOI":"10.1145\/3351095.3372852"},{"key":"e_1_3_3_1_34_2","doi-asserted-by":"publisher","DOI":"10.1145\/3640794.3665550"}],"event":{"name":"CHI EA '26: Extended Abstracts of the 2026 CHI Conference on Human Factors in Computing Systems","location":"Barcelona, Spain","acronym":"CHI EA '26","sponsor":["SIGCHI ACM Special Interest Group on Computer-Human Interaction"]},"container-title":["Proceedings of the Extended Abstracts of the 2026 CHI Conference on Human Factors in Computing Systems"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3772363.3798613","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T03:55:02Z","timestamp":1776052502000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3772363.3798613"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4,13]]},"references-count":33,"alternative-id":["10.1145\/3772363.3798613","10.1145\/3772363"],"URL":"https:\/\/doi.org\/10.1145\/3772363.3798613","relation":{},"subject":[],"published":{"date-parts":[[2026,4,13]]},"assertion":[{"value":"2026-04-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}