{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,19]],"date-time":"2026-03-19T07:03:09Z","timestamp":1773903789275,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":36,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,12,4]],"date-time":"2023-12-04T00:00:00Z","timestamp":1701648000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,12,4]]},"DOI":"10.1145\/3623809.3623938","type":"proceedings-article","created":{"date-parts":[[2023,12,3]],"date-time":"2023-12-03T12:50:35Z","timestamp":1701607835000},"page":"395-397","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":6,"title":["Enhancing Explanaibility in AI: Food Recommender System Use Case"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-7525-5522","authenticated-orcid":false,"given":"Melissa","family":"Tessa","sequence":"first","affiliation":[{"name":"Ecole nationale sup\u00e9rieure d'informatique d'Alger ESI ex-INI, Algeria and AI Robolab\/ICR, University of Luxembourg, Luxembourg"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-1097-8527","authenticated-orcid":false,"given":"Sarah","family":"Abchiche","sequence":"additional","affiliation":[{"name":"Ecole nationale sup\u00e9rieure d'informatique d'Alger ESI ex-INI, Algeria"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-2982-1281","authenticated-orcid":false,"given":"Yves Claude","family":"Ferstler","sequence":"additional","affiliation":[{"name":"AI Robolab\/ICR, University of Luxembourg, Luxembourg"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5437-1817","authenticated-orcid":false,"given":"Igor","family":"Tchappi","sequence":"additional","affiliation":[{"name":"AI-Robolab\/ICR, Computer Science and Communications, University of Luxembourg, Luxembourg"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3302-7344","authenticated-orcid":false,"given":"Karima","family":"Benatchba","sequence":"additional","affiliation":[{"name":"Ecole nationale sup\u00e9rieure d'informatique d'Alger ESI ex-INI, Algeria"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7784-6176","authenticated-orcid":false,"given":"Amro","family":"Najjar","sequence":"additional","affiliation":[{"name":"ITIS, Luxembourg Institute of Science and Technology, Luxembourg"}]}],"member":"320","published-online":{"date-parts":[[2023,12,4]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Towards better understanding of gradient-based attribution methods for deep neural networks. arXiv preprint arXiv:1711.06104","author":"Ancona Marco","year":"2017","unstructured":"Marco Ancona, Enea Ceolini, Cengiz \u00d6ztireli, and Markus Gross. 2017. Towards better understanding of gradient-based attribution methods for deep neural networks. arXiv preprint arXiv:1711.06104 (2017)."},{"key":"e_1_3_2_1_2_1","volume-title":"Explainable Artificial Intelligence (XAI): Concepts, taxonomies, opportunities and challenges toward responsible AI. Information fusion 58","author":"Arrieta Alejandro\u00a0Barredo","year":"2020","unstructured":"Alejandro\u00a0Barredo Arrieta, Natalia D\u00edaz-Rodr\u00edguez, Javier Del\u00a0Ser, Adrien Bennetot, Siham Tabik, Alberto Barbado, Salvador Garc\u00eda, Sergio Gil-L\u00f3pez, Daniel Molina, Richard Benjamins, 2020. Explainable Artificial Intelligence (XAI): Concepts, taxonomies, opportunities and challenges toward responsible AI. Information fusion 58 (2020), 82\u2013115."},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.2514\/6.2000-4891"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10844-019-00591-8"},{"key":"e_1_3_2_1_5_1","unstructured":"Tom\u00a0B. Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell Sandhini Agarwal Ariel Herbert-Voss Gretchen Krueger Tom Henighan Rewon Child Aditya Ramesh Daniel\u00a0M. Ziegler Jeffrey Wu Clemens Winter Christopher Hesse Mark Chen Eric Sigler Mateusz Litwin Scott Gray Benjamin Chess Jack Clark Christopher Berner Sam McCandlish Alec Radford Ilya Sutskever and Dario Amodei. 2020. Language Models are Few-Shot Learners. arxiv:2005.14165\u00a0[cs.CL]"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3485447.3512077"},{"key":"e_1_3_2_1_7_1","volume-title":"A survey of methods for explaining black box models. ACM computing surveys (CSUR) 51, 5","author":"Guidotti Riccardo","year":"2018","unstructured":"Riccardo Guidotti, Anna Monreale, Salvatore Ruggieri, Franco Turini, Fosca Giannotti, and Dino Pedreschi. 2018. A survey of methods for explaining black box models. ACM computing surveys (CSUR) 51, 5 (2018), 1\u201342."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1609\/aimag.v40i2.2850"},{"key":"e_1_3_2_1_9_1","volume-title":"Service-learning essentials: Questions, answers, and lessons learned","author":"Jacoby Barbara","unstructured":"Barbara Jacoby. 2014. Service-learning essentials: Questions, answers, and lessons learned. John Wiley & Sons."},{"key":"e_1_3_2_1_10_1","volume-title":"Interpretable recommender system with heterogeneous information: A geometric deep learning perspective. SSRN Electron. J","author":"Leng Yan","year":"2020","unstructured":"Yan Leng, Rodrigo Ruiz, Xiaowen Dong, and Alex Pentland. 2020. Interpretable recommender system with heterogeneous information: A geometric deep learning perspective. SSRN Electron. J (2020)."},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1007\/s13347-017-0279-x"},{"key":"e_1_3_2_1_12_1","volume-title":"Cancergpt: Few-shot drug pair synergy prediction using large pre-trained language models. arXiv preprint arXiv:2304.10946","author":"Li Tianhao","year":"2023","unstructured":"Tianhao Li, Sandesh Shetty, Advaith Kamath, Ajay Jaiswal, Xianqian Jiang, Ying Ding, and Yejin Kim. 2023. Cancergpt: Few-shot drug pair synergy prediction using large pre-trained language models. arXiv preprint arXiv:2304.10946 (2023)."},{"key":"e_1_3_2_1_13_1","volume-title":"The mythos of model interpretability: In machine learning, the concept of interpretability is both important and slippery.Queue 16, 3","author":"Lipton C","year":"2018","unstructured":"Zachary\u00a0C Lipton. 2018. The mythos of model interpretability: In machine learning, the concept of interpretability is both important and slippery.Queue 16, 3 (2018), 31\u201357."},{"key":"e_1_3_2_1_14_1","volume-title":"The flan collection: Designing data and methods for effective instruction tuning. arXiv preprint arXiv:2301.13688","author":"Longpre Shayne","year":"2023","unstructured":"Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung\u00a0Won Chung, Yi Tay, Denny Zhou, Quoc\u00a0V Le, Barret Zoph, Jason Wei, 2023. The flan collection: Designing data and methods for effective instruction tuning. arXiv preprint arXiv:2301.13688 (2023)."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1002\/asi.24750"},{"key":"e_1_3_2_1_16_1","volume-title":"Explanation in artificial intelligence: Insights from the social sciences. Artificial intelligence 267","author":"Miller Tim","year":"2019","unstructured":"Tim Miller. 2019. Explanation in artificial intelligence: Insights from the social sciences. Artificial intelligence 267 (2019), 1\u201338."},{"key":"e_1_3_2_1_17_1","unstructured":"Christoph Molnar. 2020. Interpretable machine learning. Lulu. com."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDEW53142.2021.00010"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCIS.2006.252279"},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1109\/I2CT54291.2022.9824265"},{"key":"e_1_3_2_1_21_1","volume-title":"Natural language generation challenges for explainable AI. arXiv preprint arXiv:1911.08794","author":"Reiter Ehud","year":"2019","unstructured":"Ehud Reiter. 2019. Natural language generation challenges for explainable AI. arXiv preprint arXiv:1911.08794 (2019)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/2939672.2939778"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/3383313.3411530"},{"key":"e_1_3_2_1_24_1","volume-title":"Explainable AI: interpreting, explaining and visualizing deep learning. Vol.\u00a011700","author":"Samek Wojciech","unstructured":"Wojciech Samek, Gr\u00e9goire Montavon, Andrea Vedaldi, Lars\u00a0Kai Hansen, and Klaus-Robert M\u00fcller. 2019. Explainable AI: interpreting, explaining and visualizing deep learning. Vol.\u00a011700. Springer Nature."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijhcs.2020.102551"},{"key":"e_1_3_2_1_26_1","volume-title":"Deep inside convolutional networks: Visualising image classification models and saliency maps. arXiv preprint arXiv:1312.6034","author":"Simonyan Karen","year":"2013","unstructured":"Karen Simonyan, Andrea Vedaldi, and Andrew Zisserman. 2013. Deep inside convolutional networks: Visualising image classification models and saliency maps. arXiv preprint arXiv:1312.6034 (2013)."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"crossref","unstructured":"Rashmi Sinha and Kirsten Swearingen. 2002. The role of transparency in recommender systems. In CHI\u201902 extended abstracts on Human factors in computing systems. 830\u2013831.","DOI":"10.1145\/506443.506619"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/3519265"},{"key":"e_1_3_2_1_29_1","volume-title":"An interpretive approach to evaluating information systems: A content, context, process framework. European journal of operational research 173, 3","author":"Stockdale Rosemary","year":"2006","unstructured":"Rosemary Stockdale and Craig Standing. 2006. An interpretive approach to evaluating information systems: A content, context, process framework. European journal of operational research 173, 3 (2006), 1090\u20131102."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.3390\/data7070093"},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2020.103404"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.2991\/ijcis.11.1.13"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2021.3114794"},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1561\/1500000066"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1109\/NeuroNT53022.2021.9472817"},{"key":"e_1_3_2_1_36_1","volume-title":"Large language models are human-level prompt engineers. arXiv preprint arXiv:2211.01910","author":"Zhou Yongchao","year":"2022","unstructured":"Yongchao Zhou, Andrei\u00a0Ioan Muresanu, Ziwen Han, Keiran Paster, Silviu Pitis, Harris Chan, and Jimmy Ba. 2022. Large language models are human-level prompt engineers. arXiv preprint arXiv:2211.01910 (2022)."}],"event":{"name":"HAI '23: International Conference on Human-Agent Interaction","location":"Gothenburg Sweden","acronym":"HAI '23"},"container-title":["International Conference on Human-Agent Interaction"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3623809.3623938","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3623809.3623938","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,6]],"date-time":"2025-10-06T19:34:12Z","timestamp":1759779252000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3623809.3623938"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,4]]},"references-count":36,"alternative-id":["10.1145\/3623809.3623938","10.1145\/3623809"],"URL":"https:\/\/doi.org\/10.1145\/3623809.3623938","relation":{},"subject":[],"published":{"date-parts":[[2023,12,4]]},"assertion":[{"value":"2023-12-04","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
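
The object above is a Crossref REST API work record for DOI 10.1145/3623809.3623938. As a minimal sketch of how a record like this can be fetched and parsed, assuming network access to the public api.crossref.org endpoint and the third-party requests package (the field paths message.title, message.author, and message.reference are taken from the record itself):

# Minimal sketch (not part of the record): fetch this Crossref work by DOI
# and pull out the fields shown above. Assumes network access to the public
# api.crossref.org endpoint; requires the "requests" package.
import requests

DOI = "10.1145/3623809.3623938"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # payload mirrors the "message" object above

title = work["title"][0]  # "title" is a list of strings
authors = [f'{a.get("given", "")} {a.get("family", "")}'.strip()
           for a in work.get("author", [])]
references = work.get("reference", [])  # 36 entries per "references-count"

print(title)
print("; ".join(authors))
print(f"{len(references)} references")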