{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T07:01:59Z","timestamp":1771484519514,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":32,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,6,29]],"date-time":"2022-06-29T00:00:00Z","timestamp":1656460800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,6,29]]},"DOI":"10.1145\/3529190.3535693","type":"proceedings-article","created":{"date-parts":[[2022,7,11]],"date-time":"2022-07-11T17:06:00Z","timestamp":1657559160000},"page":"562-568","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":12,"title":["Towards FAIR Explainable AI: a standardized ontology for mapping XAI solutions to use cases, explanations, and AI systems"],"prefix":"10.1145","author":[{"given":"Ajaya","family":"Adhikari","sequence":"first","affiliation":[{"name":"Data Science, Netherlands Organisation for Applied Scientific Research (TNO), Netherlands"}]},{"given":"Edwin","family":"Wenink","sequence":"additional","affiliation":[{"name":"Data Science, Netherlands Organisation for Applied Scientific Research (TNO), Netherlands"}]},{"given":"Jasper","family":"van der Waa","sequence":"additional","affiliation":[{"name":"Human Machine Teaming, Netherlands Organisation for Applied Scientific Research (TNO), Netherlands and Interactive Intelligence group, University of Delft, Netherlands"}]},{"given":"Cornelis","family":"Bouter","sequence":"additional","affiliation":[{"name":"Data Science, Netherlands Organisation for Applied Scientific Research (TNO), Netherlands"}]},{"given":"Ioannis","family":"Tolios","sequence":"additional","affiliation":[{"name":"Data Science, 
Netherlands Organisation for Applied Scientific Research (TNO), Netherlands"}]},{"given":"Stephan","family":"Raaijmakers","sequence":"additional","affiliation":[{"name":"Data Science, Netherlands Organisation for Applied Scientific Research (TNO), Netherlands and Centre for Linguistics, University of Leiden, Netherlands"}]}],"member":"320","published-online":{"date-parts":[[2022,7,11]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1109\/FUZZ-IEEE.2019.8858846"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2019.12.012"},{"key":"e_1_3_2_1_3_1","unstructured":"Vaishak Belle and Ioannis Papantonis. 2020. Principles and Practice of Explainable Machine Learning. CoRR abs\/2009.11698(2020). arXiv:2009.11698https:\/\/arxiv.org\/abs\/2009.11698  Vaishak Belle and Ioannis Papantonis. 2020. Principles and Practice of Explainable Machine Learning. CoRR abs\/2009.11698(2020). arXiv:2009.11698https:\/\/arxiv.org\/abs\/2009.11698"},{"key":"e_1_3_2_1_4_1","volume-title":"Pattern recognition. Machine learning 128, 9","author":"Bishop M.","year":"2006","unstructured":"Christopher\u00a0 M. Bishop . 2006. Pattern recognition. Machine learning 128, 9 ( 2006 ). Christopher\u00a0M. Bishop. 2006. Pattern recognition. Machine learning 128, 9 (2006)."},{"key":"e_1_3_2_1_5_1","volume-title":"Man is to computer programmer as woman is to homemaker? Debiasing word embeddings. Advances in neural information processing systems 29","author":"Bolukbasi Tolga","year":"2016","unstructured":"Tolga Bolukbasi , Kai-Wei Chang , 2016. Man is to computer programmer as woman is to homemaker? Debiasing word embeddings. Advances in neural information processing systems 29 ( 2016 ), 4349\u20134357. Tolga Bolukbasi, Kai-Wei Chang, 2016. Man is to computer programmer as woman is to homemaker? Debiasing word embeddings. 
Advances in neural information processing systems 29 (2016), 4349\u20134357."},{"key":"e_1_3_2_1_6_1","volume-title":"Explanation Ontology: A Model of Explanations for User-Centered AI. CoRR abs\/2010.01479(2020). arXiv:2010.01479https:\/\/arxiv.org\/abs\/2010.01479","author":"Chari Shruthi","year":"2020","unstructured":"Shruthi Chari , Oshani Seneviratne , 2020 . Explanation Ontology: A Model of Explanations for User-Centered AI. CoRR abs\/2010.01479(2020). arXiv:2010.01479https:\/\/arxiv.org\/abs\/2010.01479 Shruthi Chari, Oshani Seneviratne, 2020. Explanation Ontology: A Model of Explanations for User-Centered AI. CoRR abs\/2010.01479(2020). arXiv:2010.01479https:\/\/arxiv.org\/abs\/2010.01479"},{"key":"e_1_3_2_1_7_1","volume-title":"XXAI Workshop. ICML.","author":"Dasgupta Sanjoy","year":"2020","unstructured":"Sanjoy Dasgupta , Nave Frost , 2020 . Explainable k-means clustering: theory and practice . In XXAI Workshop. ICML. Sanjoy Dasgupta, Nave Frost, 2020. Explainable k-means clustering: theory and practice. In XXAI Workshop. ICML."},{"key":"e_1_3_2_1_8_1","unstructured":"Finale Doshi-Velez and Been Kim. 2017. Towards a rigorous science of interpretable machine learning. arXiv preprint arXiv:1702.08608(2017).  Finale Doshi-Velez and Been Kim. 2017. Towards a rigorous science of interpretable machine learning. arXiv preprint arXiv:1702.08608(2017)."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-60117-1_33"},{"key":"e_1_3_2_1_10_1","unstructured":"European Commission. 2021. Proposal for a regulation of the European Parliament and of the Council Laying down harmonised rules on Artificial Intelligence (Artificial Intelligence Act) and amending certain union legislative acts. https:\/\/eur-lex.europa.eu\/legal-content\/EN\/TXT\/?uri=CELEX:52021PC0206  European Commission. 2021. 
Proposal for a regulation of the European Parliament and of the Council Laying down harmonised rules on Artificial Intelligence (Artificial Intelligence Act) and amending certain union legislative acts. https:\/\/eur-lex.europa.eu\/legal-content\/EN\/TXT\/?uri=CELEX:52021PC0206"},{"key":"e_1_3_2_1_11_1","unstructured":"Timnit Gebru Jamie Morgenstern 2018. Datasheets for Datasets. CoRR abs\/1803.09010(2018). arXiv:1803.09010http:\/\/arxiv.org\/abs\/1803.09010  Timnit Gebru Jamie Morgenstern 2018. Datasheets for Datasets. CoRR abs\/1803.09010(2018). arXiv:1803.09010http:\/\/arxiv.org\/abs\/1803.09010"},{"key":"e_1_3_2_1_12_1","unstructured":"Robert\u00a0R. Hoffman Shane\u00a0T. Mueller 2018. Metrics for explainable AI: Challenges and prospects. arXiv preprint arXiv:1812.04608(2018).  Robert\u00a0R. Hoffman Shane\u00a0T. Mueller 2018. Metrics for explainable AI: Challenges and prospects. arXiv preprint arXiv:1812.04608(2018)."},{"key":"e_1_3_2_1_13_1","unstructured":"Weina Jin Sheelagh Carpendale 2019. Bridging AI Developers and End Users: an End-User-Centred Explainable AI Taxonomy and Visual Vocabularies.  Weina Jin Sheelagh Carpendale 2019. Bridging AI Developers and End Users: an End-User-Centred Explainable AI Taxonomy and Visual Vocabularies."},{"key":"e_1_3_2_1_14_1","volume-title":"EUCA: A Practical Prototyping Framework towards End-User-Centered Explainable Artificial Intelligence. CoRR abs\/2102.02437(2021). arXiv:2102.02437https:\/\/arxiv.org\/abs\/2102.02437","author":"Jin Weina","year":"2021","unstructured":"Weina Jin , Jianyu Fan , 2021 . EUCA: A Practical Prototyping Framework towards End-User-Centered Explainable Artificial Intelligence. CoRR abs\/2102.02437(2021). arXiv:2102.02437https:\/\/arxiv.org\/abs\/2102.02437 Weina Jin, Jianyu Fan, 2021. EUCA: A Practical Prototyping Framework towards End-User-Centered Explainable Artificial Intelligence. CoRR abs\/2102.02437(2021). 
arXiv:2102.02437https:\/\/arxiv.org\/abs\/2102.02437"},{"key":"e_1_3_2_1_15_1","volume-title":"Explanation in artificial intelligence: Insights from the social sciences. Artificial intelligence 267","author":"Miller Tim","year":"2019","unstructured":"Tim Miller . 2019. Explanation in artificial intelligence: Insights from the social sciences. Artificial intelligence 267 ( 2019 ), 1\u201338. Tim Miller. 2019. Explanation in artificial intelligence: Insights from the social sciences. Artificial intelligence 267 (2019), 1\u201338."},{"key":"e_1_3_2_1_16_1","volume-title":"Model Cards for Model Reporting. CoRR","author":"Mitchell Margaret","year":"2018","unstructured":"Margaret Mitchell , Simone Wu , 2018. Model Cards for Model Reporting. CoRR ( 2018 ). arXiv:1810.03993http:\/\/arxiv.org\/abs\/1810.03993 Margaret Mitchell, Simone Wu, 2018. Model Cards for Model Reporting. CoRR (2018). arXiv:1810.03993http:\/\/arxiv.org\/abs\/1810.03993"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3287560.3287574"},{"key":"e_1_3_2_1_18_1","volume-title":"Scala for machine learning","author":"Nicolas R.","unstructured":"Patrick\u00a0 R. Nicolas . 2015. Scala for machine learning . Packt Publishing Ltd . Patrick\u00a0R. Nicolas. 2015. Scala for machine learning. Packt Publishing Ltd."},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"crossref","unstructured":"Marco\u00a0Tulio Ribeiro Sameer Singh and Carlos Guestrin. 2016. \u201dWhy Should I Trust You?\u201d: Explaining the Predictions of Any Classifier. arxiv:1602.04938\u00a0[cs.LG]  Marco\u00a0Tulio Ribeiro Sameer Singh and Carlos Guestrin. 2016. \u201dWhy Should I Trust You?\u201d: Explaining the Predictions of Any Classifier. 
arxiv:1602.04938\u00a0[cs.LG]","DOI":"10.18653\/v1\/N16-3020"},{"key":"e_1_3_2_1_20_1","volume-title":"Hands-On Explainable AI (XAI) with Python: Interpret, visualize, explain, and integrate reliable AI for fair, secure, and trustworthy AI apps","author":"Rothman Denis","unstructured":"Denis Rothman . 2020. Hands-On Explainable AI (XAI) with Python: Interpret, visualize, explain, and integrate reliable AI for fair, secure, and trustworthy AI apps . Packt Publishing Ltd . Denis Rothman. 2020. Hands-On Explainable AI (XAI) with Python: Interpret, visualize, explain, and integrate reliable AI for fair, secure, and trustworthy AI apps. Packt Publishing Ltd."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"crossref","unstructured":"Cynthia Rudin. 2019. Stop Explaining Black Box Machine Learning Models for High Stakes Decisions and Use Interpretable Models Instead. arXiv:1811.10154\u00a0[stat.ML]  Cynthia Rudin. 2019. Stop Explaining Black Box Machine Learning Models for High Stakes Decisions and Use Interpretable Models Instead. arXiv:1811.10154\u00a0[stat.ML]","DOI":"10.1038\/s42256-019-0048-x"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijhcs.2021.102684"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"crossref","unstructured":"Ben Shneiderman. 2020. Human-centered artificial intelligence: Reliable safe & trustworthy. International Journal of Human\u2013Computer Interaction 36 6(2020) 495\u2013504.  Ben Shneiderman. 2020. Human-centered artificial intelligence: Reliable safe & trustworthy. International Journal of Human\u2013Computer Interaction 36 6(2020) 495\u2013504.","DOI":"10.1080\/10447318.2020.1741118"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/2815833.2815844"},{"key":"e_1_3_2_1_25_1","volume-title":"Recommender systems handbook","author":"Tintarev Nava","unstructured":"Nava Tintarev and Judith Masthoff . 2011. Designing and evaluating explanations for recommender systems . In Recommender systems handbook . 
Springer , 479\u2013510. Nava Tintarev and Judith Masthoff. 2011. Designing and evaluating explanations for recommender systems. In Recommender systems handbook. Springer, 479\u2013510."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2020.103404"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijhcs.2020.102493"},{"key":"e_1_3_2_1_28_1","volume-title":"Evaluating Fidelity of Explainable Methods for Predictive Process Analytics. In International Conference on Advanced Information Systems Engineering. Springer, 64\u201372","author":"Velmurugan Mythreyi","year":"2021","unstructured":"Mythreyi Velmurugan and Chun\u00a0others Ouyang . 2021 . Evaluating Fidelity of Explainable Methods for Predictive Process Analytics. In International Conference on Advanced Information Systems Engineering. Springer, 64\u201372 . Mythreyi Velmurugan and Chun\u00a0others Ouyang. 2021. Evaluating Fidelity of Explainable Methods for Predictive Process Analytics. In International Conference on Advanced Information Systems Engineering. Springer, 64\u201372."},{"key":"e_1_3_2_1_29_1","first-page":"841","article-title":"Counterfactual explanations without opening the black box: Automated decisions and the GDPR","volume":"31","author":"Wachter Sandra","year":"2017","unstructured":"Sandra Wachter , Brent Mittelstadt , and Chris Russell . 2017 . Counterfactual explanations without opening the black box: Automated decisions and the GDPR . Harv. JL & Tech. 31 (2017), 841 . Sandra Wachter, Brent Mittelstadt, and Chris Russell. 2017. Counterfactual explanations without opening the black box: Automated decisions and the GDPR. Harv. JL & Tech. 31(2017), 841.","journal-title":"Harv. JL & Tech."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1038\/sdata.2016.18"},{"key":"e_1_3_2_1_31_1","volume-title":"International conference on machine learning. 
PMLR","author":"Xu Kelvin","year":"2015","unstructured":"Kelvin Xu , Jimmy Ba , 2015 . Show, attend and tell: Neural image caption generation with visual attention . In International conference on machine learning. PMLR , 2048\u20132057. Kelvin Xu, Jimmy Ba, 2015. Show, attend and tell: Neural image caption generation with visual attention. In International conference on machine learning. PMLR, 2048\u20132057."},{"key":"e_1_3_2_1_32_1","unstructured":"Rex Ying Dylan Bourgeois 2019. Gnn explainer: A tool for post-hoc explanation of graph neural networks. arXiv preprint arXiv:1903.03894(2019).  Rex Ying Dylan Bourgeois 2019. Gnn explainer: A tool for post-hoc explanation of graph neural networks. arXiv preprint arXiv:1903.03894(2019)."}],"event":{"name":"PETRA '22: The 15th International Conference on PErvasive Technologies Related to Assistive Environments","location":"Corfu Greece","acronym":"PETRA '22"},"container-title":["Proceedings of the 15th International Conference on PErvasive Technologies Related to Assistive 
Environments"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3529190.3535693","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3529190.3535693","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:02:20Z","timestamp":1750186940000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3529190.3535693"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,29]]},"references-count":32,"alternative-id":["10.1145\/3529190.3535693","10.1145\/3529190"],"URL":"https:\/\/doi.org\/10.1145\/3529190.3535693","relation":{},"subject":[],"published":{"date-parts":[[2022,6,29]]},"assertion":[{"value":"2022-07-11","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}