{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T15:22:31Z","timestamp":1742916151414,"version":"3.40.3"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031440663"},{"type":"electronic","value":"9783031440670"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-44067-0_19","type":"book-chapter","created":{"date-parts":[[2023,10,20]],"date-time":"2023-10-20T06:02:33Z","timestamp":1697781753000},"page":"361-381","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Quantifying the\u00a0Intrinsic Usefulness of\u00a0Attributional Explanations for\u00a0Graph Neural Networks with\u00a0Artificial Simulatability Studies"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9228-9395","authenticated-orcid":false,"given":"Jonas","family":"Teufel","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2205-6753","authenticated-orcid":false,"given":"Luca","family":"Torresi","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4465-1465","authenticated-orcid":false,"given":"Pascal","family":"Friederich","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,10,21]]},"reference":[{"key":"19_CR1","unstructured":"Adebayo, J., Gilmer, J., Muelly, M., Goodfellow, I., Hardt, M., Kim, B.: Sanity checks for saliency maps. In: Advances in Neural Information Processing Systems, vol. 31. Curran Associates, Inc. (2018). https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2018\/hash\/294a8ed24b1ad22ec2e7efea049b8737-Abstract.html"},{"key":"19_CR2","doi-asserted-by":"publisher","unstructured":"Arora, S., Pruthi, D., Sadeh, N., Cohen, W.W., Lipton, Z.C., Neubig, G.: Explain, edit, and understand: rethinking user study design for evaluating model explanations. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, no. 5, pp. 5277\u20135285 (2022). https:\/\/doi.org\/10.1609\/aaai.v36i5.20464. https:\/\/ojs.aaai.org\/index.php\/AAAI\/article\/view\/20464","DOI":"10.1609\/aaai.v36i5.20464"},{"key":"19_CR3","doi-asserted-by":"publisher","unstructured":"Boyd, A., Tinsley, P., Bowyer, K., Czajka, A.: CYBORG: blending human saliency into the loss improves deep learning (2022). https:\/\/doi.org\/10.48550\/arXiv.2112.00686. http:\/\/arxiv.org\/abs\/2112.00686. arXiv:2112.00686","DOI":"10.48550\/arXiv.2112.00686"},{"key":"19_CR4","doi-asserted-by":"publisher","unstructured":"Chandrasekaran, A., Prabhu, V., Yadav, D., Chattopadhyay, P., Parikh, D.: Do explanations make VQA models more predictable to a human? (2018). https:\/\/doi.org\/10.48550\/arXiv.1810.12366. http:\/\/arxiv.org\/abs\/1810.12366. arXiv:1810.12366","DOI":"10.48550\/arXiv.1810.12366"},{"key":"19_CR5","doi-asserted-by":"publisher","unstructured":"Dai, E., Wang, S.: Towards self-explainable graph neural network. In: Proceedings of the 30th ACM International Conference on Information & Knowledge Management, CIKM 2021, pp. 302\u2013311. Association for Computing Machinery, New York (2021). https:\/\/doi.org\/10.1145\/3459637.3482306","DOI":"10.1145\/3459637.3482306"},{"key":"19_CR6","unstructured":"Doshi-Velez, F., Kim, B.: Towards a rigorous science of interpretable machine learning. arXiv:1702.08608 (2017). http:\/\/arxiv.org\/abs\/1702.08608"},{"key":"19_CR7","doi-asserted-by":"publisher","unstructured":"Fernandes, P., Treviso, M., Pruthi, D., Martins, A.F.T., Neubig, G.: Learning to scaffold: optimizing model explanations for teaching (2022). https:\/\/doi.org\/10.48550\/arXiv.2204.10810. http:\/\/arxiv.org\/abs\/2204.10810. arXiv:2204.10810","DOI":"10.48550\/arXiv.2204.10810"},{"key":"19_CR8","doi-asserted-by":"publisher","unstructured":"Gao, Y., Sun, T., Bhatt, R., Yu, D., Hong, S., Zhao, L.: GNES: learning to explain graph neural networks. In: 2021 IEEE International Conference on Data Mining (ICDM), pp. 131\u2013140 (2021). https:\/\/doi.org\/10.1109\/ICDM51629.2021.00023. ISSN: 2374-8486","DOI":"10.1109\/ICDM51629.2021.00023"},{"issue":"9","key":"19_CR9","doi-asserted-by":"publisher","first-page":"2077","DOI":"10.1021\/ci900161g","volume":"49","author":"K Hansen","year":"2009","unstructured":"Hansen, K., et al.: Benchmark data set for in silico prediction of Ames mutagenicity. J. Chem. Inf. Model. 49(9), 2077\u20132081 (2009). https:\/\/doi.org\/10.1021\/ci900161g","journal-title":"J. Chem. Inf. Model."},{"key":"19_CR10","doi-asserted-by":"publisher","unstructured":"Hase, P., Bansal, M.: Evaluating explainable AI: which algorithmic explanations help users predict model behavior? (2020). https:\/\/doi.org\/10.48550\/arXiv.2005.01831. http:\/\/arxiv.org\/abs\/2005.01831. arXiv:2005.01831","DOI":"10.48550\/arXiv.2005.01831"},{"key":"19_CR11","doi-asserted-by":"publisher","unstructured":"Hase, P., Zhang, S., Xie, H., Bansal, M.: Leakage-adjusted simulatability: can models generate non-trivial explanations of their behavior in natural language? (2020). https:\/\/doi.org\/10.48550\/arXiv.2010.04119. http:\/\/arxiv.org\/abs\/2010.04119. arXiv:2010.04119","DOI":"10.48550\/arXiv.2010.04119"},{"key":"19_CR12","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"267","DOI":"10.1007\/978-3-030-28954-6_14","volume-title":"Explainable AI: Interpreting, Explaining and Visualizing Deep Learning","author":"P-J Kindermans","year":"2019","unstructured":"Kindermans, P.-J., et al.: The (un)reliability of saliency methods. In: Samek, W., Montavon, G., Vedaldi, A., Hansen, L.K., M\u00fcller, K.-R. (eds.) Explainable AI: Interpreting, Explaining and Visualizing Deep Learning. LNCS (LNAI), vol. 11700, pp. 267\u2013280. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-28954-6_14"},{"key":"19_CR13","doi-asserted-by":"publisher","unstructured":"Lai, V., Liu, H., Tan, C.: \u201cWhy is \u2018Chicago\u2019 deceptive?\u201d Towards building model-driven tutorials for humans. In: Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, CHI 2020, pp. 1\u201313. Association for Computing Machinery, New York (2020). https:\/\/doi.org\/10.1145\/3313831.3376873","DOI":"10.1145\/3313831.3376873"},{"key":"19_CR14","doi-asserted-by":"publisher","unstructured":"Lai, V., Tan, C.: On human predictions with explanations and predictions of machine learning models: a case study on deception detection. In: Proceedings of the Conference on Fairness, Accountability, and Transparency, FAT* 2019, pp. 29\u201338. Association for Computing Machinery, New York (2019). https:\/\/doi.org\/10.1145\/3287560.3287590. https:\/\/dl.acm.org\/doi\/10.1145\/3287560.3287590","DOI":"10.1145\/3287560.3287590"},{"key":"19_CR15","unstructured":"Lin, W., Lan, H., Li, B.: Generative causal explanations for graph neural networks. In: Proceedings of the 38th International Conference on Machine Learning, pp. 6666\u20136679. PMLR (2021). https:\/\/proceedings.mlr.press\/v139\/lin21d.html. ISSN: 2640-3498"},{"key":"19_CR16","unstructured":"Linsley, D., Shiebler, D., Eberhardt, S., Serre, T.: Learning what and where to attend (2019). https:\/\/openreview.net\/forum?id=BJgLg3R9KQ"},{"key":"19_CR17","unstructured":"Luo, D., et al.: Parameterized explainer for graph neural network. In: Advances in Neural Information Processing Systems, vol. 33, pp. 19620\u201319631. Curran Associates, Inc. (2020). https:\/\/proceedings.neurips.cc\/paper\/2020\/hash\/e37b08dd3015330dcbb5d6663667b8b8-Abstract.html"},{"key":"19_CR18","doi-asserted-by":"publisher","unstructured":"Magister, L.C., et al.: Encoding concepts in graph neural networks (2022). https:\/\/doi.org\/10.48550\/arXiv.2207.13586. http:\/\/arxiv.org\/abs\/2207.13586. arXiv:2207.13586","DOI":"10.48550\/arXiv.2207.13586"},{"key":"19_CR19","doi-asserted-by":"publisher","unstructured":"Magister, L.C., Kazhdan, D., Singh, V., Li\u00f2, P.: GCExplainer: human-in-the-loop concept-based explanations for graph neural networks (2021). https:\/\/doi.org\/10.48550\/arXiv.2107.11889. http:\/\/arxiv.org\/abs\/2107.11889. arXiv:2107.11889","DOI":"10.48550\/arXiv.2107.11889"},{"key":"19_CR20","doi-asserted-by":"publisher","unstructured":"Prado-Romero, M.A., Stilo, G.: GRETEL: graph counterfactual explanation evaluation framework. In: Proceedings of the 31st ACM International Conference on Information & Knowledge Management, CIKM 2022, pp. 4389\u20134393. Association for Computing Machinery, New York (2022). https:\/\/doi.org\/10.1145\/3511808.3557608. https:\/\/dl.acm.org\/doi\/10.1145\/3511808.3557608","DOI":"10.1145\/3511808.3557608"},{"key":"19_CR21","unstructured":"Pruthi, D., et al.: Evaluating explanations: how much do explanations from the teacher aid students? arXiv:2012.00893 (2021). http:\/\/arxiv.org\/abs\/2012.00893"},{"key":"19_CR22","doi-asserted-by":"publisher","unstructured":"Reiser, P., Eberhard, A., Friederich, P.: Graph neural networks in TensorFlow-Keras with RaggedTensor representation (KGCNN). Softw. Impacts 9, 100095 (2021). https:\/\/doi.org\/10.1016\/j.simpa.2021.100095. https:\/\/www.sciencedirect.com\/science\/article\/pii\/S266596382100035X","DOI":"10.1016\/j.simpa.2021.100095"},{"key":"19_CR23","doi-asserted-by":"publisher","unstructured":"Shin, Y.M., Kim, S.W., Shin, W.Y.: PAGE: prototype-based model-level explanations for graph neural networks (2022). https:\/\/doi.org\/10.48550\/arXiv.2210.17159. http:\/\/arxiv.org\/abs\/2210.17159. arXiv:2210.17159","DOI":"10.48550\/arXiv.2210.17159"},{"key":"19_CR24","doi-asserted-by":"publisher","unstructured":"Sorkun, M.C., Khetan, A., Er, S.: AqSolDB, a curated reference set of aqueous solubility and 2D descriptors for a diverse set of compounds. Sci. Data 6(1), 143 (2019). https:\/\/doi.org\/10.1038\/s41597-019-0151-1. https:\/\/www.nature.com\/articles\/s41597-019-0151-1","DOI":"10.1038\/s41597-019-0151-1"},{"key":"19_CR25","doi-asserted-by":"publisher","unstructured":"Tan, J., et al.: Learning and evaluating graph neural network explanations based on counterfactual and factual reasoning. In: Proceedings of the ACM Web Conference 2022, WWW 2022, pp. 1018\u20131027. Association for Computing Machinery, New York (2022). https:\/\/doi.org\/10.1145\/3485447.3511948","DOI":"10.1145\/3485447.3511948"},{"key":"19_CR26","doi-asserted-by":"publisher","unstructured":"Teufel, J., Torresi, L., Reiser, P., Friederich, P.: MEGAN: multi-explanation graph attention network (2022). https:\/\/doi.org\/10.48550\/arXiv.2211.13236. http:\/\/arxiv.org\/abs\/2211.13236. arXiv:2211.13236","DOI":"10.48550\/arXiv.2211.13236"},{"key":"19_CR27","doi-asserted-by":"publisher","unstructured":"Treviso, M.V., Martins, A.F.T.: The explanation game: towards prediction explainability through sparse communication (2020). https:\/\/doi.org\/10.48550\/arXiv.2004.13876. http:\/\/arxiv.org\/abs\/2004.13876. arXiv:2004.13876","DOI":"10.48550\/arXiv.2004.13876"},{"key":"19_CR28","doi-asserted-by":"publisher","unstructured":"Zhang, Z., Liu, Q., Wang, H., Lu, C., Lee, C.: ProtGNN: towards self-explaining graph neural networks. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, no. 8, pp. 9127\u20139135 (2022). https:\/\/doi.org\/10.1609\/aaai.v36i8.20898. https:\/\/ojs.aaai.org\/index.php\/AAAI\/article\/view\/20898","DOI":"10.1609\/aaai.v36i8.20898"}],"container-title":["Communications in Computer and Information Science","Explainable Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-44067-0_19","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T06:05:57Z","timestamp":1707804357000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-44067-0_19"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031440663","9783031440670"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-44067-0_19","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"21 October 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"xAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"World Conference on Explainable Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Lisbon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Portugal","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 July 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 July 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"xai2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/xaiworldconference.com\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"220","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"94","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"43% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}