{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T05:29:52Z","timestamp":1730266192546,"version":"3.28.0"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,7,18]],"date-time":"2021-07-18T00:00:00Z","timestamp":1626566400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,7,18]],"date-time":"2021-07-18T00:00:00Z","timestamp":1626566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,7,18]],"date-time":"2021-07-18T00:00:00Z","timestamp":1626566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,7,18]]},"DOI":"10.1109\/ijcnn52387.2021.9533809","type":"proceedings-article","created":{"date-parts":[[2021,9,20]],"date-time":"2021-09-20T21:27:41Z","timestamp":1632173261000},"page":"1-8","source":"Crossref","is-referenced-by-count":2,"title":["Semantics for Global and Local Interpretation of Deep Convolutional Neural Networks"],"prefix":"10.1109","author":[{"given":"Jindong","family":"Gu","sequence":"first","affiliation":[]},{"given":"Rui","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Volker","family":"Tresp","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_33"},{"key":"ref38","article-title":"A theoretical explanation for perplexing behaviors of backpropagation-based visualizations","author":"nie","year":"2018","journal-title":"2018 Workshop on Human Interpretability in Machine Learning (WHI)"},{"journal-title":"Understanding intra-class knowledge inside cnn","year":"2015","author":"wei","key":"ref33"},{"journal-title":"Multifaceted feature visualization Uncovering the different types of features learned by each neuron in deep neural networks","year":"2016","author":"nguyen","key":"ref32"},{"journal-title":"Visualizing Higher-layer Features of Deep Networks","year":"2009","author":"erhan","key":"ref31"},{"key":"ref30","first-page":"2579","article-title":"Visualizing data using t-sne","volume":"9","author":"maaten","year":"2008","journal-title":"Journal of Machine Learning Research"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.338"},{"key":"ref36","article-title":"Towards better understanding of gradient-based attribution methods for deep neural networks","author":"ancona","year":"2018","journal-title":"ICLRE"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1016\/j.visres.2008.07.012"},{"key":"ref34","first-page":"2673","article-title":"Interpretability beyond feature attribution: Quantitative testing with concept activation vectors (tcav)","author":"kim","year":"2018","journal-title":"ICML"},{"key":"ref10","first-page":"2376","article-title":"Counterfactual visual explanations","author":"goyal","year":"2019","journal-title":"International Conference on Machine Learning"},{"journal-title":"SmoothGrad Removing noise by adding noise","year":"2017","author":"smilkov","key":"ref40"},{"key":"ref11","first-page":"818","article-title":"Visualizing and understanding convolutional networks","author":"zeiler","year":"2014","journal-title":"ECCV"},{"journal-title":"Object detectors emerge in deep scene cnns","year":"2014","author":"zhou","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.354"},{"key":"ref14","article-title":"Visualizing deep neural network decisions: Prediction difference analysis","author":"zintgraf","year":"2017","journal-title":"ICLRE"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.319"},{"key":"ref16","article-title":"Learning important features through propagating activation differences","author":"shrikumar","year":"2017","journal-title":"ICML"},{"key":"ref17","first-page":"9505","article-title":"Sanity checks for saliency maps","author":"adebayo","year":"2018","journal-title":"NeurIPS"},{"key":"ref18","article-title":"Evaluating feature importance estimates","author":"hooker","year":"2018","journal-title":"2018 Workshop on Human Interpretability in Machine Learning (WHI)"},{"journal-title":"The (un) reliability of saliency methods","year":"2017","author":"kindermans","key":"ref19"},{"key":"ref28","article-title":"Intriguing properties of neural networks","author":"szegedy","year":"2014","journal-title":"ICLRE"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0130140"},{"key":"ref6","first-page":"119","article-title":"Understanding individual decisions of cnns via contrastive backpropagation","author":"gu","year":"2018","journal-title":"Asian Conference on Computer Vision"},{"key":"ref29","article-title":"Explaining and harnessing adversarial examples","author":"goodfellow","year":"2015","journal-title":"ICLRE"},{"key":"ref5","article-title":"Axiomatic attribution for deep networks","author":"sundararajan","year":"2017","journal-title":"ICML"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01216-8_17"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_1"},{"key":"ref2","article-title":"Striving for simplicity: The all convolutional net","author":"springenberg","year":"2014","journal-title":"ICLRE"},{"journal-title":"Explaining image classifiers by counterfactual generation","year":"2018","author":"chang","key":"ref9"},{"key":"ref1","article-title":"Deep inside convolutional networks: Visualising image classification models and saliency maps","author":"simonyan","year":"2013","journal-title":"ICLRE"},{"key":"ref20","article-title":"On the robustness of interpretability methods","author":"alvarez-melis","year":"2018","journal-title":"2018 Workshop on Human Interpretability in Machine Learning (WHI)"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10584-0_22"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46466-4_8"},{"key":"ref24","first-page":"647","article-title":"Decaf: A deep convolutional activation feature for generic visual recognition","author":"donahue","year":"2014","journal-title":"ICML"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.522"},{"key":"ref26","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2015","journal-title":"ICLRE"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.222"}],"event":{"name":"2021 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2021,7,18]]},"location":"Shenzhen, China","end":{"date-parts":[[2021,7,22]]}},"container-title":["2021 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9533266\/9533267\/09533809.pdf?arnumber=9533809","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T15:46:17Z","timestamp":1652197577000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9533809\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,7,18]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/ijcnn52387.2021.9533809","relation":{},"subject":[],"published":{"date-parts":[[2021,7,18]]}}}