{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,30]],"date-time":"2026-04-30T17:08:23Z","timestamp":1777568903835,"version":"3.51.4"},"reference-count":55,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,6,1]],"date-time":"2022-06-01T00:00:00Z","timestamp":1654041600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"U.S. National Science Foundation","doi-asserted-by":"publisher","award":["IIS-1741536"],"award-info":[{"award-number":["IIS-1741536"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100012884","name":"Bosch Research","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100012884","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Visual. Comput. Graphics"],"published-print":{"date-parts":[[2022,6]]},"DOI":"10.1109\/tvcg.2022.3165347","type":"journal-article","created":{"date-parts":[[2022,4,7]],"date-time":"2022-04-07T19:26:00Z","timestamp":1649359560000},"page":"2326-2337","source":"Crossref","is-referenced-by-count":32,"title":["VAC-CNN: A Visual Analytics System for Comparative Studies of Deep Convolutional Neural Networks"],"prefix":"10.1109","volume":"28","author":[{"given":"Xiwei","family":"Xuan","sequence":"first","affiliation":[{"name":"University of California, Davis, USA"}]},{"given":"Xiaoyu","family":"Zhang","sequence":"additional","affiliation":[{"name":"University of California, Davis, USA"}]},{"given":"Oh-Hyun","family":"Kwon","sequence":"additional","affiliation":[{"name":"University of California, Davis, USA"}]},{"given":"Kwan-Liu","family":"Ma","sequence":"additional","affiliation":[{"name":"University of California, Davis, USA"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2019.2934659"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2017.2744718"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2831899"},{"key":"ref32","first-page":"818","article-title":"Visualizing and understanding convolutional networks","author":"zeiler","year":"2014","journal-title":"European Conference on Computer Vision"},{"key":"ref31","article-title":"Striving for simplicity: The all convolutional net","author":"springenberg","year":"2014","journal-title":"ArXiv Preprint"},{"key":"ref30","article-title":"Deep inside convolutional networks: Visualising image classification models and saliency maps","author":"simonyan","year":"2013","journal-title":"ArXiv Preprint"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2017.2744683"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2017.2744878"},{"key":"ref35","doi-asserted-by":"crossref","first-page":"91","DOI":"10.1109\/TVCG.2016.2598831","article-title":"Towards better analysis of deep convolutional neural networks","volume":"23","author":"liu","year":"2016","journal-title":"IEEE Transactions on Visualization and Computer Graphics"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2016.2598838"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.23915\/distill.00007"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3377325.3377514"},{"key":"ref29","article-title":"DeepDream-a code example for visualizing neural networks","volume":"2","author":"mordvintsev","year":"2015","journal-title":"Google research"},{"key":"ref2","first-page":"276","article-title":"Learning algorithms for classification: A comparison on handwritten digit recognition","volume":"261","author":"lecun","year":"1995","journal-title":"Neural Networks The Statistical Mechanics Perspective"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.3390\/electronics10202470"},{"key":"ref20","article-title":"Smooth Grad-CAM ++: An enhanced inference level visualization technique for deep convolutional neural network models","author":"omeiza","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/2858036.2858529"},{"key":"ref21","article-title":"Score-CAM: Improved visual explanations via score-weighted class activation mapping","author":"wang","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2018.2864499"},{"key":"ref23","article-title":"CNNComparator: Comparative analytics of convolutional neural networks","author":"zeng","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/MCG.2019.2919033"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/MCG.2019.2922592"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.265"},{"key":"ref51","article-title":"Diversity-generated image inpainting with style extraction","author":"cai","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref55","article-title":"Sanity checks for saliency maps","author":"adebayo","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/SITIS.2018.00057"},{"key":"ref53","first-page":"532","article-title":"Image clustering using color moments, histogram, edge and k-means clustering","volume":"2","author":"malakar","year":"2013","journal-title":"International Journal of Science and Research"},{"key":"ref52","first-page":"538","article-title":"Image similarity measure using color histogram, color coherence vector, and sobel method","volume":"2","author":"roy","year":"2013","journal-title":"International Journal of Science and Research (IJSR)"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICSC.2017.61"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2016.12.038"},{"key":"ref40","author":"schubert","year":"2020","journal-title":"Openai microscope"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.29007\/qsxc"},{"key":"ref13","article-title":"An analysis of deep neural network models for practical applications","author":"canziani","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3338472.3338480"},{"key":"ref15","article-title":"Explainable artificial intelligence (xai)","volume":"2","author":"gunning","year":"2017","journal-title":"Defense Advanced Research Projects Agency (DARPA)"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.319"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.371"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/WACV.2018.00097"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref5","article-title":"SqueezeNet: Alexnet-level accuracy with 50x fewer parameters and <0.5 mb model size","author":"iandola","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00716"},{"key":"ref7","article-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications","author":"howard","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref49","article-title":"ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness","author":"geirhos","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref9","article-title":"Competitive inner-imaging squeeze and excitation for residual network","author":"hu","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/MCSE.2007.55"},{"key":"ref45","first-page":"2579","article-title":"Visualizing data using t-sne","volume":"9","author":"maaten","year":"2008","journal-title":"Journal of Machine Learning Research"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pcbi.1006613"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.21105\/joss.03021"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1016\/j.visinf.2020.04.005"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2020.3028888"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3084827"}],"container-title":["IEEE Transactions on Visualization and Computer Graphics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/2945\/9766250\/09751204.pdf?arnumber=9751204","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,4]],"date-time":"2025-04-04T19:16:01Z","timestamp":1743794161000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9751204\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6]]},"references-count":55,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tvcg.2022.3165347","relation":{},"ISSN":["1077-2626","1941-0506","2160-9306"],"issn-type":[{"value":"1077-2626","type":"print"},{"value":"1941-0506","type":"electronic"},{"value":"2160-9306","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,6]]}}}