{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T16:17:00Z","timestamp":1773332220017,"version":"3.50.1"},"reference-count":25,"publisher":"MDPI AG","issue":"20","license":[{"start":{"date-parts":[[2023,10,10]],"date-time":"2023-10-10T00:00:00Z","timestamp":1696896000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["52001168"],"award-info":[{"award-number":["52001168"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["11704200"],"award-info":[{"award-number":["11704200"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2022M710668"],"award-info":[{"award-number":["2022M710668"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2242020k30039"],"award-info":[{"award-number":["2242020k30039"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["202210298021Z"],"award-info":[{"award-number":["202210298021Z"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["NY217130"],"award-info":[{"award-number":["NY217130"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Postdoctoral Science Foundation of China","award":["52001168"],"award-info":[{"award-number":["52001168"]}]},{"name":"Postdoctoral Science Foundation of China","award":["11704200"],"award-info":[{"award-number":["11704200"]}]},{"name":"Postdoctoral Science Foundation of China","award":["2022M710668"],"award-info":[{"award-number":["2022M710668"]}]},{"name":"Postdoctoral Science Foundation of China","award":["2242020k30039"],"award-info":[{"award-number":["2242020k30039"]}]},{"name":"Postdoctoral Science Foundation of China","award":["202210298021Z"],"award-info":[{"award-number":["202210298021Z"]}]},{"name":"Postdoctoral Science Foundation of China","award":["NY217130"],"award-info":[{"award-number":["NY217130"]}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["52001168"],"award-info":[{"award-number":["52001168"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["11704200"],"award-info":[{"award-number":["11704200"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central 
Universities","doi-asserted-by":"publisher","award":["2022M710668"],"award-info":[{"award-number":["2022M710668"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["2242020k30039"],"award-info":[{"award-number":["2242020k30039"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["202210298021Z"],"award-info":[{"award-number":["202210298021Z"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["NY217130"],"award-info":[{"award-number":["NY217130"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Open Research Fund of key Laboratory of MEMS of Ministry of Education, Southeast University","award":["52001168"],"award-info":[{"award-number":["52001168"]}]},{"name":"Open Research Fund of key Laboratory of MEMS of Ministry of Education, Southeast University","award":["11704200"],"award-info":[{"award-number":["11704200"]}]},{"name":"Open Research Fund of key Laboratory of MEMS of Ministry of Education, Southeast University","award":["2022M710668"],"award-info":[{"award-number":["2022M710668"]}]},{"name":"Open Research Fund of key Laboratory of MEMS of Ministry of Education, Southeast University","award":["2242020k30039"],"award-info":[{"award-number":["2242020k30039"]}]},{"name":"Open Research Fund of key Laboratory of MEMS of Ministry of Education, Southeast University","award":["202210298021Z"],"award-info":[{"award-number":["202210298021Z"]}]},{"name":"Open Research Fund of key Laboratory of MEMS of Ministry of Education, Southeast University","award":["NY217130"],"award-info":[{"award-number":["NY217130"]}]},{"name":"Nanjing Forestry University College Student Innovation Training Program","award":["52001168"],"award-info":[{"award-number":["52001168"]}]},{"name":"Nanjing Forestry University College Student Innovation Training Program","award":["11704200"],"award-info":[{"award-number":["11704200"]}]},{"name":"Nanjing Forestry University College Student Innovation Training Program","award":["2022M710668"],"award-info":[{"award-number":["2022M710668"]}]},{"name":"Nanjing Forestry University College Student Innovation Training Program","award":["2242020k30039"],"award-info":[{"award-number":["2242020k30039"]}]},{"name":"Nanjing Forestry University College Student Innovation Training Program","award":["202210298021Z"],"award-info":[{"award-number":["202210298021Z"]}]},{"name":"Nanjing Forestry University College Student Innovation Training 
Program","award":["NY217130"],"award-info":[{"award-number":["NY217130"]}]},{"name":"NUPTSF","award":["52001168"],"award-info":[{"award-number":["52001168"]}]},{"name":"NUPTSF","award":["11704200"],"award-info":[{"award-number":["11704200"]}]},{"name":"NUPTSF","award":["2022M710668"],"award-info":[{"award-number":["2022M710668"]}]},{"name":"NUPTSF","award":["2242020k30039"],"award-info":[{"award-number":["2242020k30039"]}]},{"name":"NUPTSF","award":["202210298021Z"],"award-info":[{"award-number":["202210298021Z"]}]},{"name":"NUPTSF","award":["NY217130"],"award-info":[{"award-number":["NY217130"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Sensors"],"abstract":"<jats:p>Regarding the interpretable techniques in the field of image recognition, Grad-CAM is widely used for feature localization in images to reflect the logical decision-making information behind the neural network due to its high applicability. However, extensive experimentation on a customized dataset revealed that the deep convolutional neural network (CNN) model based on Gradient-weighted Class Activation Mapping (Grad-CAM) technology cannot effectively resist the interference of large-scale noise. In this article, an optimization of the deep CNN model was proposed by incorporating the Dropkey and Dropout (as a comparison) algorithm. Compared with Grad-CAM, the improved Grad-CAM based on Dropkey applies an attention mechanism to the feature map before calculating the gradient, which can introduce randomness and eliminate some areas by applying a mask to the attention score. Experimental results show that the optimized Grad-CAM deep CNN model based on the Dropkey algorithm can effectively resist large-scale noise interference and achieve accurate localization of image features. For instance, under the interference of a noise variance of 0.6, the Dropkey-enhanced ResNet50 model achieves a confidence level of 0.878 in predicting results, while the other two models exhibit confidence levels of 0.766 and 0.481, respectively. Moreover, it exhibits excellent performance in visualizing tasks related to image features such as distortion, low contrast, and small object characteristics. Furthermore, it has promising prospects in practical computer vision applications. 
For instance, in the field of autonomous driving, it can assist in verifying whether deep learning models accurately understand and process crucial objects, road signs, pedestrians, or other elements in the environment.<\/jats:p>","DOI":"10.3390\/s23208351","type":"journal-article","created":{"date-parts":[[2023,10,10]],"date-time":"2023-10-10T10:23:42Z","timestamp":1696933422000},"page":"8351","update-policy":"https:\/\/doi.org\/10.3390\/mdpi_crossmark_policy","source":"Crossref","is-referenced-by-count":16,"title":["Optimized Dropkey-Based Grad-CAM: Toward Accurate Image Feature Localization"],"prefix":"10.3390","volume":"23","author":[{"given":"Yiwei","family":"Liu","sequence":"first","affiliation":[{"name":"College of Mechanical and Electrical Engineering, Nanjing Forestry University, Nanjing 210037, China"}]},{"given":"Luping","family":"Tang","sequence":"additional","affiliation":[{"name":"College of Mechanical and Electrical Engineering, Nanjing Forestry University, Nanjing 210037, China"},{"name":"SEU-FEI Nano-Pico Center, Key Lab of MEMS of Ministry of Education, Southeast University, Nanjing 210096, China"}]},{"given":"Chen","family":"Liao","sequence":"additional","affiliation":[{"name":"College of Electronic and Optical Engineering & College of Flexible Electronics (Future Technology), Nanjing University of Posts and Telecommunications, Nanjing 210023, China"}]},{"given":"Chun","family":"Zhang","sequence":"additional","affiliation":[{"name":"College of Mechanical and Electrical Engineering, Nanjing Forestry University, Nanjing 210037, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1667-2448","authenticated-orcid":false,"given":"Yingqing","family":"Guo","sequence":"additional","affiliation":[{"name":"College of Mechanical and Electrical Engineering, Nanjing Forestry University, Nanjing 210037, China"}]},{"given":"Yixuan","family":"Xia","sequence":"additional","affiliation":[{"name":"College of Mechanical and Electrical Engineering, Nanjing Forestry University, Nanjing 210037, China"}]},{"given":"Yangyang","family":"Zhang","sequence":"additional","affiliation":[{"name":"College of Mechanical and Electrical Engineering, Nanjing Forestry University, Nanjing 210037, China"}]},{"given":"Sisi","family":"Yao","sequence":"additional","affiliation":[{"name":"College of Mechanical and Electrical Engineering, Nanjing Forestry University, Nanjing 210037, China"}]}],"member":"1968","published-online":{"date-parts":[[2023,10,10]]},"reference":[{"key":"ref_1","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., and Girshick, R. (2017, January 22\u201329). Mask r-cnn. Proceedings of the IEEE International Conference on Computer Vision (ICCV), Venice, Italy.","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref_2","unstructured":"Ren, S., He, K., Girshick, R., and Sun, J. (2015, January 7\u201312). Faster R-CNN: Towards real-time object detection with region proposal networks. Proceedings of the Advances in Neural Information Processing Systems 28 (NIPS 2015), Montreal, QC, Canada."},{"key":"ref_3","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., and Sun, J. (2016, January 11\u201314). Identity Mappings in Deep Residual Networks. 
Proceedings of the Computer Vision\u2014ECCV 2016\u201414th European Conference, Amsterdam, The Netherlands.","DOI":"10.1007\/978-3-319-46493-0_38"},{"key":"ref_4","doi-asserted-by":"crossref","first-page":"84","DOI":"10.1145\/3065386","article-title":"Imagenet classification with deep convolutional neural networks","volume":"60","author":"Krizhevsky","year":"2017","journal-title":"Commun. ACM"},{"key":"ref_5","doi-asserted-by":"crossref","unstructured":"Dong, Y., Su, H., Wu, B., Li, Z., Liu, W., Zhang, T., and Zhu, J. (2019, January 15\u201320). Efficient Decision-Based Black-Box Adversarial Attacks on Face Recognition. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Long Beach, CA, USA.","DOI":"10.1109\/CVPR.2019.00790"},{"key":"ref_6","doi-asserted-by":"crossref","first-page":"828","DOI":"10.1109\/TEVC.2019.2890858","article-title":"One Pixel Attack for Fooling Deep Neural Networks","volume":"23","author":"Su","year":"2019","journal-title":"IEEE Trans. Evol. Comput."},{"key":"ref_7","doi-asserted-by":"crossref","first-page":"101762","DOI":"10.1109\/ACCESS.2022.3209821","article-title":"Time-Varying Heterogeneous Alternation Control for Synchronization of Delayed Neural Networks","volume":"10","author":"Cheng","year":"2022","journal-title":"IEEE Access"},{"key":"ref_8","unstructured":"Dabkowski, P., and Gal, Y. (2017, January 4\u20139). Real time image saliency for black box classifiers. Proceedings of the 2017 Conference on Neural Information Processing Systems\u2014NeurIPS, Long Beach, CA, USA."},{"key":"ref_9","unstructured":"Petsiuk, V., Das, A., and Saenko, K. (2018). Rise: Randomized input sampling for explanation of black-box models. arXiv."},{"key":"ref_10","doi-asserted-by":"crossref","unstructured":"Wagner, J., Kohler, J.M., Gindele, T., Hetzel, L., Wiedemer, J.T., and Behnke, S. (2019, January 15\u201320). Interpretable and Fine-Grained Visual Explanations for Convolutional Neural Networks. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition\u2014CVPR, Long Beach, CA, USA.","DOI":"10.1109\/CVPR.2019.00931"},{"key":"ref_11","unstructured":"Adebayo, J., Gilmer, J., Goodfellow, I., and Kim, B. (2018). Local explanation methods for deep neural networks lack sensitivity to parameter values. arXiv."},{"key":"ref_12","unstructured":"Omeiza, D., Speakman, S., Cintas, C., and Weldermariam, K. (2019). Smooth Grad-CAM++: An Enhanced Inference Level Visualization Technique for Deep Convolutional Neural Network Models. arXiv."},{"key":"ref_13","doi-asserted-by":"crossref","unstructured":"Zhou, B., Khosla, A., Lapedriza, A., Oliva, A., and Torralba, A. (2016, January 27\u201330). Learning deep features for discriminative localization. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Las Vegas, NV, USA.","DOI":"10.1109\/CVPR.2016.319"},{"key":"ref_14","doi-asserted-by":"crossref","unstructured":"Chattopadhay, A., Sarkar, A., Howlader, P., and Balasubramanian, V.N. (2018, January 12\u201315). Grad-cam++: Generalized gradient-based visual explanations for deep convolutional networks. Proceedings of the 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), Lake Tahoe, NV, USA.","DOI":"10.1109\/WACV.2018.00097"},{"key":"ref_15","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., and Batra, D. (2017, January 22\u201329). Grad-CAM: Visual explanations from deep networks via gradient-based localization. 
Proceedings of the IEEE International Conference on Computer Vision (ICCV), Venice, Italy.","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref_16","unstructured":"Smilkov, D., Thorat, N., Kim, B., Vi\u00e9gas, F., and Wattenberg, M. (2017). SmoothGrad: Removing noise by adding noise. arXiv."},{"key":"ref_17","doi-asserted-by":"crossref","unstructured":"Wang, H., Wang, Z., Du, M., Yang, F., Zhang, Z., Ding, S., Mardziel, P., and Hu, X. (2020, January 14\u201319). Score-CAM: Score-Weighted Visual Explanations for Convolutional Neural Networks. Proceedings of the Computer Vision and Pattern Recognition (CVPR), Seattle, WA, USA.","DOI":"10.1109\/CVPRW50498.2020.00020"},{"key":"ref_18","doi-asserted-by":"crossref","unstructured":"Cubuk, E.D., Zoph, B., Mane, D., Vasudevan, V., and Le, Q.V. (2018). AutoAugment: Learning Augmentation Policies from Data. arXiv.","DOI":"10.1109\/CVPR.2019.00020"},{"key":"ref_19","unstructured":"Kurakin, A., Goodfellow, I.J., and Bengio, S. (2016). Adversarial Examples in the Physical World. arXiv."},{"key":"ref_20","doi-asserted-by":"crossref","unstructured":"Zhang, H., Cisse, M., Dauphin, Y.N., and Lopez-Paz, D. (2017). mixup: Beyond Empirical Risk Minimization. arXiv.","DOI":"10.1007\/978-1-4899-7687-1_79"},{"key":"ref_21","doi-asserted-by":"crossref","unstructured":"Fong, R.C., and Vedaldi, A. (2017, January 22\u201329). Interpretable Explanations of Black Boxes by Meaningful Perturbation. Proceedings of the IEEE International Conference on Computer Vision, Venice, Italy.","DOI":"10.1109\/ICCV.2017.371"},{"key":"ref_22","unstructured":"Li, B., Hu, Y., Nie, X., Han, C., Jiang, X., Guo, T., and Liu, L. (2022). Dropkey. arXiv."},{"key":"ref_23","unstructured":"Goodfellow, I., Bengio, Y., and Courville, A. (2016). Deep Learning, MIT Press."},{"key":"ref_24","unstructured":"Hinton, G.E., Srivastava, N., Krizhevsky, A., Sutskever, I., and Salakhutdinov, R.R. (2012). Improving neural networks by preventing co-adaptation of feature detectors. arXiv."},{"key":"ref_25","unstructured":"Lin, Z., Liu, P., Huang, L., Chen, J., Qiu, X., and Huang, X. (2019). Dropattention: A regularization method for fully-connected self-attention networks. arXiv."}],"container-title":["Sensors"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.mdpi.com\/1424-8220\/23\/20\/8351\/pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T21:03:59Z","timestamp":1760130239000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.mdpi.com\/1424-8220\/23\/20\/8351"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,10]]},"references-count":25,"journal-issue":{"issue":"20","published-online":{"date-parts":[[2023,10]]}},"alternative-id":["s23208351"],"URL":"https:\/\/doi.org\/10.3390\/s23208351","relation":{},"ISSN":["1424-8220"],"issn-type":[{"value":"1424-8220","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10,10]]}}}