{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T16:49:02Z","timestamp":1774716542041,"version":"3.50.1"},"reference-count":63,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T00:00:00Z","timestamp":1725580800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T00:00:00Z","timestamp":1725580800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62101481, 62002313, and 62261060"],"award-info":[{"award-number":["62101481, 62002313, and 62261060"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62101481, 62002313, and 62261060"],"award-info":[{"award-number":["62101481, 62002313, and 62261060"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62101481, 62002313, and 62261060"],"award-info":[{"award-number":["62101481, 62002313, and 62261060"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Major Scientific and Technological Project of Yunnan Province","award":["202202AD080002"],"award-info":[{"award-number":["202202AD080002"]}]},{"name":"Major Scientific and Technological Project of Yunnan Province","award":["202202AD080002"],"award-info":[{"award-number":["202202AD080002"]}]},{"name":"Major Scientific and Technological Project of Yunnan Province","award":["202202AD080002"],"award-info":[{"award-number":["202202AD080002"]}]},{"name":"Yunnan Province expert workstations under Grant","award":["202305AF150078"],"award-info":[{"award-number":["202305AF150078"]}]},{"name":"Yunnan Province expert workstations under Grant","award":["202305AF150078"],"award-info":[{"award-number":["202305AF150078"]}]},{"name":"Yunnan Province expert workstations under Grant","award":["202305AF150078"],"award-info":[{"award-number":["202305AF150078"]}]},{"name":"Basic Research Project of Yunnan Province","award":["202301AW070007, 202201AU070033, 202201AT070112, 202301AU070210, 202001BB050076, and 202005AC160007"],"award-info":[{"award-number":["202301AW070007, 202201AU070033, 202201AT070112, 202301AU070210, 202001BB050076, and 202005AC160007"]}]},{"name":"Basic Research Project of Yunnan Province","award":["202301AW070007, 202201AU070033, 202201AT070112, 202301AU070210, 202001BB050076, and 202005AC160007"],"award-info":[{"award-number":["202301AW070007, 202201AU070033, 202201AT070112, 202301AU070210, 202001BB050076, and 202005AC160007"]}]},{"name":"Basic Research Project of Yunnan Province","award":["202301AW070007, 202201AU070033, 202201AT070112, 202301AU070210, 202001BB050076, and 202005AC160007"],"award-info":[{"award-number":["202301AW070007, 202201AU070033, 202201AT070112, 202301AU070210, 202001BB050076, and 202005AC160007"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis 
Comput"],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1007\/s00371-024-03605-x","type":"journal-article","created":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T20:19:14Z","timestamp":1725653954000},"page":"3329-3344","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Crafting imperceptible and transferable adversarial examples: leveraging conditional residual generator and wavelet transforms to deceive deepfake detection"],"prefix":"10.1007","volume":"41","author":[{"given":"Zhiyuan","family":"Li","sequence":"first","affiliation":[]},{"given":"Xin","family":"Jin","sequence":"additional","affiliation":[]},{"given":"Qian","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Puming","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Shin-Jye","family":"Lee","sequence":"additional","affiliation":[]},{"given":"Shaowen","family":"Yao","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,6]]},"reference":[{"issue":"8","key":"3605_CR1","doi-asserted-by":"publisher","first-page":"2546","DOI":"10.1109\/TVCG.2019.2894627","volume":"26","author":"B Zhang","year":"2019","unstructured":"Zhang, B., Sheng, B., Li, P., Lee, T.-Y.: Depth of field rendering using multilayer-neighborhood optimization. IEEE Trans. Visual Comput. Graph. 26(8), 2546\u20132559 (2019)","journal-title":"IEEE Trans. Visual Comput. Graph."},{"key":"3605_CR2","doi-asserted-by":"crossref","unstructured":"Cheng, Z., Yang, Q., Sheng, B.: Deep colorization. In: Proceedings of the IEEE international conference on computer vision, pp. 415\u2013423 (2015)","DOI":"10.1109\/ICCV.2015.55"},{"key":"3605_CR3","doi-asserted-by":"crossref","unstructured":"Qin, Y., Zhao, N., Yang, J., Pan, S., Sheng, B., Lau, R.W.: UrbanEvolver: function-aware urban layout regeneration. Int. J. Comput. Vis. 1\u201320 (2024)","DOI":"10.1007\/s11263-024-02030-w"},{"issue":"5","key":"3605_CR4","doi-asserted-by":"publisher","first-page":"2735","DOI":"10.1109\/TCYB.2019.2934823","volume":"51","author":"H Guo","year":"2019","unstructured":"Guo, H., Sheng, B., Li, P., Chen, C.P.: Multiview high dynamic range image synthesis using fuzzy broad learning system. IEEE Trans. Cybern. 51(5), 2735\u20132747 (2019)","journal-title":"IEEE Trans. Cybern."},{"key":"3605_CR5","doi-asserted-by":"publisher","first-page":"2226","DOI":"10.1109\/TMM.2022.3144890","volume":"25","author":"N Jiang","year":"2022","unstructured":"Jiang, N., Sheng, B., Li, P., Lee, T.Y.: Photohelper: portrait photographing guidance via deep feature retrieval and fusion. IEEE Trans. Multimed. 25, 2226\u20132238 (2022)","journal-title":"IEEE Trans. Multimed."},{"key":"3605_CR6","doi-asserted-by":"publisher","first-page":"7192","DOI":"10.1109\/TIP.2020.2999854","volume":"29","author":"A Nazir","year":"2020","unstructured":"Nazir, A., Cheema, M.N., Sheng, B., Li, H., Li, P., Yang, P., Jung, Y., Qin, J., Kim, J., Feng, D.D.: OFF-eNet: an optimally fused fully end-to-end network for automatic dense volumetric 3D intracranial blood vessels segmentation. IEEE Trans. Image Process. 29, 7192\u20137202 (2020)","journal-title":"IEEE Trans. 
Image Process."},{"issue":"5","key":"3605_CR7","doi-asserted-by":"publisher","first-page":"910","DOI":"10.1109\/JSTSP.2020.3002101","volume":"14","author":"L Verdoliva","year":"2020","unstructured":"Verdoliva, L.: Media forensics and deepfakes: an overview. IEEE J. Sel. Top. Signal Process. 14(5), 910\u2013932 (2020)","journal-title":"IEEE J. Sel. Top. Signal Process."},{"key":"3605_CR8","doi-asserted-by":"crossref","unstructured":"Kurakin, A., Goodfellow, I.J., Bengio, S.: Adversarial examples in the physical world. In: Artificial intelligence safety and security, pp. 99\u2013112. Chapman and Hall\/CRC (2018)","DOI":"10.1201\/9781351251389-8"},{"key":"3605_CR9","doi-asserted-by":"crossref","unstructured":"Long, Y., Zhang, Q., Zeng, B., Gao, L., Liu, X., Zhang, J., Song, J.: Frequency domain model augmentation for adversarial attack. In: European conference on computer vision, pp. 549\u2013566. Springer (2022)","DOI":"10.1007\/978-3-031-19772-7_32"},{"key":"3605_CR10","doi-asserted-by":"crossref","unstructured":"Afchar, D., Nozick, V., Yamagishi, J., Echizen, I.: MesoNet: a compact facial video forgery detection network. In: 2018 IEEE international workshop on information forensics and security (WIFS) (2018)","DOI":"10.1109\/WIFS.2018.8630761"},{"key":"3605_CR11","doi-asserted-by":"crossref","unstructured":"Rossler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., Nie\u00dfner, M.: FaceForensics++: learning to detect manipulated facial images. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp. 1\u201311 (2019)","DOI":"10.1109\/ICCV.2019.00009"},{"key":"3605_CR12","doi-asserted-by":"crossref","unstructured":"Qian, Y., Yin, G., Sheng, L., Chen, Z., Shao, J.: Thinking in frequency: face forgery detection by mining frequency-aware clues. In: European conference on computer vision, pp. 86\u2013103. Springer (2020)","DOI":"10.1007\/978-3-030-58610-2_6"},{"issue":"7","key":"3605_CR13","doi-asserted-by":"publisher","first-page":"6662","DOI":"10.1109\/TCYB.2021.3079311","volume":"52","author":"B Sheng","year":"2021","unstructured":"Sheng, B., Li, P., Ali, R., Chen, C.L.P.: Improving video temporal consistency via broad learning system. IEEE Trans. Cybern. 52(7), 6662\u20136675 (2021)","journal-title":"IEEE Trans. Cybern."},{"key":"3605_CR14","unstructured":"Szegedy, C., Zaremba, W., Sutskever, I., Bruna, J., Erhan, D., Goodfellow, I., Fergus, R.: Intriguing properties of neural networks. In: Computer vision and pattern recognition. arXiv:1312.6199 (2013)"},{"key":"3605_CR15","unstructured":"Goodfellow, I.J., Shlens, J., Szegedy, C.: Explaining and harnessing adversarial examples. Cornell University, arXiv:1412.6572 (2014)"},{"key":"3605_CR16","doi-asserted-by":"crossref","unstructured":"Carlini, N., Farid, H.: Evading deepfake-image detectors with white-and black-box attacks. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition workshops (CVPRW) (2020)","DOI":"10.1109\/CVPRW50498.2020.00337"},{"key":"3605_CR17","doi-asserted-by":"crossref","unstructured":"Hussain, S., Neekhara, P., Jere, M., Koushanfar, F., McAuley, J.: Adversarial deepfakes: evaluating vulnerability of deepfake detectors to adversarial examples. In: 2021 IEEE winter conference on applications of computer vision (WACV) (2021)","DOI":"10.1109\/WACV48630.2021.00339"},{"key":"3605_CR18","doi-asserted-by":"crossref","unstructured":"Gandhi, A., Jain, S.: Adversarial perturbations fool deepfake detectors. In: 2020 international joint conference on neural networks (IJCNN), pp. 
1\u20138. IEEE (2020)","DOI":"10.1109\/IJCNN48605.2020.9207034"},{"key":"3605_CR19","doi-asserted-by":"crossref","unstructured":"Li, D., Wang, W., Fan, H., Dong, J.: Exploring adversarial fake images on face manifold. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 5789\u20135798 (2021)","DOI":"10.1109\/CVPR46437.2021.00573"},{"key":"3605_CR20","doi-asserted-by":"crossref","unstructured":"Neekhara, P., Dolhansky, B., Bitton, J., Ferrer, C.C.: Adversarial threats to deepfake detection: a practical perspective. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 923\u2013932 (2021)","DOI":"10.1109\/CVPRW53098.2021.00103"},{"key":"3605_CR21","doi-asserted-by":"crossref","unstructured":"Lim, N.T., Kuan, M.Y., Pu, M., Lim, M.K., Chong, C.Y.: Metamorphic testing-based adversarial attack to fool deepfake detectors. In: 2022 26th international conference on pattern recognition (ICPR), pp. 2503\u20132509. IEEE (2022)","DOI":"10.1109\/ICPR56361.2022.9956543"},{"key":"3605_CR22","doi-asserted-by":"crossref","unstructured":"Jia, S., Ma, C., Yao, T., Yin, B., Ding, S., Yang, X.: Exploring frequency adversarial attacks for face forgery detection. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 4103\u20134112 (2022)","DOI":"10.1109\/CVPR52688.2022.00407"},{"key":"3605_CR23","unstructured":"Korshunov, P., Marcel, S.: Deepfakes: a new threat to face recognition? Assessment and detection. arXiv:1812.08685 (2018)"},{"issue":"8","key":"3605_CR24","doi-asserted-by":"publisher","first-page":"2001","DOI":"10.1109\/TIFS.2018.2807791","volume":"13","author":"E Gonzalez-Sosa","year":"2018","unstructured":"Gonzalez-Sosa, E., Fierrez, J., Vera-Rodriguez, R., Alonso-Fernandez, F.: Facial soft biometrics for recognition in the wild: recent works, annotation, and cots evaluation. IEEE Trans. Inf. Forensics Secur. 13(8), 2001\u20132014 (2018)","journal-title":"IEEE Trans. Inf. Forensics Secur."},{"key":"3605_CR25","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: 2019 IEEE\/CVF conference on computer vision and pattern recognition (CVPR) (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"3605_CR26","doi-asserted-by":"crossref","unstructured":"Liu, M., Ding, Y., Xia, M., Liu, X., Ding, E., Zuo, W., Wen, S.: STGAN: a unified selective transfer network for arbitrary image attribute editing. Cornell University (2019)","DOI":"10.1109\/CVPR.2019.00379"},{"key":"3605_CR27","doi-asserted-by":"crossref","unstructured":"Tolosana, R., Vera-Rodriguez, R., Fierrez, J., Morales, A., Ortega-Garcia, J.: Deepfakes and beyond: a survey of face manipulation and fake detection. Information Fusion, pp. 131\u2013148 (2020)","DOI":"10.1016\/j.inffus.2020.06.014"},{"issue":"4","key":"3605_CR28","doi-asserted-by":"publisher","first-page":"61","DOI":"10.3390\/jsan12040061","volume":"12","author":"MSH Mukta","year":"2023","unstructured":"Mukta, M.S.H., Ahmad, J., Raiaan, M.A.K., Islam, S., Azam, S., Ali, M.E., Jonkman, M.: An investigation of the effectiveness of deepfake models and tools. J. Sensor Actuator Netw. 12(4), 61 (2023)","journal-title":"J. 
Sensor Actuator Netw."},{"issue":"1","key":"3605_CR29","doi-asserted-by":"publisher","first-page":"205630512090340","DOI":"10.1177\/2056305120903408","volume":"6","author":"C Vaccari","year":"2020","unstructured":"Vaccari, C., Chadwick, A.: Deepfakes and disinformation: exploring the impact of synthetic political video on deception, uncertainty, and trust in news. Soc. Media Soc. 6(1), 2056305120903408 (2020)","journal-title":"Soc. Media Soc."},{"key":"3605_CR30","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE conference on computer vision and pattern recognition (CVPR) (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"3605_CR31","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K. Q.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4700\u20134708 (2017)","DOI":"10.1109\/CVPR.2017.243"},{"key":"3605_CR32","doi-asserted-by":"crossref","unstructured":"Chollet, F.: Xception: deep learning with depthwise separable convolutions. In: 2017 IEEE conference on computer vision and pattern recognition (CVPR) (2017)","DOI":"10.1109\/CVPR.2017.195"},{"key":"3605_CR33","unstructured":"Tan, M., Le, Q.: EfficientNet: Rethinking model scaling for convolutional neural networks. In: International conference on machine learning (2019)"},{"key":"3605_CR34","doi-asserted-by":"crossref","unstructured":"Liu, Z., Qi, X., Torr, P. H.: Global texture enhancement for fake face detection in the wild. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 8060\u20138069 (2020)","DOI":"10.1109\/CVPR42600.2020.00808"},{"key":"3605_CR35","doi-asserted-by":"crossref","unstructured":"Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxvit: multi-axis vision transformer. In: European conference on computer vision, pp. 459\u2013479. Springer (2022)","DOI":"10.1007\/978-3-031-20053-3_27"},{"key":"3605_CR36","unstructured":"Madry, A., Makelov, A., Schmidt, L., Tsipras, D., Vladu, A.: Towards deep learning models resistant to adversarial attacks. Cornell University, arXiv:1706.06083 (2017)"},{"key":"3605_CR37","doi-asserted-by":"crossref","unstructured":"Carlini, N., Wagner, D.: Towards evaluating the robustness of neural networks. In: 2017 IEEE symposium on security and privacy (SP) (2017)","DOI":"10.1109\/SP.2017.49"},{"key":"3605_CR38","unstructured":"Guo, C., Frank, J.S., Weinberger, K.Q.: Low frequency adversarial perturbation. arXiv:1809.08758 (2018)"},{"key":"3605_CR39","doi-asserted-by":"crossref","unstructured":"Chen, P.Y., Zhang, H., Sharma, Y., Yi, J., Hsieh, C.J.: Zoo: Zeroth order optimization based black-box attacks to deep neural networks without training substitute models. In: Proceedings of the 10th ACM workshop on artificial intelligence and security (2017)","DOI":"10.1145\/3128572.3140448"},{"key":"3605_CR40","unstructured":"Guo, C., Gardner, J., You, Y., Wilson, A.G., Weinberger, K.: Simple black-box adversarial attacks. In: International conference on machine learning (2019)"},{"key":"3605_CR41","unstructured":"Ilyas, A., Engstrom, L., Athalye, A., Lin, J.: Black-box adversarial attacks with limited queries and information. 
In: International conference on machine learning (2018)"},{"key":"3605_CR42","doi-asserted-by":"crossref","unstructured":"Dong, Y., Liao, F., Pang, T., Su, H., Zhu, J., Hu, X., Li, J.: Boosting adversarial attacks with momentum. In: 2018 IEEE\/CVF conference on computer vision and pattern recognition (2018)","DOI":"10.1109\/CVPR.2018.00957"},{"key":"3605_CR43","unstructured":"Lin, J., Song, C., He, K., Wang, L., Hopcroft, J.E.: Nesterov accelerated gradient and scale invariance for adversarial attacks. arXiv:1908.06281 (2019)"},{"key":"3605_CR44","doi-asserted-by":"crossref","unstructured":"Xie, C., Zhang, Z., Zhou, Y., Bai, S., Wang, J., Ren, Z., Yuille, A.L. : Improving transferability of adversarial examples with input diversity. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 2730\u20132739 (2019)","DOI":"10.1109\/CVPR.2019.00284"},{"key":"3605_CR45","doi-asserted-by":"crossref","unstructured":"Dong, Y., Pang, T., Su, H., Zhu, J.: Evading defenses to transferable adversarial examples by translation-invariant attacks. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 4312\u20134321 (2019)","DOI":"10.1109\/CVPR.2019.00444"},{"key":"3605_CR46","doi-asserted-by":"crossref","unstructured":"Liao, Q., Li, Y., Wang, X., Kong, B., Zhu, B., Lyu, S., Yin, Y., Song, Q., Wu, X.: Imperceptible adversarial examples for fake image detection. arXiv:2106.01615 (2021)","DOI":"10.1109\/ICIP42928.2021.9506775"},{"key":"3605_CR47","unstructured":"Wang, W., Zhao, Z., Sebe, N., Lepri, B.: Turn fake into real: adversarial head turn attacks against deepfake detection. arXiv:2309.01104 (2023)"},{"key":"3605_CR48","doi-asserted-by":"crossref","unstructured":"Ivanovska, M., \u0160truc, V.: On the vulnerability of deepfake detectors to attacks generated by denoising diffusion models. arXiv:2307.05397 (2023)","DOI":"10.1109\/WACVW60836.2024.00115"},{"key":"3605_CR49","doi-asserted-by":"crossref","unstructured":"Gan, Y., Xiao, X., Xiang, T.: Attribute-guided face adversarial example generation. Vis. Comput. 1\u201311 (2024)","DOI":"10.1007\/s00371-024-03265-x"},{"issue":"3","key":"3605_CR50","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3582563","volume":"14","author":"Z Dai","year":"2023","unstructured":"Dai, Z., Liu, S., Li, Q., Tang, K.: Saliency attack: towards imperceptible black-box adversarial attack. ACM Trans. Intell. Syst. Technol. 14(3), 1\u201320 (2023)","journal-title":"ACM Trans. Intell. Syst. Technol."},{"key":"3605_CR51","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Liu, Z., Larson, M.: Towards large yet imperceptible adversarial image perturbations with perceptual color distance. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 1039\u20131048 (2020)","DOI":"10.1109\/CVPR42600.2020.00112"},{"key":"3605_CR52","doi-asserted-by":"publisher","first-page":"15","DOI":"10.1016\/j.patrec.2021.03.009","volume":"146","author":"Y Wang","year":"2021","unstructured":"Wang, Y., Ding, X., Yang, Y., Ding, L., Ward, R., Wang, Z.J.: Perception matters: exploring imperceptible and transferable anti-forensics for GAN-generated fake face imagery detection. Pattern Recognit. Lett. 146, 15\u201322 (2021)","journal-title":"Pattern Recognit. Lett."},{"key":"3605_CR53","doi-asserted-by":"crossref","unstructured":"Duan, R., Chen, Y., Niu, D., Yang, Y., Qin, A.K., He, Y.: AdvDrop: adversarial attack to DNNs by dropping information. 
In: Proceedings of the IEEE\/CVF international conference on computer vision, pp. 7506\u20137515 (2021)","DOI":"10.1109\/ICCV48922.2021.00741"},{"key":"3605_CR54","doi-asserted-by":"crossref","unstructured":"Luo, C., Lin, Q., Xie, W., Wu, B., Xie, J., Shen, L.: Frequency-driven imperceptible adversarial attack on semantic similarity. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 15315\u201315324 (2022)","DOI":"10.1109\/CVPR52688.2022.01488"},{"issue":"11","key":"3605_CR55","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1145\/3422622","volume":"63","author":"I Goodfellow","year":"2020","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks. Commun. ACM 63(11), 139\u2013144 (2020)","journal-title":"Commun. ACM"},{"key":"3605_CR56","doi-asserted-by":"crossref","unstructured":"Wang, H., Wu, X., Huang, Z., Xing, E.P.: High-frequency component helps explain the generalization of convolutional neural networks. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.00871"},{"key":"3605_CR57","doi-asserted-by":"crossref","unstructured":"Li, Q., Shen, L., Guo, S., Lai, Z.: Wavelet integrated CNNs for noise-robust image classification. In: IEEE\/CVF conference on computer vision and pattern recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.00727"},{"key":"3605_CR58","unstructured":"Bingen Li, H.Z., Wang, Z., Liu, C., Yan, H., Hu, L.: Unsupervised monocular depth estimation with aggregating image features and wavelet SSIM (structural SIMilarity) loss (2021)"},{"key":"3605_CR59","doi-asserted-by":"crossref","unstructured":"Dong, Y., Liao, F., Pang, T., Su, H., Zhu, J., Hu, X., Li, J.: Boosting adversarial attacks with momentum. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 9185\u20139193 (2018)","DOI":"10.1109\/CVPR.2018.00957"},{"key":"3605_CR60","doi-asserted-by":"crossref","unstructured":"Xiao, C., Li, B., Zhu, J. Y., He, W., Liu, M., Song, D.: Generating adversarial examples with adversarial networks. arXiv:1801.02610 (2018)","DOI":"10.24963\/ijcai.2018\/543"},{"key":"3605_CR61","doi-asserted-by":"crossref","unstructured":"Yang, X., Dong, Y., Pang, T., Su, H., Zhu, J.: Boosting transferability of targeted adversarial examples via hierarchical generative networks. In: European conference on computer vision, pp. 725\u2013742. Springer (2022)","DOI":"10.1007\/978-3-031-19772-7_42"},{"key":"3605_CR62","unstructured":"Kim, H.: TorchAttacks: a PyTorch repository for adversarial attacks. arXiv:2010.01950 (2020)"},{"key":"3605_CR63","unstructured":"Wang, Z., Yang, Y., Shrivastava, A., Rawal, V., Ding, Z.: Towards frequency-based explanation for robust CNN. 
arXiv:2005.03141 (2020)"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-024-03605-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-024-03605-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-024-03605-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,10]],"date-time":"2025-03-10T09:09:21Z","timestamp":1741597761000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-024-03605-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,6]]},"references-count":63,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2025,3]]}},"alternative-id":["3605"],"URL":"https:\/\/doi.org\/10.1007\/s00371-024-03605-x","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"value":"0178-2789","type":"print"},{"value":"1432-2315","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,6]]},"assertion":[{"value":"6 August 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 September 2024","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}
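
The record above is a Crossref "work" message for the article. As a minimal sketch of how such a record can be consumed, assuming Python 3 with the requests package installed, the snippet below fetches the same message from Crossref's public REST API (https://api.crossref.org/works/<DOI>) and prints a few of the fields shown above; the field names match this record, and error handling is kept deliberately thin.

import requests

DOI = "10.1007/s00371-024-03605-x"

# Fetch the Crossref work message for this DOI. The response body has the
# same shape as the record above: {"status": "ok", ..., "message": {...}}.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
msg = resp.json()["message"]

# Top-level bibliographic fields ("title" and "container-title" are lists).
print("Title:  ", msg["title"][0])
print("Journal:", msg["container-title"][0])
print("Pages:  ", msg.get("page", "n/a"))

# Authors are given/family pairs in citation order.
print("Authors:", ", ".join(f'{a.get("given", "")} {a["family"]}' for a in msg["author"]))

# Each deposited reference carries a key (e.g. "3605_CR58") plus whatever
# fields the publisher supplied: a DOI, an unstructured string, or both.
refs = msg.get("reference", [])
print("References deposited:", len(refs))
for ref in refs[:3]:
    print(" ", ref["key"], "->", ref.get("DOI") or ref.get("unstructured", "")[:60])

Crossref asks high-volume callers to identify themselves; appending a mailto parameter to the query string (e.g. ?mailto=you@example.org) is the documented way to do so and routes requests to the service's "polite" pool.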