{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T01:07:54Z","timestamp":1770340074680,"version":"3.49.0"},"reference-count":23,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T00:00:00Z","timestamp":1690848000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100002766","name":"Beijing University of Posts and Telecommunications","doi-asserted-by":"publisher","award":["CX2022229"],"award-info":[{"award-number":["CX2022229"]}],"id":[{"id":"10.13039\/501100002766","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61972048"],"award-info":[{"award-number":["61972048"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61976024"],"award-info":[{"award-number":["61976024"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62072051"],"award-info":[{"award-number":["62072051"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neural Networks"],"published-print":{"date-parts":[[2023,8]]},"DOI":"10.1016\/j.neunet.2023.06.006","type":"journal-article","created":{"date-parts":[[2023,6,8]],"date-time":"2023-06-08T11:47:58Z","timestamp":1686224878000},"page":"516-526","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":2,"special_numbering":"C","title":["Enhanced covertness class discriminative universal adversarial perturbations"],"prefix":"10.1016","volume":"165","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9596-3066","authenticated-orcid":false,"given":"Haoran","family":"Gao","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0532-9783","authenticated-orcid":false,"given":"Hua","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4665-8508","authenticated-orcid":false,"given":"Wenmin","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2552-0726","authenticated-orcid":false,"given":"Jiahui","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Fei","family":"Gao","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neunet.2023.06.006_b1","series-title":"Computer vision \u2013 ACCV 2020, vol. 12625","first-page":"284","article-title":"Double targeted universal adversarial perturbations","author":"Benz","year":"2021"},{"key":"10.1016\/j.neunet.2023.06.006_b2","doi-asserted-by":"crossref","unstructured":"Bhojanapalli,\u00a0S., Chakrabarti,\u00a0A., Glasner,\u00a0D., Li,\u00a0D., Unterthiner,\u00a0T., & Veit,\u00a0A. (2021). Understanding robustness of transformers for image classification. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 10231\u201310241).","DOI":"10.1109\/ICCV48922.2021.01007"},{"key":"10.1016\/j.neunet.2023.06.006_b3","series-title":"2017 IEEE symposium on security and privacy","first-page":"39","article-title":"Towards evaluating the robustness of neural networks","author":"Carlini","year":"2017"},{"issue":"2","key":"10.1016\/j.neunet.2023.06.006_b4","doi-asserted-by":"crossref","first-page":"935","DOI":"10.1109\/TCSVT.2022.3204753","article-title":"Snis: A signal noise separation-based network for post-processed image forgery detection","volume":"33","author":"Chen","year":"2022","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"10.1016\/j.neunet.2023.06.006_b5","series-title":"2009 IEEE conference on computer vision and pattern recognition","first-page":"248","article-title":"Imagenet: A large-scale hierarchical image database","author":"Deng","year":"2009"},{"key":"10.1016\/j.neunet.2023.06.006_b6","unstructured":"Goodfellow,\u00a0I., Pouget-Abadie,\u00a0J., Mirza,\u00a0M., Xu,\u00a0B., Warde-Farley,\u00a0D., Ozair,\u00a0S., et al. (2014). Generative adversarial nets. In Advances in neural information processing systems (pp. 2672\u20132680)."},{"key":"10.1016\/j.neunet.2023.06.006_b7","doi-asserted-by":"crossref","first-page":"58","DOI":"10.1016\/j.neunet.2022.02.025","article-title":"Boosting the transferability of adversarial examples via stochastic serial attack","volume":"150","author":"Hao","year":"2022","journal-title":"Neural Networks"},{"key":"10.1016\/j.neunet.2023.06.006_b8","series-title":"2018 IEEE security and privacy workshops","first-page":"43","article-title":"Learning universal adversarial perturbations with generative models","author":"Hayes","year":"2018"},{"key":"10.1016\/j.neunet.2023.06.006_b9","doi-asserted-by":"crossref","unstructured":"He,\u00a0K., Zhang,\u00a0X., Ren,\u00a0S., & Sun,\u00a0J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770\u2013778).","DOI":"10.1109\/CVPR.2016.90"},{"key":"10.1016\/j.neunet.2023.06.006_b10","doi-asserted-by":"crossref","unstructured":"Hu,\u00a0J., Liao,\u00a0X., Liang,\u00a0J., Zhou,\u00a0W., & Qin,\u00a0Z. (2022). Finfer: Frame inference-based deepfake detection for high-visual-quality videos. 36, In Proceedings of the AAAI conference on artificial intelligence, vol. 36, no. 1 (1), (pp. 951\u2013959).","DOI":"10.1609\/aaai.v36i1.19978"},{"key":"10.1016\/j.neunet.2023.06.006_b11","doi-asserted-by":"crossref","unstructured":"Kong,\u00a0Z., Guo,\u00a0J., Li,\u00a0A., & Liu,\u00a0C. (2020). Physgan: Generating physical-world-resilient adversarial examples for autonomous driving. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 14254\u201314263).","DOI":"10.1109\/CVPR42600.2020.01426"},{"key":"10.1016\/j.neunet.2023.06.006_b12","series-title":"Learning multiple layers of features from tiny images","author":"Krizhevsky","year":"2009"},{"key":"10.1016\/j.neunet.2023.06.006_b13","doi-asserted-by":"crossref","unstructured":"Moosavi-Dezfooli,\u00a0S.-M., Fawzi,\u00a0A., Fawzi,\u00a0O., & Frossard,\u00a0P. (2017). Universal adversarial perturbations. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1765\u20131773).","DOI":"10.1109\/CVPR.2017.17"},{"key":"10.1016\/j.neunet.2023.06.006_b14","doi-asserted-by":"crossref","unstructured":"Moosavi-Dezfooli,\u00a0S.-M., Fawzi,\u00a0A., & Frossard,\u00a0P. (2016). Deepfool: a simple and accurate method to fool deep neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 2574\u20132582).","DOI":"10.1109\/CVPR.2016.282"},{"key":"10.1016\/j.neunet.2023.06.006_b15","series-title":"British machine vision conference 2017","article-title":"Fast feature fool: A data independent approach to universal adversarial perturbations","author":"Mopuri","year":"2017"},{"key":"10.1016\/j.neunet.2023.06.006_b16","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"742","article-title":"NAG: Network for adversary generation","author":"Mopuri","year":"2018"},{"key":"10.1016\/j.neunet.2023.06.006_b17","series-title":"2018 IEEE\/CVF conference on computer vision and pattern recognition","first-page":"4422","article-title":"Generative adversarial perturbations","author":"Poursaeed","year":"2018"},{"key":"10.1016\/j.neunet.2023.06.006_b18","series-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014"},{"key":"10.1016\/j.neunet.2023.06.006_b19","series-title":"The 2011 international joint conference on neural networks","first-page":"1453","article-title":"The german traffic sign recognition benchmark: a multi-class classification competition","author":"Stallkamp","year":"2011"},{"key":"10.1016\/j.neunet.2023.06.006_b20","unstructured":"Szegedy,\u00a0C., Zaremba,\u00a0W., Sutskever,\u00a0I., Bruna,\u00a0J., Erhan,\u00a0D., Goodfellow,\u00a0I., et al. (2014). Intriguing properties of neural networks. In 2nd International conference on learning representations."},{"key":"10.1016\/j.neunet.2023.06.006_b21","doi-asserted-by":"crossref","unstructured":"Xiao,\u00a0Z., Gao,\u00a0X., Fu,\u00a0C., Dong,\u00a0Y., Gao,\u00a0W., Zhang,\u00a0X., et al. (2021). Improving Transferability of Adversarial Patches on Face Recognition With Generative Models. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 11845\u201311854).","DOI":"10.1109\/CVPR46437.2021.01167"},{"issue":"04","key":"10.1016\/j.neunet.2023.06.006_b22","doi-asserted-by":"crossref","first-page":"6754","DOI":"10.1609\/aaai.v34i04.6154","article-title":"CD-UAP: Class discriminative universal adversarial perturbation","volume":"34","author":"Zhang","year":"2020","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"10.1016\/j.neunet.2023.06.006_b23","series-title":"2020 IEEE\/CVF conference on computer vision and pattern recognition","first-page":"14509","article-title":"Understanding adversarial examples from the mutual influence of images and perturbations","author":"Zhang","year":"2020"}],"container-title":["Neural Networks"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608023003076?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608023003076?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T02:32:47Z","timestamp":1760236367000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0893608023003076"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8]]},"references-count":23,"alternative-id":["S0893608023003076"],"URL":"https:\/\/doi.org\/10.1016\/j.neunet.2023.06.006","relation":{},"ISSN":["0893-6080"],"issn-type":[{"value":"0893-6080","type":"print"}],"subject":[],"published":{"date-parts":[[2023,8]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Enhanced covertness class discriminative universal adversarial perturbations","name":"articletitle","label":"Article Title"},{"value":"Neural Networks","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neunet.2023.06.006","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2023 Elsevier Ltd. All rights reserved.","name":"copyright","label":"Copyright"}]}}