{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T03:45:43Z","timestamp":1775619943804,"version":"3.50.1"},"reference-count":75,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key R&D Program of China","award":["2021YFB3100800"],"award-info":[{"award-number":["2021YFB3100800"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62271090"],"award-info":[{"award-number":["62271090"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Chongqing Natural Science Fund","award":["cstc2021jcyj-jqX0023"],"award-info":[{"award-number":["cstc2021jcyj-jqX0023"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1109\/tpami.2024.3385745","type":"journal-article","created":{"date-parts":[[2024,4,8]],"date-time":"2024-04-08T20:53:36Z","timestamp":1712609616000},"page":"6669-6687","source":"Crossref","is-referenced-by-count":10,"title":["Meta Invariance Defense Towards Generalizable Robustness to Unknown Adversarial Attacks"],"prefix":"10.1109","volume":"46","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5305-8543","authenticated-orcid":false,"given":"Lei","family":"Zhang","sequence":"first","affiliation":[{"name":"School of Microelectronics and Communication Engineering, Chongqing University, Chongqing, China"}]},{"given":"Yuhang","family":"Zhou","sequence":"additional","affiliation":[{"name":"School of Microelectronics and Communication Engineering, Chongqing University, Chongqing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0512-880X","authenticated-orcid":false,"given":"Yi","family":"Yang","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7985-0037","authenticated-orcid":false,"given":"Xinbo","family":"Gao","sequence":"additional","affiliation":[{"name":"Chongqing Key Laboratory of Image Cognition, Chongqing University of Posts and Telecommunications, Chongqing, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Rethinking machine learning robustness via its link with the out-of-distribution problem","author":"Amich","year":"2022"},{"key":"ref2","article-title":"Training ensembles to detect adversarial examples","author":"Bagnall","year":"2017"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/1120.003.0080"},{"key":"ref4","article-title":"Decision-based adversarial attacks: Reliable attacks against black-box machine learning models","author":"Brendel","year":"2017"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref6","first-page":"22 405","article-title":"SWAD: Domain generalization by seeking flat minima","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Cha"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140448"},{"key":"ref8","first-page":"2196","article-title":"Minimally distorted adversarial examples with a fast adaptive boundary attack","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Croce"},{"key":"ref9","first-page":"2206","article-title":"Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Croce"},{"key":"ref10","article-title":"Advertorch v0. 1: An adversarial robustness toolbox based on PyTorch","author":"Ding","year":"2019"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00957"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00444"},{"key":"ref13","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00741"},{"key":"ref15","first-page":"1126","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Finn"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.293"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5816"},{"key":"ref18","volume-title":"Deep Learning","author":"Goodfellow","year":"2016"},{"key":"ref19","article-title":"Explaining and harnessing adversarial examples","author":"Goodfellow","year":"2014"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref21","first-page":"857","article-title":"Stochastic neighbor embedding","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Hinton"},{"key":"ref22","article-title":"Unsupervised learning via meta-learning","author":"Hsu","year":"2018"},{"key":"ref23","article-title":"What do adversarially trained neural networks focus: A Fourier domain-based study","author":"Huang","year":"2022"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00407"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01304"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00699"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1523\/JNEUROSCI.17-11-04302.1997"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-33718-5_12"},{"key":"ref30","article-title":"Adversarial machine learning at scale","author":"Kurakin","year":"2016"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11596"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00191"},{"key":"ref34","article-title":"Nesterov accelerated gradient and scale invariance for adversarial attacks","author":"Lin","year":"2019"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01488"},{"key":"ref37","article-title":"Towards deep learning models resistant to adversarial attacks","author":"Madry","year":"2017"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00070"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134057"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1016\/0166-4328(82)90081-X"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.282"},{"key":"ref42","first-page":"10","article-title":"Domain generalization via invariant feature representation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Muandet"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.2978474"},{"key":"ref44","article-title":"Bag of tricks for adversarial training","author":"Pang","year":"2020"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2016.36"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2016.41"},{"key":"ref47","article-title":"Certified defenses against adversarial examples","author":"Raghunathan","year":"2018"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00445"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11504"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1126\/science.290.5500.2323"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1162\/089976698300017467"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1016\/S0893-6080(02)00228-9"},{"key":"ref53","article-title":"APE-GAN: Adversarial perturbation elimination with GAN","author":"Shen","year":"2017"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2019.2890858"},{"key":"ref55","article-title":"Intriguing properties of neural networks","author":"Szegedy","year":"2013"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1126\/science.290.5500.2319"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/72.788640"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00871"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00756"},{"key":"ref60","first-page":"1","article-title":"Demystifying adversarial training via a unified probabilistic framework","volume-title":"Proc. ICML Workshop Adversarial Mach. Learn.","author":"Wang"},{"key":"ref61","first-page":"1","article-title":"Improving adversarial robustness requires revisiting misclassified examples","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wang"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.01002"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-005-4939-z"},{"key":"ref64","article-title":"Defending against physically realizable attacks on image classification","author":"Wu","year":"2019"},{"key":"ref65","article-title":"Spatially transformed adversarial examples","author":"Xiao","year":"2018"},{"key":"ref66","article-title":"Mitigating adversarial effects through randomization","author":"Xie","year":"2017"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00284"},{"key":"ref68","first-page":"16 051","article-title":"Class-disentanglement and applications in adversarial detection and defense","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yang"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00765"},{"key":"ref70","first-page":"7472","article-title":"Theoretically principled trade-off between robustness and accuracy","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhang"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.485"},{"key":"ref72","first-page":"12 835","article-title":"Towards defending against adversarial examples via attack-invariant features","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhou"},{"key":"ref73","article-title":"Reliable adversarial distillation with unreliable teachers","author":"Zhu","year":"2021"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01613"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58542-6_34"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/10666888\/10494561.pdf?arnumber=10494561","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,7]],"date-time":"2024-09-07T04:42:28Z","timestamp":1725684148000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10494561\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10]]},"references-count":75,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2024.3385745","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10]]}}}