{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T05:13:43Z","timestamp":1755839623333,"version":"3.37.3"},"reference-count":86,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"Science and Technology Development Fund of Macao S.A.R","award":["0015\/2019\/AKP","0123\/2022\/AFJ","0081\/2022\/A2"],"award-info":[{"award-number":["0015\/2019\/AKP","0123\/2022\/AFJ","0081\/2022\/A2"]}]},{"DOI":"10.13039\/501100021171","name":"Basic and Applied Basic Research Foundation of Guangdong Province","doi-asserted-by":"publisher","award":["2020B1515130004"],"award-info":[{"award-number":["2020B1515130004"]}],"id":[{"id":"10.13039\/501100021171","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62376263"],"award-info":[{"award-number":["62376263"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Basic Research Program of Shenzhen","award":["JCYJ20190812160003719"],"award-info":[{"award-number":["JCYJ20190812160003719"]}]},{"name":"SICC"},{"DOI":"10.13039\/501100004733","name":"Universidade de Macau","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004733","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2024,1]]},"DOI":"10.1109\/tpami.2023.3323698","type":"journal-article","created":{"date-parts":[[2023,10,13]],"date-time":"2023-10-13T17:58:02Z","timestamp":1697219882000},"page":"354-369","source":"Crossref","is-referenced-by-count":4,"title":["LAFIT: Efficient and Reliable Evaluation of Adversarial Defenses With Latent Features"],"prefix":"10.1109","volume":"46","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-1717-8687","authenticated-orcid":false,"given":"Yunrui","family":"Yu","sequence":"first","affiliation":[{"name":"State Key Lab of IOTSC, University of Macau, Taipa, Macau, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2063-2051","authenticated-orcid":false,"given":"Xitong","family":"Gao","sequence":"additional","affiliation":[{"name":"Shenzhen Institute of Advanced Technology, Chinese Academy of Sciences, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9480-0356","authenticated-orcid":false,"given":"Cheng-Zhong","family":"Xu","sequence":"additional","affiliation":[{"name":"State Key Lab of IOTSC, University of Macau, Taipa, Macau, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CIS.2016.0099"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2016.7532519"},{"key":"ref3","first-page":"177","article-title":"AirSim drone racing lab","author":"Madaan","year":"2020","journal-title":"Proc. Neurips 2019 Competition Demonstration Track"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2018.2791721"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2019.04.028"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00783"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8461053"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00718"},{"article-title":"Intriguing properties of neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Szegedy","key":"ref9"},{"article-title":"Explaining and harnessing adversarial examples","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Goodfellow","key":"ref10"},{"article-title":"On evaluating adversarial robustness","year":"2019","author":"Carlini","key":"ref11"},{"article-title":"Towards deep learning models resistant to adversarial attacks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Madry","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.282"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00957"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/543"},{"article-title":"Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Croce","key":"ref17"},{"key":"ref18","first-page":"3358","article-title":"Adversarial training for free!","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Shafahi"},{"key":"ref19","first-page":"11 192","article-title":"Unlabeled data improves adversarial robustness","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Carmon"},{"key":"ref20","first-page":"12192","article-title":"Are labels required for improving adversarial robustness?","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Alayrac"},{"key":"ref21","first-page":"7472","article-title":"Theoretically principled trade-off between robustness and accuracy","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhang"},{"key":"ref22","first-page":"4970","article-title":"Improving adversarial robustness via promoting ensemble diversity","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Pang"},{"article-title":"Improving adversarial robustness of ensembles with diversity training","year":"2019","author":"Kariyappa","key":"ref23"},{"article-title":"Boosting adversarial training with hypersphere embedding","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Pang","key":"ref24"},{"article-title":"Improving adversarial robustness requires revisiting misclassified examples","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wang","key":"ref25"},{"article-title":"Adversarial weight perturbation helps robust generalization","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Wu","key":"ref26"},{"article-title":"Does network width really help adversarial robustness?","year":"2020","author":"Wu","key":"ref27"},{"article-title":"Uncovering the limits of adversarial training against norm-bounded adversarial examples","year":"2020","author":"Gowal","key":"ref28"},{"article-title":"DVERGE: Diversifying vulnerabilities for enhanced robust generation of ensembles","year":"2020","author":"Yang","key":"ref29"},{"key":"ref30","first-page":"1831","article-title":"Defense against adversarial attacks using feature scattering-based adversarial training","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zhang"},{"key":"ref31","article-title":"Adversarial interpolation training: A simple approach for improving model robustness","volume-title":"OpenReview","author":"Zhang","year":"2020"},{"key":"ref32","article-title":"Sensible adversarial learning","volume-title":"OpenReview","author":"Kim","year":"2020"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2022.3207917"},{"article-title":"Manifold regularization for locally stable deep neural networks","year":"2020","author":"Jin","key":"ref34"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00348"},{"article-title":"On adaptive attacks to adversarial example defenses","year":"2020","author":"Tramer","key":"ref36"},{"key":"ref37","doi-asserted-by":"crossref","DOI":"10.23915\/distill.00007","article-title":"Feature visualization","volume-title":"Distill","author":"Olah","year":"2017"},{"key":"ref38","article-title":"Activation atlas","volume-title":"Distill","volume":"4","author":"Carter","year":"2019"},{"key":"ref39","first-page":"2032","article-title":"Controlling neural level sets","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Atzmon"},{"article-title":"Self-adaptive training: Beyond empirical risk minimization","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Huang","key":"ref40"},{"article-title":"An alternative surrogate loss for PGD-based adversarial testing","year":"2019","author":"Gowal","key":"ref41"},{"article-title":"Adversarial robustness through local linearization","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Qin","key":"ref42"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00568"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354222"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1145\/3453158"},{"article-title":"Adversarially-trained deep nets transfer better: Illustration on image classification","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Utrera","key":"ref46"},{"article-title":"Adversarial self-defense for cycle-consistent GANs","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Bashkirova","key":"ref47"},{"key":"ref48","first-page":"25179","article-title":"Adversarial training helps transfer learning via better representations","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Deng"},{"article-title":"Adversarial examples in the physical world","year":"2017","author":"Kurakin","key":"ref49"},{"article-title":"Minimally distorted adversarial examples with a fast adaptive boundary attack","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Croce","key":"ref50"},{"article-title":"Adam: A method for stochastic optimization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kingma","key":"ref51"},{"article-title":"Diversity can be transferred: Output diversification for white- and black-box attacks","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Tashiro","key":"ref52"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01468"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58592-1_29"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i10.17075"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/385"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00893"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00483"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00723"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11672"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00283"},{"key":"ref62","first-page":"2672","article-title":"Generative adversarial nets","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Goodfellow"},{"key":"ref63","first-page":"2045","article-title":"AdvGAN++: Harnessing latent layers for adversary generation","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis. Workshop","author":"Mangla"},{"article-title":"Adversarial machine learning at scale","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kurakin","key":"ref64"},{"article-title":"Bag of tricks for adversarial training","year":"2020","author":"Pang","key":"ref65"},{"key":"ref66","first-page":"8093","article-title":"Overfitting in adversarially robust deep learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rice"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00078"},{"article-title":"Fast is better than free: Revisiting adversarial training","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wong","key":"ref68"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00020"},{"article-title":"HYDRA: Pruning adversarially robust neural networks","year":"2020","author":"Sehwag","key":"ref70"},{"article-title":"Robustness may be at odds with accuracy","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Tsipras","key":"ref71"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1145\/3052973.3053009"},{"key":"ref73","first-page":"274","article-title":"Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Athalye"},{"key":"ref74","first-page":"448","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","volume-title":"Proc. 32nd Int. Conf. Int. Conf. Mach. Learn.","author":"Ioffe"},{"article-title":"The CIFAR-10 and CIFAR-100 datasets","year":"2014","author":"Krizhevsky","key":"ref75"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2008.128"},{"key":"ref77","first-page":"29935","article-title":"Data augmentation can improve robustness","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Rebuffi"},{"key":"ref78","first-page":"4218","article-title":"Improving robustness using generated data","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Gowal"},{"key":"ref79","first-page":"17 258","article-title":"Robustness and accuracy could be reconcilable by (Proper) definition","volume-title":"Proc. 39th Int. Conf. Mach. Learn.","author":"Pang"},{"key":"ref80","first-page":"2712","article-title":"Using pre-training can improve model robustness and uncertainty","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Hendrycks"},{"article-title":"Robustness (Python library)","year":"2019","author":"Engstrom","key":"ref81"},{"key":"ref82","first-page":"227","article-title":"You only propagate once: Accelerating adversarial training via maximal principle","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zhang"},{"article-title":"MMA training: Direct input space margin maximization through adversarial training","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ding","key":"ref83"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00445"},{"article-title":"Improving adversarial robustness through progressive hardening","year":"2020","author":"Sitawarin","key":"ref85"},{"key":"ref86","first-page":"7054","article-title":"Do wider neural networks really help adversarial robustness?","volume-title":"Proc. 35th Conf. Neural Inf. Process. Syst.","author":"Wu"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/34\/10345401\/10285432.pdf?arnumber=10285432","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,20]],"date-time":"2023-12-20T01:15:01Z","timestamp":1703034901000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10285432\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,1]]},"references-count":86,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2023.3323698","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"type":"print","value":"0162-8828"},{"type":"electronic","value":"2160-9292"},{"type":"electronic","value":"1939-3539"}],"subject":[],"published":{"date-parts":[[2024,1]]}}}