{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T04:57:42Z","timestamp":1760245062355,"version":"3.37.3"},"reference-count":41,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2022,6,14]],"date-time":"2022-06-14T00:00:00Z","timestamp":1655164800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,6,14]],"date-time":"2022-06-14T00:00:00Z","timestamp":1655164800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"national natural science foundation of china","doi-asserted-by":"publisher","award":["no.61876155 and no.61876154"],"award-info":[{"award-number":["no.61876155 and no.61876154"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Mach Learn"],"published-print":{"date-parts":[[2022,7]]},"DOI":"10.1007\/s10994-022-06186-9","type":"journal-article","created":{"date-parts":[[2022,6,14]],"date-time":"2022-06-14T22:02:33Z","timestamp":1655244153000},"page":"2489-2513","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Re-thinking model robustness from stability: a new insight to defend adversarial examples"],"prefix":"10.1007","volume":"111","author":[{"given":"Shufei","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Kaizhu","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Zenglin","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,6,14]]},"reference":[{"key":"6186_CR1","unstructured":"Carmon, 
Y., Raghunathan, A., Schmidt, L., Duchi, J. C., & Liang, P. S. (2019). Unlabeled data improves adversarial robustness (pp. 11190\u201311201)."},{"key":"6186_CR2","unstructured":"Cisse, M., Bojanowski, P., Grave, E., Dauphin, Y., & Usunier, N. (2017). Parseval networks: Improving robustness to adversarial examples. arXiv preprint arXiv:1704.08847."},{"key":"6186_CR3","unstructured":"Eykholt, K., Evtimov, I., Fernandes, E., Li, B., Rahmati, A., Tramer, F., Prakash, A., Kohno, T., & Song, D. (2018). Physical adversarial examples for object detectors. arXiv preprint arXiv:1807.07769."},{"issue":"3","key":"6186_CR4","doi-asserted-by":"publisher","first-page":"481","DOI":"10.1007\/s10994-017-5663-3","volume":"107","author":"A Fawzi","year":"2018","unstructured":"Fawzi, A., Fawzi, O., & Frossard, P. (2018). Analysis of classifiers robustness to adversarial perturbations. Machine Learning, 107(3), 481\u2013508.","journal-title":"Machine Learning"},{"key":"6186_CR5","unstructured":"Fawzi, A., Moosavi-Dezfooli, S. M., & Frossard, P. (2016). Robustness of classifiers: From adversarial to random noise (pp. 1632\u20131640)."},{"key":"6186_CR6","unstructured":"Finlay, C., Oberman, A., & Abbasi, B. (2018). Improved robustness to adversarial examples using Lipschitz regularization of the loss. arXiv preprint arXiv:1810.00953."},{"key":"6186_CR7","unstructured":"Fischer, V., Kumar, M. C., Metzen, J. H., & Brox, T. (2017). Adversarial examples for semantic image segmentation. arXiv preprint arXiv:1703.01101."},{"key":"6186_CR8","unstructured":"Goodfellow, I. J., Shlens, J., & Szegedy, C. (2014). Explaining and harnessing adversarial examples. arXiv preprint arXiv:1412.6572."},{"key":"6186_CR9","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., & Girshick, R. (2017). Mask r-cnn (pp. 2980\u20132988). 
IEEE.","DOI":"10.1109\/ICCV.2017.322"},{"key":"6186_CR10","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der\u00a0Maaten, L., & Weinberger, K. Q. (2017). Densely connected convolutional networks.","DOI":"10.1109\/CVPR.2017.243"},{"key":"6186_CR11","unstructured":"Kannan, H., Kurakin, A., & Goodfellow, I. (2018). Adversarial logit pairing. arXiv preprint arXiv:1803.06373."},{"key":"6186_CR12","doi-asserted-by":"crossref","unstructured":"Kos, J., Fischer, I., & Song, D. (2018). Adversarial examples for generative models (pp. 36\u201342). IEEE.","DOI":"10.1109\/SPW.2018.00014"},{"key":"6186_CR13","unstructured":"Kurakin, A., Goodfellow, I., & Bengio, S. (2016). Adversarial machine learning at scale. arXiv preprint arXiv:1611.01236."},{"key":"6186_CR14","unstructured":"Laine, S., & Aila, T. (2016). Temporal ensembling for semi-supervised learning. arXiv preprint arXiv:1610.02242."},{"issue":"7553","key":"6186_CR15","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1038\/nature14539","volume":"521","author":"Y LeCun","year":"2015","unstructured":"LeCun, Y., Bengio, Y., & Hinton, G. (2015). Deep learning. Nature, 521(7553), 436.","journal-title":"Nature"},{"issue":"1\u20133","key":"6186_CR16","doi-asserted-by":"publisher","first-page":"503","DOI":"10.1007\/BF01589116","volume":"45","author":"DC Liu","year":"1989","unstructured":"Liu, D. C., & Nocedal, J. (1989). On the limited memory bfgs method for large scale optimization. Mathematical Programming, 45(1\u20133), 503\u2013528.","journal-title":"Mathematical Programming"},{"key":"6186_CR17","doi-asserted-by":"crossref","unstructured":"Lyu, C., Huang, K., & Liang, H. N. (2015). A unified gradient regularization family for adversarial examples (pp. 301\u2013309). IEEE.","DOI":"10.1109\/ICDM.2015.84"},{"key":"6186_CR18","unstructured":"Ma, X., Li, B., Wang, Y., Erfani, S. M., Wijewickrema, S., Houle, M. E., Schoenebeck, G., Song, D., & Bailey, J. (2018). 
Characterizing adversarial subspaces using local intrinsic dimensionality. arXiv preprint arXiv:1801.02613."},{"key":"6186_CR19","unstructured":"Maal\u00f8e, L., S\u00f8nderby, C. K., S\u00f8nderby, S. K., & Winther, O. (2016). Auxiliary deep generative models. arXiv preprint arXiv:1602.05473."},{"key":"6186_CR20","unstructured":"Madry, A., Makelov, A., Schmidt, L., Tsipras, D., & Vladu, A. (2017). Towards deep learning models resistant to adversarial attacks. arXiv preprint arXiv:1706.06083."},{"key":"6186_CR21","unstructured":"Mao, C., Zhong, Z., Yang, J., Vondrick, C., & Ray, B. (2019). Metric learning for adversarial robustness (pp. 478\u2013489)."},{"key":"6186_CR23","doi-asserted-by":"publisher","first-page":"1979","DOI":"10.1109\/TPAMI.2018.2858821","volume":"41","author":"T Miyato","year":"2018","unstructured":"Miyato, T., Maeda, S. I., Ishii, S., & Koyama, M. (2018). Virtual adversarial training: a regularization method for supervised and semi-supervised learning. IEEE Transactions on Pattern Analysis and Machine Intelligence, 41, 1979.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"6186_CR25","unstructured":"Miyato, T., Maeda, S. I., Koyama, M., Nakae, K., & Ishii, S. (2015). Distributional smoothing with virtual adversarial training. arXiv preprint arXiv:1507.00677."},{"key":"6186_CR26","unstructured":"Pang, T., Yang, X., Dong, Y., Su, H., & Zhu, J. (2020). Bag of tricks for adversarial training. arXiv preprint arXiv:2010.00467."},{"key":"6186_CR27","unstructured":"Papernot, N., McDaniel, P., & Goodfellow, I. (2016). Transferability in machine learning: from phenomena to black-box attacks using adversarial samples. arXiv preprint arXiv:1605.07277."},{"key":"6186_CR28","unstructured":"Rasmus, A., Berglund, M., Honkala, M., Valpola, H., & Raiko, T. (2015). Semi-supervised learning with ladder networks (pp. 
3546\u20133554)."},{"key":"6186_CR29","unstructured":"Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., & Chen, X. (2016). Improved techniques for training gans. In Advances in neural information processing systems (pp. 2234\u20132242)."},{"key":"6186_CR30","unstructured":"Shafahi, A., Najibi, M., Ghiasi, A., Xu, Z., Dickerson, J., Studer, C., Davis, L. S., Taylor, G., & Goldstein, T. (2019). Adversarial training for free! arXiv preprint arXiv:1904.12843."},{"key":"6186_CR31","unstructured":"Shaham, U., Yamada, Y., & Negahban, S. (2015). Understanding adversarial training: Increasing local stability of neural nets through robust optimization. arXiv preprint arXiv:1511.05432."},{"key":"6186_CR32","unstructured":"Sitawarin, C., Chakraborty, S., & Wagner, D. (2020). Improving adversarial robustness through progressive hardening. arXiv preprint arXiv:2003.09347."},{"key":"6186_CR33","unstructured":"Song, C., He, K., Lin, J., Wang, L., & Hopcroft, J. E. (2019). Robust local features for improving the generalization of adversarial training. arXiv preprint arXiv:1909.10147."},{"key":"6186_CR34","unstructured":"Springenberg, J. T. (2015). Unsupervised and semi-supervised learning with categorical generative adversarial networks. arXiv preprint arXiv:1511.06390."},{"key":"6186_CR35","unstructured":"Stanforth, R., Fawzi, A., & Kohli, P., et\u00a0al. (2019). Are labels required for improving adversarial robustness? arXiv preprint arXiv:1905.13725."},{"key":"6186_CR36","unstructured":"Weng, T. W., Zhang, H., Chen, P. Y., Yi, J., Su, D., Gao, Y., Hsieh, C. J., & Daniel, L. (2018). Evaluating the robustness of neural networks: An extreme value theory approach. arXiv preprint arXiv:1801.10578."},{"key":"6186_CR37","unstructured":"Willetts, M., Camuto, A., Rainforth, T., Roberts, S., & Holmes, C. (2019). Improving vaes\u2019 robustness to adversarial attack. arXiv preprint arXiv:1906.00230."},{"key":"6186_CR38","unstructured":"Wu, D., Xia, S. T., & Wang, Y. 
(2020). Adversarial weight perturbation helps robust generalization. arXiv preprint arXiv:2004.05884."},{"key":"6186_CR39","doi-asserted-by":"crossref","unstructured":"Xu, W., Evans, D., & Qi, Y. (2017). Feature squeezing: Detecting adversarial examples in deep neural networks. arXiv preprint arXiv:1704.01155.","DOI":"10.14722\/ndss.2018.23198"},{"key":"6186_CR40","doi-asserted-by":"crossref","unstructured":"Zagoruyko, S., & Komodakis, N. (2016). Wide residual networks. arXiv preprint arXiv:1605.07146.","DOI":"10.5244\/C.30.87"},{"key":"6186_CR41","unstructured":"Zhang, H., Yu, Y., Jiao, J., Xing, E. P., Ghaoui, L. E., & Jordan, M. I. (2019). Theoretically principled trade-off between robustness and accuracy. arXiv preprint arXiv:1901.08573."},{"key":"6186_CR42","unstructured":"Zhang, J., Zhu, J., Niu, G., Han, B., Sugiyama, M., & Kankanhalli, M. (2020). Geometry-aware instance-reweighted adversarial training. arXiv preprint arXiv:2010.01736."},{"key":"6186_CR43","unstructured":"Zhao, J., Mathieu, M., Goroshin, R., & Lecun, Y. (2015). Stacked what-where auto-encoders. 
arXiv preprint arXiv:1506.02351."}],"container-title":["Machine Learning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-022-06186-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10994-022-06186-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-022-06186-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,6,14]],"date-time":"2023-06-14T00:04:13Z","timestamp":1686701053000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10994-022-06186-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,14]]},"references-count":41,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2022,7]]}},"alternative-id":["6186"],"URL":"https:\/\/doi.org\/10.1007\/s10994-022-06186-9","relation":{},"ISSN":["0885-6125","1573-0565"],"issn-type":[{"type":"print","value":"0885-6125"},{"type":"electronic","value":"1573-0565"}],"subject":[],"published":{"date-parts":[[2022,6,14]]},"assertion":[{"value":"8 July 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 April 2022","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 April 2022","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 June 2022","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict 
of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"This article does not contain any studies with human participants performed by any of the authors.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}},{"value":"Informed consent was obtained from all individual participants included in the study.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Consent for publication was obtained from the participants.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}]}}