{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T05:05:09Z","timestamp":1750309509544,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":57,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T00:00:00Z","timestamp":1730073600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"the Beijing Natural Science Foundation","award":["IS23055"],"award-info":[{"award-number":["IS23055"]}]},{"name":"the National Key R&D Program of China","award":["2023YFB2703800"],"award-info":[{"award-number":["2023YFB2703800"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,28]]},"DOI":"10.1145\/3664647.3680580","type":"proceedings-article","created":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T06:59:41Z","timestamp":1729925981000},"page":"9435-9444","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["Embracing Adaptation: An Effective Dynamic Defense Strategy Against Adversarial Examples"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5216-9946","authenticated-orcid":false,"given":"Shenglin","family":"Yin","sequence":"first","affiliation":[{"name":"School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4891-3197","authenticated-orcid":false,"given":"Kelu","family":"Yao","sequence":"additional","affiliation":[{"name":"Zhejiang Lab &amp; Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6784-9709","authenticated-orcid":false,"given":"Zhen","family":"Xiao","sequence":"additional","affiliation":[{"name":"School of Computer Science, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-4646-7131","authenticated-orcid":false,"given":"Jieyi","family":"Long","sequence":"additional","affiliation":[{"name":"Theta Labs, Inc., San Jose, USA"}]}],"member":"320","published-online":{"date-parts":[[2024,10,28]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i6.20545"},{"key":"e_1_3_2_1_2_1","volume-title":"UK","author":"Andriushchenko Maksym","year":"2020","unstructured":"Maksym Andriushchenko, Francesco Croce, Nicolas Flammarion, and Matthias Hein. 2020. Square attack: a query-efficient black-box adversarial attack via random search. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIII. Springer, 484--501."},{"key":"e_1_3_2_1_3_1","volume-title":"International conference on machine learning. PMLR, 274--283","author":"Athalye Anish","year":"2018","unstructured":"Anish Athalye, Nicholas Carlini, and David Wagner. 2018. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. In International conference on machine learning. 
PMLR, 274--283."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00488"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00072"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/3394486.3403225"},{"key":"e_1_3_2_1_8_1","volume-title":"Towards robust neural networks via close-loop control. arXiv preprint arXiv:2102.01862","author":"Chen Zhuotong","year":"2021","unstructured":"Zhuotong Chen, Qianxiao Li, and Zheng Zhang. 2021. Towards robust neural networks via close-loop control. arXiv preprint arXiv:2102.01862 (2021)."},{"key":"e_1_3_2_1_9_1","volume-title":"International Conference on Machine Learning. PMLR, 1310--1320","author":"Cohen Jeremy","year":"2019","unstructured":"Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. 2019. Certified adversarial robustness via randomized smoothing. In International Conference on Machine Learning. PMLR, 1310--1320."},{"key":"e_1_3_2_1_10_1","volume-title":"Robustbench: a standardized adversarial robustness benchmark. arXiv preprint arXiv:2010.09670","author":"Croce Francesco","year":"2020","unstructured":"Francesco Croce, Maksym Andriushchenko, Vikash Sehwag, Edoardo Debenedetti, Nicolas Flammarion, Mung Chiang, Prateek Mittal, and Matthias Hein. 2020. Robustbench: a standardized adversarial robustness benchmark. arXiv preprint arXiv:2010.09670 (2020)."},{"key":"e_1_3_2_1_11_1","volume-title":"International Conference on Machine Learning. PMLR, 4421--4435","author":"Croce Francesco","year":"2022","unstructured":"Francesco Croce, Sven Gowal, Thomas Brunner, Evan Shelhamer, Matthias Hein, and Taylan Cemgil. 2022. Evaluating the adversarial robustness of adaptive test-time defenses. In International Conference on Machine Learning. PMLR, 4421--4435."},{"key":"e_1_3_2_1_12_1","volume-title":"International Conference on Machine Learning. PMLR, 2196--2205","author":"Croce Francesco","year":"2020","unstructured":"Francesco Croce and Matthias Hein. 2020. Minimally distorted adversarial examples with a fast adaptive boundary attack. In International Conference on Machine Learning. PMLR, 2196--2205."},{"key":"e_1_3_2_1_13_1","volume-title":"International conference on machine learning. PMLR, 2206--2216","author":"Croce Francesco","year":"2020","unstructured":"Francesco Croce and Matthias Hein. 2020. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In International conference on machine learning. PMLR, 2206--2216."},{"key":"e_1_3_2_1_14_1","volume-title":"3rd International Conference on Learning Representations, ICLR 2015 - Conference Track Proceedings. arXiv:1412","author":"Goodfellow Ian J.","year":"2015","unstructured":"Ian J. Goodfellow, Jonathon Shlens, and Christian Szegedy. 2015. Explaining and harnessing adversarial examples. In 3rd International Conference on Learning Representations, ICLR 2015 - Conference Track Proceedings. arXiv:1412.6572"},{"key":"e_1_3_2_1_15_1","volume-title":"Semi-supervised Learning by Entropy Minimization. Neural Information Processing Systems","author":"Grandvalet Yves","year":"2004","unstructured":"Yves Grandvalet and Yoshua Bengio. 2004. Semi-supervised Learning by Entropy Minimization. Neural Information Processing Systems (2004)."},{"key":"e_1_3_2_1_16_1","volume-title":"International conference on machine learning. 
PMLR, 1321--1330","author":"Guo Chuan","year":"2017","unstructured":"Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian QWeinberger. 2017. On calibration of modern neural networks. In International conference on machine learning. PMLR, 1321--1330."},{"key":"e_1_3_2_1_17_1","volume-title":"International Conference on Machine Learning. PMLR, 3976--3987","author":"G\u00fcrel Nezihe Merve","year":"2021","unstructured":"Nezihe Merve G\u00fcrel, Xiangyu Qi, Luka Rimanic, Ce Zhang, and Bo Li. 2021. Knowledge enhanced machine learning pipeline against diverse adversarial attacks. In International Conference on Machine Learning. PMLR, 3976--3987."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_19_1","volume-title":"International Conference on Machine Learning. PMLR, 2712--2721","author":"Hendrycks Dan","year":"2019","unstructured":"Dan Hendrycks, Kimin Lee, and Mantas Mazeika. 2019. Using pre-training can improve model robustness and uncertainty. In International Conference on Machine Learning. PMLR, 2712--2721."},{"key":"e_1_3_2_1_20_1","volume-title":"Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531","author":"Hinton Geoffrey","year":"2015","unstructured":"Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. 2015. Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 (2015)."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR56361.2022.9956200"},{"key":"e_1_3_2_1_22_1","first-page":"2427","article-title":"Test-time classifier adjustment module for model-agnostic domain generalization","volume":"34","author":"Iwasawa Yusuke","year":"2021","unstructured":"Yusuke Iwasawa and Yutaka Matsuo. 2021. Test-time classifier adjustment module for model-agnostic domain generalization. Advances in Neural Information Processing Systems 34 (2021), 2427--2440.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01304"},{"key":"e_1_3_2_1_24_1","volume-title":"Hauptmann","author":"Kang Guoliang","year":"2019","unstructured":"Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G. Hauptmann. 2019. Contrastive Adaptation Network for Unsupervised Domain Adaptation. Cornell University - arXiv (2019)."},{"key":"e_1_3_2_1_25_1","first-page":"14925","article-title":"Stable neural ode with lyapunov-stable equilibrium points for defending against adversarial attacks","volume":"34","author":"Kang Qiyu","year":"2021","unstructured":"Qiyu Kang, Yang Song, Qinxu Ding, and Wee Peng Tay. 2021. Stable neural ode with lyapunov-stable equilibrium points for defending against adversarial attacks. Advances in Neural Information Processing Systems 34 (2021), 14925--14937.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_26_1","volume-title":"Gomes","author":"Krause Andreas","year":"2010","unstructured":"Andreas Krause, Pietro Perona, and Ryan G. Gomes. 2010. Discriminative Clustering by Regularized Information Maximization. Neural Information Processing Systems (2010)."},{"key":"e_1_3_2_1_27_1","unstructured":"Alex Krizhevsky Geoffrey Hinton et al. 2009. Learning multiple layers of features from tiny images. (2009)."},{"key":"e_1_3_2_1_28_1","unstructured":"Jonghyun Lee Dahuin Jung Junho Yim and Sungroh Yoon. 2022. 
Confidence Score for Source-Free Unsupervised Domain Adaptation."},{"key":"e_1_3_2_1_29_1","volume-title":"A Simple Unified Framework for Detecting Out-of-Distribution Samples and Adversarial Attacks. Neural Information Processing Systems","author":"Lee Kimin","year":"2018","unstructured":"Kimin Lee, Kibok Lee, Honglak Lee, and Jinwoo Shin. 2018. A Simple Unified Framework for Detecting Out-of-Distribution Samples and Adversarial Attacks. Neural Information Processing Systems (2018)."},{"key":"e_1_3_2_1_30_1","volume-title":"Certified adversarial robustness with additive noise. arXiv preprint arXiv:1809.03113","author":"Li Bai","year":"2018","unstructured":"Bai Li, Changyou Chen, Wenlin Wang, and Lawrence Carin. 2018. Certified adversarial robustness with additive noise. arXiv preprint arXiv:1809.03113 (2018)."},{"key":"e_1_3_2_1_31_1","volume-title":"Do We Really Need to Access the Source Data? Source Hypothesis Transfer for Unsupervised Domain Adaptation. arXiv: Computer Vision and Pattern Recognition","author":"Liang Jian","year":"2020","unstructured":"Jian Liang, Dapeng Hu, and Jiashi Feng. 2020. Do We Really Need to Access the Source Data? Source Hypothesis Transfer for Unsupervised Domain Adaptation. arXiv: Computer Vision and Pattern Recognition (2020)."},{"key":"e_1_3_2_1_32_1","volume-title":"6th International Conference on Learning Representations, ICLR 2018 - Conference Track Proceedings.","author":"Madry Aleksander","year":"2018","unstructured":"Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 2018. Towards deep learning models resistant to adversarial attacks. In 6th International Conference on Learning Representations, ICLR 2018 - Conference Track Proceedings."},{"key":"e_1_3_2_1_33_1","volume-title":"International Conference on Machine Learning. PMLR, 6640--6650","author":"Maini Pratyush","year":"2020","unstructured":"Pratyush Maini, Eric Wong, and Zico Kolter. 2020. Adversarial robustness against the union of multiple perturbation models. In International Conference on Machine Learning. PMLR, 6640--6650."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00070"},{"key":"e_1_3_2_1_35_1","volume-title":"International conference on machine learning. PMLR, 16888--16905","author":"Niu Shuaicheng","year":"2022","unstructured":"Shuaicheng Niu, Jiaxiang Wu, Yifan Zhang, Yaofo Chen, Shijian Zheng, Peilin Zhao, and Mingkui Tan. 2022. Efficient test-time model adaptation without forgetting. In International conference on machine learning. PMLR, 16888--16905."},{"key":"e_1_3_2_1_36_1","volume-title":"Towards Stable Test-Time Adaptation in Dynamic Wild World. arXiv preprint arXiv:2302.12400","author":"Niu Shuaicheng","year":"2023","unstructured":"Shuaicheng Niu, Jiaxiang Wu, Yifan Zhang, Zhiquan Wen, Yaofo Chen, Peilin Zhao, and Mingkui Tan. 2023. Towards Stable Test-Time Adaptation in Dynamic Wild World. arXiv preprint arXiv:2302.12400 (2023)."},{"key":"e_1_3_2_1_37_1","volume-title":"International Conference on Machine Learning. PMLR, 7717--7727","author":"Pinot Rafael","year":"2020","unstructured":"Rafael Pinot, Raphael Ettedgui, Geovani Rizk, Yann Chevaleyre, and Jamal Atif. 2020. Randomization matters How to defend against strong adversarial attacks. In International Conference on Machine Learning. PMLR, 7717--7727."},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00844"},{"key":"e_1_3_2_1_39_1","volume-title":"International Conference on Machine Learning. 
PMLR, 8093--8104","author":"Rice Leslie","year":"2020","unstructured":"Leslie Rice, Eric Wong, and Zico Kolter. 2020. Overfitting in adversarially robust deep learning. In International Conference on Machine Learning. PMLR, 8093--8104."},{"key":"e_1_3_2_1_40_1","volume-title":"Robust learning meets generative models: Can proxy distributions improve adversarial robustness? arXiv preprint arXiv:2104.09425","author":"Sehwag Vikash","year":"2021","unstructured":"Vikash Sehwag, Saeed Mahloujifar, Tinashe Handina, Sihui Dai, Chong Xiang, Mung Chiang, and Prateek Mittal. 2021. Robust learning meets generative models: Can proxy distributions improve adversarial robustness? arXiv preprint arXiv:2104.09425 (2021)."},{"key":"e_1_3_2_1_41_1","volume-title":"International Conference on Learning Representations","author":"Shi Changhao","year":"2021","unstructured":"Changhao Shi, Chester Holtz, and Gal Mishne. 2021. Online Adversarial Purification based on Self-supervised Learning. International Conference on Learning Representations (2021)."},{"key":"e_1_3_2_1_42_1","volume-title":"Test-Time Training with Self-Supervision for Generalization under Distribution Shifts. arXiv: Learning","author":"Sun Yu","year":"2019","unstructured":"Yu Sun, Xiaolong Wang, Zhuang Liu, John J. Miller, Alexei A. Efros, and Moritz Hardt. 2019. Test-Time Training with Self-Supervision for Generalization under Distribution Shifts. arXiv: Learning (2019)."},{"key":"e_1_3_2_1_43_1","volume-title":"Intriguing properties of neural networks. arXiv preprint arXiv:1312.6199","author":"Szegedy Christian","year":"2013","unstructured":"Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. 2013. Intriguing properties of neural networks. arXiv preprint arXiv:1312.6199 (2013)."},{"key":"e_1_3_2_1_44_1","volume-title":"International Conference on Learning Representations","author":"Tarvainen Antti","year":"2017","unstructured":"Antti Tarvainen and Harri Valpola. 2017. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. International Conference on Learning Representations (2017)."},{"key":"e_1_3_2_1_45_1","volume-title":"ADVENT: Adversarial Entropy Minimization for Domain Adaptation in Semantic Segmentation. arXiv: Computer Vision and Pattern Recognition","author":"Vu Tuan-Hung","year":"2018","unstructured":"Tuan-Hung Vu, Himalaya Jain, Maxime Bucher, Matthieu Cord, and Patrick P\u00e9rez. 2018. ADVENT: Adversarial Entropy Minimization for Domain Adaptation in Semantic Segmentation. arXiv: Computer Vision and Pattern Recognition (2018)."},{"key":"e_1_3_2_1_46_1","volume-title":"Fighting Gradients with Gradients: Dynamic Defenses against Adversarial Attacks. arXiv preprint arXiv:2105.08714","author":"Wang Dequan","year":"2021","unstructured":"Dequan Wang, An Ju, Evan Shelhamer, David Wagner, and Trevor Darrell. 2021. Fighting Gradients with Gradients: Dynamic Defenses against Adversarial Attacks. arXiv preprint arXiv:2105.08714 (2021)."},{"key":"e_1_3_2_1_47_1","volume-title":"Tent: Fully test-time adaptation by entropy minimization. arXiv preprint arXiv:2006.10726","author":"Wang Dequan","year":"2020","unstructured":"Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno Olshausen, and Trevor Darrell. 2020. Tent: Fully test-time adaptation by entropy minimization. 
arXiv preprint arXiv:2006.10726 (2020)."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00552"},{"key":"e_1_3_2_1_49_1","volume-title":"Luc Van Gool, and Dengxin Dai","author":"Wang Qin","year":"2023","unstructured":"Qin Wang, Olga Fink, Luc Van Gool, and Dengxin Dai. 2023. Continual Test-Time Domain Adaptation."},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00706"},{"key":"e_1_3_2_1_51_1","first-page":"2958","article-title":"Adversarial weight perturbation helps robust generalization","volume":"33","author":"Wu Dongxian","year":"2020","unstructured":"Dongxian Wu, Shu-Tao Xia, and Yisen Wang. 2020. Adversarial weight perturbation helps robust generalization. Advances in Neural Information Processing Systems 33 (2020), 2958--2969.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3467079"},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/80"},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3580305.3599241"},{"key":"e_1_3_2_1_55_1","volume-title":"International Conference on Machine Learning. PMLR, 12062--12072","author":"Yoon Jongmin","year":"2021","unstructured":"Jongmin Yoon, Sung Ju Hwang, and Juho Lee. 2021. Adversarial purification with score-based generative models. In International Conference on Machine Learning. PMLR, 12062--12072."},{"key":"e_1_3_2_1_56_1","volume-title":"International conference on machine learning. PMLR, 11278--11287","author":"Zhang Jingfeng","year":"2020","unstructured":"Jingfeng Zhang, Xilie Xu, Bo Han, Gang Niu, Lizhen Cui, Masashi Sugiyama, and Mohan Kankanhalli. 2020. Attacks which do not kill training make adversarial learning stronger. In International conference on machine learning. PMLR, 11278--11287."},{"key":"e_1_3_2_1_57_1","volume-title":"Kankanhalli","author":"Zhang Jingfeng","year":"2020","unstructured":"Jingfeng Zhang, Jianing Zhu, Gang Niu, Bo Han, Masashi Sugiyama, and Mohan S. Kankanhalli. 2020. Geometry-aware Instance-reweighted Adversarial Training. arXiv: Learning (2020)."}],"event":{"name":"MM '24: The 32nd ACM International Conference on Multimedia","sponsor":["SIGMM ACM Special Interest Group on Multimedia"],"location":"Melbourne VIC Australia","acronym":"MM '24"},"container-title":["Proceedings of the 32nd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3680580","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3664647.3680580","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:17:56Z","timestamp":1750295876000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3680580"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,28]]},"references-count":57,"alternative-id":["10.1145\/3664647.3680580","10.1145\/3664647"],"URL":"https:\/\/doi.org\/10.1145\/3664647.3680580","relation":{},"subject":[],"published":{"date-parts":[[2024,10,28]]},"assertion":[{"value":"2024-10-28","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}