{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T01:18:01Z","timestamp":1743038281967,"version":"3.40.3"},"publisher-location":"Cham","reference-count":43,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030953904"},{"type":"electronic","value":"9783030953911"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-030-95391-1_47","type":"book-chapter","created":{"date-parts":[[2022,2,22]],"date-time":"2022-02-22T09:04:54Z","timestamp":1645520694000},"page":"754-771","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Why is Your Trojan NOT Responding? A\u00a0Quantitative Analysis of\u00a0Failures in\u00a0Backdoor Attacks of\u00a0Neural Networks"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5963-3513","authenticated-orcid":false,"given":"Xingbo","family":"Hu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5172-9497","authenticated-orcid":false,"given":"Yibing","family":"Lan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4728-8000","authenticated-orcid":false,"given":"Ruimin","family":"Gao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6388-2571","authenticated-orcid":false,"given":"Guozhu","family":"Meng","sequence":"additional","affiliation":[]},{"given":"Kai","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,2,23]]},"reference":[{"issue":"5","key":"47_CR1","first-page":"2106","volume":"18","author":"A Agarwal","year":"2021","unstructured":"Agarwal, A., Singh, R., Vatsa, M., Ratha, N.: Image transformation-based defense against adversarial perturbation on deep learning models. IEEE Trans. Dependable Secure Comput. 18(5), 2106\u20132121 (2021)","journal-title":"IEEE Trans. Dependable Secure Comput."},{"key":"47_CR2","unstructured":"Bagdasaryan, E., Shmatikov, V.: Blind backdoors in deep learning models. arXiv abs\/2005.03823 (2020)"},{"key":"47_CR3","unstructured":"Chen, X., Liu, C., Li, B., Lu, K., Song, D.: Targeted backdoor attacks on deep learning systems using data poisoning. CoRR abs\/1712.05526 (2017)"},{"key":"47_CR4","doi-asserted-by":"crossref","unstructured":"Cheng, S., Liu, Y., Ma, S., Zhang, X.: Deep feature space trojan attack of neural networks by controlled detoxification. In: AAAI, pp. 1148\u20131156 (2021)","DOI":"10.1609\/aaai.v35i2.16201"},{"key":"47_CR5","doi-asserted-by":"crossref","unstructured":"Doan, B.G., Abbasnejad, E., Ranasinghe, D.C.: Februus: input purification defense against trojan attacks on deep neural network systems. In: ACSAC 2020: Annual Computer Security Applications Conference, Virtual Event\/Austin, TX, USA, 7\u201311 December 2020, pp. 897\u2013912. ACM (2020)","DOI":"10.1145\/3427228.3427264"},{"key":"47_CR6","doi-asserted-by":"crossref","unstructured":"Dumford, J., Scheirer, W.: Backdooring convolutional neural networks via targeted weight perturbations. In: 2020 IEEE International Joint Conference on Biometrics (IJCB), pp. 1\u20139 (2020)","DOI":"10.1109\/IJCB48548.2020.9304875"},{"key":"47_CR7","doi-asserted-by":"crossref","unstructured":"Gao, Y., Xu, C., Wang, D., Chen, S., Ranasinghe, D.C., Nepal, S.: STRIP: a defence against trojan attacks on deep neural networks. In: Balenson, D. (ed.) ACSAC, pp. 113\u2013125. ACM (2019)","DOI":"10.1145\/3359789.3359790"},{"key":"47_CR8","unstructured":"Gu, T., Dolan-Gavitt, B., Garg, S.: Badnets: identifying vulnerabilities in the machine learning model supply chain. CoRR abs\/1708.06733 (2017)"},{"key":"47_CR9","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: IEEE CVPR, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"47_CR10","unstructured":"He, Y., Meng, G., Chen, K., He, J., Hu, X.: Deepobliviate: a powerful charm for erasing data residual memory in deep neural networks. CoRR abs\/2105.06209 (2021). https:\/\/arxiv.org\/abs\/2105.06209"},{"key":"47_CR11","unstructured":"He, Y., Meng, G., Chen, K., He, J., Hu, X.: DRMI: a dataset reduction technology based on mutual information for black-box attacks. In: Proceedings of the 30th USENIX Security Symposium (USENIX), August 2021"},{"key":"47_CR12","doi-asserted-by":"publisher","unstructured":"He, Y., Meng, G., Chen, K., Hu, X., He, J.: Towards security threats of deep learning systems: a survey, pp. 1\u201328 (2020). https:\/\/doi.org\/10.1109\/TSE.2020.3034721","DOI":"10.1109\/TSE.2020.3034721"},{"key":"47_CR13","doi-asserted-by":"crossref","unstructured":"Kolouri, S., Saha, A., Pirsiavash, H., Hoffmann, H.: Universal litmus patterns: revealing backdoor attacks in CNNs. In: CVPR, pp. 298\u2013307. Computer Vision Foundation\/IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.00038"},{"issue":"11","key":"47_CR14","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y Lecun","year":"1998","unstructured":"Lecun, Y., Bottou, L., Bengio, Y., Haffner, P.: Gradient-based learning applied to document recognition. Proc. IEEE 86(11), 2278\u20132324 (1998)","journal-title":"Proc. IEEE"},{"key":"47_CR15","unstructured":"LeCun, Y.: The MNIST database of handwritten digits (2017). http:\/\/yann.lecun.com\/exdb\/mnist\/"},{"key":"47_CR16","first-page":"2088","volume":"18","author":"S Li","year":"2021","unstructured":"Li, S., Xue, M., Zhao, B.Z.H., Zhu, H., Zhang, X.: Invisible backdoor attacks on deep neural networks via steganography and regularization. IEEE Trans. Dependable Secure Comput. 18, 2088\u20132105 (2021)","journal-title":"IEEE Trans. Dependable Secure Comput."},{"key":"47_CR17","unstructured":"Li, Y., Zhai, T., Wu, B., Jiang, Y., Li, Z., Xia, S.: Rethinking the trigger of backdoor attack. arXiv abs\/2004.04692 (2020)"},{"key":"47_CR18","doi-asserted-by":"crossref","unstructured":"Li, Y., Li, Y., Wu, B., Li, L., He, R., Lyu, S.: Backdoor attack with sample-specific triggers. arXiv abs\/2012.03816 (2020)","DOI":"10.1109\/ICCV48922.2021.01615"},{"key":"47_CR19","doi-asserted-by":"crossref","unstructured":"Lin, J., Xu, L., Liu, Y., Zhang, X.: Composite backdoor attack for deep neural network by mixing existing benign features. In: CCS (2020)","DOI":"10.1145\/3372297.3423362"},{"key":"47_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"273","DOI":"10.1007\/978-3-030-00470-5_13","volume-title":"Research in Attacks, Intrusions, and Defenses","author":"K Liu","year":"2018","unstructured":"Liu, K., Dolan-Gavitt, B., Garg, S.: Fine-pruning: defending against backdooring attacks on deep neural networks. In: Bailey, M., Holz, T., Stamatogiannakis, M., Ioannidis, S. (eds.) RAID 2018. LNCS, vol. 11050, pp. 273\u2013294. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-00470-5_13"},{"key":"47_CR21","doi-asserted-by":"crossref","unstructured":"Liu, Y., Lee, W., Tao, G., Ma, S., Aafer, Y., Zhang, X.: ABS: scanning neural networks for back-doors by artificial brain stimulation. In: Cavallaro, L., Kinder, J., Wang, X., Katz, J. (eds.) CCS, pp. 1265\u20131282. ACM (2019)","DOI":"10.1145\/3319535.3363216"},{"key":"47_CR22","doi-asserted-by":"crossref","unstructured":"Liu, Y., et al.: Trojaning attack on neural networks. In: NDSS. The Internet Society (2018)","DOI":"10.14722\/ndss.2018.23291"},{"key":"47_CR23","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"182","DOI":"10.1007\/978-3-030-58607-2_11","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y Liu","year":"2020","unstructured":"Liu, Y., Ma, X., Bailey, J., Lu, F.: Reflection backdoor: a natural backdoor attack on deep neural networks. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12355, pp. 182\u2013199. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58607-2_11"},{"key":"47_CR24","unstructured":"Murphy, K.P.: Machine Learning - A Probabilistic Perspective. Adaptive Computation and Machine Learning Series. MIT Press, Cambridge (2012)"},{"key":"47_CR25","unstructured":"Neuroinformatik, I.F.: German Traffic Sign Detection Benchmark (GTSRB) (2019). https:\/\/benchmark.ini.rub.de\/"},{"key":"47_CR26","doi-asserted-by":"publisher","first-page":"12","DOI":"10.1186\/s13635-020-00104-z","volume":"2020","author":"C Pasquini","year":"2020","unstructured":"Pasquini, C., B\u00f6hme, R.: Trembling triggers: exploring the sensitivity of backdoors in DNN-based face recognition. EURASIP J. Inf. Secur. 2020, 12 (2020)","journal-title":"EURASIP J. Inf. Secur."},{"key":"47_CR27","doi-asserted-by":"crossref","unstructured":"Quiring, E., Rieck, K.: Backdooring and poisoning neural networks with image-scaling attacks. In: 2020 IEEE Security and Privacy Workshops (SPW), pp. 41\u201347 (2020)","DOI":"10.1109\/SPW50608.2020.00024"},{"key":"47_CR28","doi-asserted-by":"crossref","unstructured":"Rakin, A.S., He, Z., Fan, D.: TBT: targeted neural network attack with bit trojan. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 13195\u201313204 (2020)","DOI":"10.1109\/CVPR42600.2020.01321"},{"key":"47_CR29","doi-asserted-by":"crossref","unstructured":"Saha, A., Subramanya, A., Pirsiavash, H.: Hidden trigger backdoor attacks. In: AAAI (2020)","DOI":"10.1609\/aaai.v34i07.6871"},{"key":"47_CR30","doi-asserted-by":"publisher","first-page":"60","DOI":"10.1186\/s40537-019-0197-0","volume":"6","author":"C Shorten","year":"2019","unstructured":"Shorten, C., Khoshgoftaar, T.M.: A survey on image data augmentation for deep learning. J. Big Data 6, 60 (2019)","journal-title":"J. Big Data"},{"key":"47_CR31","unstructured":"Smilkov, D., Thorat, N., Kim, B., Vi\u00e9gas, F., Wattenberg, M.: Smoothgrad: removing noise by adding noise (2017)"},{"key":"47_CR32","unstructured":"Sundararajan, M., Taly, A., Yan, Q.: Axiomatic attribution for deep networks. In: Precup, D., Teh, Y.W. (eds.) Proceedings of the 34th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 70, pp. 3319\u20133328. PMLR (2017). https:\/\/proceedings.mlr.press\/v70\/sundararajan17a.html"},{"key":"47_CR33","unstructured":"Szegedy, C., et al.: Intriguing properties of neural networks. In: Bengio, Y., LeCun, Y. (eds.) ICLR (2014)"},{"key":"47_CR34","doi-asserted-by":"crossref","unstructured":"Tang, R., Du, M., Liu, N., Yang, F., Hu, X.: An embarrassingly simple approach for trojan attack in deep neural networks. In: KDD (2020)","DOI":"10.1145\/3394486.3403064"},{"key":"47_CR35","unstructured":"Turner, A., Tsipras, D., Madry, A.: Label-consistent backdoor attacks. arXiv abs\/1912.02771 (2019)"},{"key":"47_CR36","doi-asserted-by":"crossref","unstructured":"Wang, B., et al.: Neural cleanse: identifying and mitigating backdoor attacks in neural networks. In: 2019 IEEE Symposium on Security and Privacy, SP 2019, San Francisco, CA, USA, 19\u201323 May 2019, pp. 707\u2013723. IEEE (2019)","DOI":"10.1109\/SP.2019.00031"},{"key":"47_CR37","unstructured":"Weng, C.H., Lee, Y.T., Wu, S.H.: On the trade-off between adversarial and backdoor robustness. In: NeurIPS (2020)"},{"key":"47_CR38","doi-asserted-by":"crossref","unstructured":"Wenger, E., Passananti, J., Bhagoji, A.N., Yao, Y., Zheng, H., Zhao, B.Y.: Backdoor attacks against deep learning systems in the physical world. In: CVPR, pp. 6206\u20136215. Computer Vision Foundation\/IEEE (2021)","DOI":"10.1109\/CVPR46437.2021.00614"},{"key":"47_CR39","unstructured":"Xiao, Q., Chen, Y., Shen, C., Chen, Y., Li, K.: Seeing is not believing: camouflage attacks on image scaling algorithms. In: USENIX Security Symposium (2019)"},{"key":"47_CR40","doi-asserted-by":"crossref","unstructured":"Xue, M., He, C., Sun, S., Wang, J., Liu, W.: Robust backdoor attacks against deep neural networks in real physical world. arXiv abs\/2104.07395 (2021)","DOI":"10.1109\/TrustCom53373.2021.00093"},{"key":"47_CR41","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"101","DOI":"10.1007\/978-3-030-42921-8_6","volume-title":"Information Security and Cryptology","author":"M Zha","year":"2020","unstructured":"Zha, M., Meng, G., Lin, C., Zhou, Z., Chen, K.: RoLMA: a practical adversarial attack against deep learning-based LPR systems. In: Liu, Z., Yung, M. (eds.) Inscrypt 2019. LNCS, vol. 12020, pp. 101\u2013117. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-42921-8_6"},{"key":"47_CR42","doi-asserted-by":"crossref","unstructured":"Zhao, S., Ma, X., Zheng, X., Bailey, J., Chen, J., Jiang, Y.: Clean-label backdoor attacks on video recognition models. In: CVPR, pp. 14431\u201314440. Computer Vision Foundation\/IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.01445"},{"key":"47_CR43","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Zhu, H., Liang, R., Shen, Q., Zhang, S., Chen, K.: Seeing isn\u2019t believing: towards more robust adversarial attack against real world object detectors. In: Cavallaro, L., Kinder, J., Wang, X., Katz, J. (eds.) CCS, pp. 1989\u20132004. ACM (2019)","DOI":"10.1145\/3319535.3354259"}],"container-title":["Lecture Notes in Computer Science","Algorithms and Architectures for Parallel Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-95391-1_47","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T17:28:07Z","timestamp":1674840487000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-95391-1_47"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783030953904","9783030953911"],"references-count":43,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-95391-1_47","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"23 February 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICA3PP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Algorithms and Architectures for Parallel Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"3 December 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 December 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ica3pp2021","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/nsclab.org\/ica3pp2021\/index.html","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"403","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"145","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"36% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.12","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.27","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}