{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T23:16:35Z","timestamp":1743117395259,"version":"3.40.3"},"publisher-location":"Cham","reference-count":33,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031533105"},{"type":"electronic","value":"9783031533112"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-53311-2_18","type":"book-chapter","created":{"date-parts":[[2024,1,27]],"date-time":"2024-01-27T21:37:36Z","timestamp":1706391456000},"page":"242-256","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Exploring Imperceptible Adversarial Examples in\u00a0$$YC_bC_r$$ Color Space"],"prefix":"10.1007","author":[{"given":"Pei","family":"Chen","sequence":"first","affiliation":[]},{"given":"Zhiyong","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Meng","family":"Xing","sequence":"additional","affiliation":[]},{"given":"Yiming","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Jinqing","family":"Zheng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,28]]},"reference":[{"key":"18_CR1","doi-asserted-by":"publisher","first-page":"7338","DOI":"10.1109\/TIP.2022.3204206","volume":"31","author":"A 
Agarwal","year":"2022","unstructured":"Agarwal, A., et al.: Crafting adversarial perturbations via transformed image component swapping. IEEE Trans. Image Process. 31, 7338\u20137349 (2022)","journal-title":"IEEE Trans. Image Process."},{"key":"18_CR2","doi-asserted-by":"crossref","unstructured":"Carlini, N., Wagner, D.: Towards evaluating the robustness of neural networks. In: 2017 IEEE Symposium On Security and Privacy (sp), pp. 39\u201357. IEEE (2017)","DOI":"10.1109\/SP.2017.49"},{"key":"18_CR3","unstructured":"Croce, F., Hein, M.: Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In: International Conference On Machine Learning, pp. 2206\u20132216. PMLR (2020)"},{"key":"18_CR4","doi-asserted-by":"crossref","unstructured":"Das, N., et al.: Shield: Fast, practical defense and vaccination for deep learning using jpeg compression. In: Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 196\u2013204 (2018)","DOI":"10.1145\/3219819.3219910"},{"key":"18_CR5","doi-asserted-by":"crossref","unstructured":"Dong, Y., et al.: Boosting adversarial attacks with momentum. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 9185\u20139193 (2018)","DOI":"10.1109\/CVPR.2018.00957"},{"key":"18_CR6","doi-asserted-by":"crossref","unstructured":"Duan, R., Chen, Y., Niu, D., Yang, Y., Qin, A.K., He, Y.: Advdrop: adversarial attack to dnns by dropping information. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7506\u20137515 (2021)","DOI":"10.1109\/ICCV48922.2021.00741"},{"key":"18_CR7","doi-asserted-by":"crossref","unstructured":"Duan, R., et al.: Adversarial laser beam: Effective physical-world attack to dnns in a blink. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
16062\u201316071 (2021)","DOI":"10.1109\/CVPR46437.2021.01580"},{"key":"18_CR8","doi-asserted-by":"crossref","unstructured":"Engstrom, L., Ilyas, A., Salman, H., Santurkar, S., Tsipras, D.: Robustness (python library). https:\/\/github.com\/MadryLab\/robustness 4(4), 4-3 (2019)","DOI":"10.23915\/distill.00019.7"},{"key":"18_CR9","unstructured":"Goodfellow, I.J., Shlens, J., Szegedy, C.: Explaining and harnessing adversarial examples. arXiv preprint arXiv:1412.6572 (2014)"},{"key":"18_CR10","unstructured":"Guo, C., Frank, J.S., Weinberger, K.Q.: Low frequency adversarial perturbation. arXiv preprint arXiv:1809.08758 (2018)"},{"key":"18_CR11","unstructured":"Guo, C., Rana, M., Cisse, M., Van Der Maaten, L.: Countering adversarial images using input transformations. arXiv preprint arXiv:1711.00117 (2017)"},{"key":"18_CR12","first-page":"975","volume":"33","author":"Q Guo","year":"2020","unstructured":"Guo, Q., et al.: Watch out! motion is blurring the vision of your deep neural networks. Adv. Neural. Inf. Process. Syst. 33, 975\u2013985 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"18_CR13","unstructured":"Heusel, M., et al.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"18_CR14","doi-asserted-by":"crossref","unstructured":"Hosseini, H., Poovendran, R.: Semantic adversarial examples. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 1614\u20131619 (2018)","DOI":"10.1109\/CVPRW.2018.00212"},{"key":"18_CR15","doi-asserted-by":"publisher","unstructured":"Hu, Q., Liu, D., Hu, W.: Exploring the devil in graph spectral domain for 3D point cloud attacks. In: European Conference on Computer Vision, pp. 229\u2013248. Springer (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-20062-5_14","DOI":"10.1007\/978-3-031-20062-5_14"},{"key":"18_CR16","doi-asserted-by":"crossref","unstructured":"Jiang, L., Dai, B., Wu, W., Loy, C.C.: Focal frequency loss for image reconstruction and synthesis. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 13919\u201313929 (2021)","DOI":"10.1109\/ICCV48922.2021.01366"},{"key":"18_CR17","unstructured":"Krizhevsky, A., Hinton, G., et al.: Learning multiple layers of features from tiny images (2009)"},{"key":"18_CR18","unstructured":"Kurakin, A., Goodfellow, I., Bengio, S.: Adversarial machine learning at scale. arXiv preprint arXiv:1611.01236 (2016)"},{"key":"18_CR19","doi-asserted-by":"crossref","unstructured":"Li, Y., Liu, B.: Improved edge detection algorithm for canny operator. In: 2022 IEEE 10th Joint International Information Technology and Artificial Intelligence Conference (ITAIC), vol. 10, pp. 1\u20135. IEEE (2022)","DOI":"10.1109\/ITAIC54216.2022.9836608"},{"key":"18_CR20","doi-asserted-by":"crossref","unstructured":"Luo, C., Lin, Q., Xie, W., Wu, B., Xie, J., Shen, L.: Frequency-driven imperceptible adversarial attack on semantic similarity. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15315\u201315324 (2022)","DOI":"10.1109\/CVPR52688.2022.01488"},{"key":"18_CR21","unstructured":"Madry, A., Makelov, A., Schmidt, L., Tsipras, D., Vladu, A.: Towards deep learning models resistant to adversarial attacks. arXiv preprint arXiv:1706.06083 (2017)"},{"issue":"10","key":"18_CR22","doi-asserted-by":"publisher","first-page":"2452","DOI":"10.1109\/TPAMI.2018.2861800","volume":"41","author":"KR Mopuri","year":"2018","unstructured":"Mopuri, K.R., Ganeshan, A., Babu, R.V.: Generalizable data-free objective for crafting universal adversarial perturbations. IEEE Trans. Pattern Anal. Mach. Intell. 41(10), 2452\u20132465 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"18_CR23","doi-asserted-by":"crossref","unstructured":"Prakash, A., Moran, N., Garber, S., DiLillo, A., Storer, J.: Deflecting adversarial attacks with pixel deflection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8571\u20138580 (2018)","DOI":"10.1109\/CVPR.2018.00894"},{"key":"18_CR24","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: Imagenet large scale visual recognition challenge. Int. J. Comput. Vision 115, 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vision"},{"issue":"4","key":"18_CR25","doi-asserted-by":"publisher","first-page":"30","DOI":"10.1145\/103085.103089","volume":"34","author":"GK Wallace","year":"1991","unstructured":"Wallace, G.K.: The jpeg still picture compression standard. Commun. ACM 34(4), 30\u201344 (1991)","journal-title":"Commun. ACM"},{"key":"18_CR26","doi-asserted-by":"crossref","unstructured":"Wang, H., et al.: High-frequency component helps explain the generalization of convolutional neural networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8684\u20138694 (2020)","DOI":"10.1109\/CVPR42600.2020.00871"},{"issue":"4","key":"18_CR27","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"18_CR28","unstructured":"Xiao, C., Zhu, J.Y., Li, B., He, W., Liu, M., Song, D.: Spatially transformed adversarial examples. 
arXiv preprint arXiv:1801.02612 (2018)"},{"key":"18_CR29","doi-asserted-by":"crossref","unstructured":"Xu, W., Evans, D., Qi, Y.: Feature squeezing: Detecting adversarial examples in deep neural networks. arXiv preprint arXiv:1704.01155 (2017)","DOI":"10.14722\/ndss.2018.23198"},{"key":"18_CR30","unstructured":"Yin, D., Gontijo Lopes, R., Shlens, J., Cubuk, E.D., Gilmer, J.: A fourier perspective on model robustness in computer vision. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"18_CR31","unstructured":"Yuan, S., Zhang, Q., Gao, L., Cheng, Y., Song, J.: Natural color fool: towards boosting black-box unrestricted attacks. arXiv preprint arXiv:2210.02041 (2022)"},{"key":"18_CR32","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"18_CR33","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Liu, Z., Larson, M.: Towards large yet imperceptible adversarial image perturbations with perceptual color distance. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
1039\u20131048 (2020)","DOI":"10.1109\/CVPR42600.2020.00112"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-53311-2_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,12]],"date-time":"2024-03-12T15:27:05Z","timestamp":1710257225000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-53311-2_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031533105","9783031533112"],"references-count":33,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-53311-2_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"28 January 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Amsterdam","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"The Netherlands","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 January 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 February 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"ConfTool Pro","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"297","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"112","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"38% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review 
Information (provided by the conference organizers)"}},{"value":"3.2","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.2","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}