{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,25]],"date-time":"2025-07-25T10:19:33Z","timestamp":1753438773999,"version":"3.41.0"},"publisher-location":"Cham","reference-count":57,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031926471","type":"print"},{"value":"9783031926488","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-92648-8_9","type":"book-chapter","created":{"date-parts":[[2025,5,30]],"date-time":"2025-05-30T16:28:36Z","timestamp":1748622516000},"page":"134-151","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Top-GAP: Integrating Size Priors in\u00a0CNNs for\u00a0More Interpretability, Robustness, and\u00a0Bias Mitigation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7523-5694","authenticated-orcid":false,"given":"Lars","family":"Nieradzik","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9821-1636","authenticated-orcid":false,"given":"Henrike","family":"Stephani","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1327-1243","authenticated-orcid":false,"given":"Janis","family":"Keuper","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,12]]},"reference":[{"key":"9_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"484","DOI":"10.1007\/978-3-030-58592-1_29","volume-title":"Computer Vision \u2013 ECCV 2020","author":"M Andriushchenko","year":"2020","unstructured":"Andriushchenko, M., Croce, F., Flammarion, N., Hein, M.: Square attack: a query-efficient black-box adversarial attack via random search. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12368, pp. 484\u2013501. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58592-1_29"},{"key":"9_CR2","unstructured":"Andriushchenko, M., Flammarion, N.: Understanding and improving fast adversarial training (2020)"},{"key":"9_CR3","unstructured":"Athalye, A., Carlini, N., Wagner, D.: Obfuscated gradients give a false sense of security: circumventing defenses to adversarial examples (2018)"},{"key":"9_CR4","unstructured":"Bolukbasi, T., Chang, K.W., Zou, J., Saligrama, V., Kalai, A.: Man is to computer programmer as woman is to homemaker? Debiasing word embeddings (2016)"},{"key":"9_CR5","unstructured":"Buolamwini, J., Gebru, T.: Gender shades: intersectional accuracy disparities in commercial gender classification. In: Friedler, S.A., Wilson, C. (eds.) Proceedings of the 1st Conference on Fairness, Accountability and Transparency. Proceedings of Machine Learning Research, vol.\u00a081, pp. 77\u201391. PMLR (23\u201324 Feb 2018), https:\/\/proceedings.mlr.press\/v81\/buolamwini18a.html"},{"key":"9_CR6","doi-asserted-by":"crossref","unstructured":"Burns, K., Hendricks, L.A., Saenko, K., Darrell, T., Rohrbach, A.: Women also snowboard: Overcoming bias in captioning models (2019)","DOI":"10.1007\/978-3-030-01219-9_47"},{"key":"9_CR7","unstructured":"Byun, S.Y., Lee, W.: Recipro-cam: Fast gradient-free visual explanations for convolutional neural networks (2023)"},{"key":"9_CR8","doi-asserted-by":"publisher","unstructured":"Cai, J., Hou, J., Lu, Y., Chen, H., Kneip, L., Schwertfeger, S.: Improving CNN-based planar object detection with geometric prior knowledge. In: 2020 IEEE International Symposium on Safety, Security, and Rescue Robotics (SSRR), pp. 387\u2013393 (2020). https:\/\/doi.org\/10.1109\/SSRR50563.2020.9292601","DOI":"10.1109\/SSRR50563.2020.9292601"},{"key":"9_CR9","doi-asserted-by":"publisher","unstructured":"Chattopadhay, A., Sarkar, A., Howlader, P., Balasubramanian, V.N.: Grad-CAM++: generalized gradient-based visual explanations for deep convolutional networks. In: 2018 IEEE Winter Conference on Applications of Computer Vision (WACV). IEEE, March 2018. https:\/\/doi.org\/10.1109\/wacv.2018.00097","DOI":"10.1109\/wacv.2018.00097"},{"key":"9_CR10","unstructured":"Clarysse, J., H\u00f6rrmann, J., Yang, F.: Why adversarial training can hurt robust accuracy (2022)"},{"key":"9_CR11","doi-asserted-by":"publisher","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255 (2009). https:\/\/doi.org\/10.1109\/CVPR.2009.5206848","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"9_CR12","doi-asserted-by":"crossref","unstructured":"Fang, Y., et al.: Eva: Exploring the limits of masked visual representation learning at scale (2022)","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"9_CR13","unstructured":"Fu, R., Hu, Q., Dong, X., Guo, Y., Gao, Y., Li, B.: Axiom-based grad-cam: Towards accurate visualization and explanation of CNNs. CoRR abs\/2008.02312 (2020). https:\/\/arxiv.org\/abs\/2008.02312"},{"key":"9_CR14","doi-asserted-by":"publisher","unstructured":"Goodfellow, I.J., Shlens, J., Szegedy, C.: Explaining and harnessing adversarial examples (2014). https:\/\/doi.org\/10.48550\/ARXIV.1412.6572","DOI":"10.48550\/ARXIV.1412.6572"},{"key":"9_CR15","unstructured":"Goodfellow, I.J., Shlens, J., Szegedy, C.: Explaining and harnessing adversarial examples (2015)"},{"key":"9_CR16","unstructured":"Gowal, S., Rebuffi, S., Wiles, O., Stimberg, F., Calian, D.A., Mann, T.A.: Improving robustness using generated data. CoRR abs\/2110.09468 (2021). https:\/\/arxiv.org\/abs\/2110.09468"},{"key":"9_CR17","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition (2015)","DOI":"10.1109\/CVPR.2016.90"},{"key":"9_CR18","doi-asserted-by":"crossref","unstructured":"He, Y., Yang, X., Chang, C.M., Xie, H., Igarashi, T.: Efficient human-in-the-loop system for guiding DNNs attention (2023)","DOI":"10.1145\/3581641.3584074"},{"key":"9_CR19","unstructured":"Hendrycks, D., Dietterich, T.G.: Benchmarking neural network robustness to common corruptions and perturbations. CoRR abs\/1903.12261 (2019). http:\/\/arxiv.org\/abs\/1903.12261"},{"key":"9_CR20","doi-asserted-by":"publisher","unstructured":"Hou, W., Tao, X., Xu, D.: Combining prior knowledge with CNN for weak scratch inspection of optical components. IEEE Trans. Instrum. Measur. 70, 1\u201311 (2021). https:\/\/doi.org\/10.1109\/TIM.2020.3011299","DOI":"10.1109\/TIM.2020.3011299"},{"key":"9_CR21","unstructured":"Huang, H., Wang, Y., Erfani, S.M., Gu, Q., Bailey, J., Ma, X.: Exploring architectural ingredients of adversarially robust deep neural networks (2022)"},{"key":"9_CR22","doi-asserted-by":"publisher","first-page":"5875","DOI":"10.1109\/TIP.2021.3089943","volume":"30","author":"PT Jiang","year":"2021","unstructured":"Jiang, P.T., Zhang, C.B., Hou, Q., Cheng, M.M., Wei, Y.: LayerCam: exploring hierarchical class activation maps for localization. IEEE Trans. Image Process. 30, 5875\u20135888 (2021). https:\/\/doi.org\/10.1109\/TIP.2021.3089943","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR23","doi-asserted-by":"publisher","unstructured":"Jo, S., Yu, I.J.: Puzzle-CAM: improved localization via matching partial and full features. In: 2021 IEEE International Conference on Image Processing (ICIP). IEEE, September 2021. https:\/\/doi.org\/10.1109\/icip42928.2021.9506058","DOI":"10.1109\/icip42928.2021.9506058"},{"key":"9_CR24","doi-asserted-by":"crossref","unstructured":"Kirillov, A., et al.: Segment anything (2023)","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"9_CR25","doi-asserted-by":"crossref","unstructured":"Li, K., Wu, Z., Peng, K., Ernst, J., Fu, Y.: Tell me where to look: guided attention inference network. CoRR abs\/1802.10171 (2018). http:\/\/arxiv.org\/abs\/1802.10171","DOI":"10.1109\/CVPR.2018.00960"},{"key":"9_CR26","unstructured":"Li, Z., Xu, C.: Discover the unknown biased attribute of an image classifier. CoRR abs\/2104.14556 (2021). https:\/\/arxiv.org\/abs\/2104.14556"},{"key":"9_CR27","unstructured":"Lin, M., Chen, Q., Yan, S.: Network in network (2014)"},{"key":"9_CR28","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., Doll\u00e1r, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection (2017)","DOI":"10.1109\/CVPR.2017.106"},{"key":"9_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"9_CR30","doi-asserted-by":"crossref","unstructured":"Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s (2022)","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"9_CR31","unstructured":"Luo, W., Li, Y., Urtasun, R., Zemel, R.: Understanding the effective receptive field in deep convolutional neural networks (2017)"},{"key":"9_CR32","doi-asserted-by":"publisher","first-page":"508","DOI":"10.1093\/mam\/ozae038","volume":"30","author":"L Nieradzik","year":"2023","unstructured":"Nieradzik, L., et al.: Automating wood species detection and classification in microscopic images of fibrous materials with deep learning. Microsc. Microanal. 30, 508\u2013520 (2023)","journal-title":"Microsc. Microanal."},{"key":"9_CR33","unstructured":"Omeiza, D., Speakman, S., Cintas, C., Weldemariam, K.: Smooth grad-cam++: an enhanced inference level visualization technique for deep convolutional neural network models. CoRR abs\/1908.01224 (2019). http:\/\/arxiv.org\/abs\/1908.01224"},{"key":"9_CR34","doi-asserted-by":"crossref","unstructured":"Parkhi, O.M., Vedaldi, A., Zisserman, A., Jawahar, C.V.: Cats and dogs. In: IEEE Conference on Computer Vision and Pattern Recognition (2012)","DOI":"10.1109\/CVPR.2012.6248092"},{"key":"9_CR35","doi-asserted-by":"crossref","unstructured":"Pathak, D., Kr\u00e4henb\u00fchl, P., Darrell, T.: Constrained convolutional neural networks for weakly supervised segmentation (2015)","DOI":"10.1109\/ICCV.2015.209"},{"key":"9_CR36","unstructured":"Peng, S., et al.: Robust principles: architectural design principles for adversarially robust CNNs (2023)"},{"key":"9_CR37","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision (2021)"},{"key":"9_CR38","unstructured":"Raghunathan, A., Xie, S.M., Yang, F., Duchi, J.C., Liang, P.: Adversarial training can hurt generalization. CoRR abs\/1906.06032 (2019). http:\/\/arxiv.org\/abs\/1906.06032"},{"key":"9_CR39","doi-asserted-by":"publisher","unstructured":"Rajabi, A., Yazdani-Jahromi, M., Garibay, O.O., Sukthankar, G.: Through a fair looking-glass: mitigating bias in image datasets. In: Degen, H., Ntoa, S. (eds) Artificial Intelligence in HCI. HCII 2023. LNCS, vol. 14050, pp. 446\u2013459. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-35891-3_27","DOI":"10.1007\/978-3-031-35891-3_27"},{"key":"9_CR40","doi-asserted-by":"crossref","unstructured":"Redmon, J., Farhadi, A.: Yolo9000: Better, faster, stronger (2016)","DOI":"10.1109\/CVPR.2017.690"},{"key":"9_CR41","doi-asserted-by":"crossref","unstructured":"Ribeiro, M.T., Singh, S., Guestrin, C.: \u201cwhy should i trust you?\u201d: Explaining the predictions of any classifier (2016)","DOI":"10.18653\/v1\/N16-3020"},{"key":"9_CR42","unstructured":"Sagawa, S., Koh, P.W., Hashimoto, T.B., Liang, P.: Distributionally robust neural networks for group shifts: on the importance of regularization for worst-case generalization. CoRR abs\/1911.08731 (2019). http:\/\/arxiv.org\/abs\/1911.08731"},{"key":"9_CR43","doi-asserted-by":"publisher","unstructured":"Selvaraju, R.R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., Batra, D.: Grad-CAM: visual explanations from deep networks via gradient-based localization. Int. J. Comput. Vision 128(2), 336\u2013359 (2019). https:\/\/doi.org\/10.1007\/s11263-019-01228-7","DOI":"10.1007\/s11263-019-01228-7"},{"key":"9_CR44","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition (2015)"},{"key":"9_CR45","unstructured":"Sun, W., Liu, Z., Zhang, Y., Zhong, Y., Barnes, N.: An alternative to WSSS? An empirical study of the segment anything model (SAM) on weakly-supervised semantic segmentation problems (2023)"},{"key":"9_CR46","unstructured":"Tan, M., Le, Q.V.: Efficientnet: rethinking model scaling for convolutional neural networks (2020)"},{"key":"9_CR47","unstructured":"Wah, C., Branson, S., Welinder, P., Perona, P., Belongie, S.: The Caltech-UCSD Birds-200-2011 Dataset, July 2011"},{"key":"9_CR48","doi-asserted-by":"publisher","unstructured":"Wang, C., Siddiqi, K.: Differential geometry boosts convolutional neural networks for object detection. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1006\u20131013 (2016). https:\/\/doi.org\/10.1109\/CVPRW.2016.130","DOI":"10.1109\/CVPRW.2016.130"},{"key":"9_CR49","doi-asserted-by":"crossref","unstructured":"Wang, H., et al.: Score-CAM: score-weighted visual explanations for convolutional neural networks (2020)","DOI":"10.1109\/CVPRW50498.2020.00020"},{"key":"9_CR50","unstructured":"Wang, H., Ge, S., Xing, E.P., Lipton, Z.C.: Learning robust global representations by penalizing local predictive power. CoRR abs\/1905.13549 (2019). http:\/\/arxiv.org\/abs\/1905.13549"},{"key":"9_CR51","unstructured":"Wang, Z., Pang, T., Du, C., Lin, M., Liu, W., Yan, S.: Better diffusion models further improve adversarial training (2023)"},{"key":"9_CR52","doi-asserted-by":"publisher","unstructured":"Wightman, R.: Pytorch image models. https:\/\/github.com\/rwightman\/pytorch-image-models (2019). https:\/\/doi.org\/10.5281\/zenodo.4414861","DOI":"10.5281\/zenodo.4414861"},{"key":"9_CR53","unstructured":"Yang, X., Wu, B., Sato, I., Igarashi, T.: Directing DNNs attention for facial attribution classification using gradient-weighted class activation mapping. CoRR abs\/1905.00593 (2019). http:\/\/arxiv.org\/abs\/1905.00593"},{"key":"9_CR54","doi-asserted-by":"crossref","unstructured":"Zar\u00e1ndy, \u00c1., Rekeczky, C., Szolgay, P., Chua, L.O.: Overview of CNN research: 25 years history and the current trends. In: 2015 IEEE International Symposium on Circuits and Systems (ISCAS), pp. 401\u2013404. IEEE (2015)","DOI":"10.1109\/ISCAS.2015.7168655"},{"key":"9_CR55","doi-asserted-by":"publisher","unstructured":"Zhao, J., Wang, T., Yatskar, M., Ordonez, V., Chang, K.W.: Men also like shopping: reducing gender bias amplification using corpus-level constraints. In: Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pp. 2979\u20132989. Association for Computational Linguistics, Copenhagen, Denmark, September 2017. https:\/\/doi.org\/10.18653\/v1\/D17-1323, https:\/\/aclanthology.org\/D17-1323","DOI":"10.18653\/v1\/D17-1323"},{"key":"9_CR56","doi-asserted-by":"crossref","unstructured":"Zhou, B., Khosla, A., Lapedriza, A., Oliva, A., Torralba, A.: Learning deep features for discriminative localization (2015)","DOI":"10.1109\/CVPR.2016.319"},{"key":"9_CR57","doi-asserted-by":"publisher","unstructured":"Zhou, X., Zhu, M., Pavlakos, G., Leonardos, S., Derpanis, K.G., Daniilidis, K.: Monocap: monocular human motion capture using a CNN coupled with a geometric prior. IEEE Trans. Pattern Anal. Mach. Intell.41(04), 901\u2013914 (2019). https:\/\/doi.org\/10.1109\/TPAMI.2018.2816031","DOI":"10.1109\/TPAMI.2018.2816031"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-92648-8_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,30]],"date-time":"2025-05-30T16:28:53Z","timestamp":1748622533000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-92648-8_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031926471","9783031926488"],"references-count":57,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-92648-8_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"12 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}