{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T00:15:19Z","timestamp":1773706519256,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":79,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,6,20]],"date-time":"2022-06-20T00:00:00Z","timestamp":1655683200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,6,21]]},"DOI":"10.1145\/3531146.3533074","type":"proceedings-article","created":{"date-parts":[[2022,6,20]],"date-time":"2022-06-20T14:27:10Z","timestamp":1655735230000},"page":"70-88","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":13,"title":["Fairness Indicators for Systematic Assessments of Visual Feature Extractors"],"prefix":"10.1145","author":[{"given":"Priya","family":"Goyal","sequence":"first","affiliation":[{"name":"Meta, USA"}]},{"given":"Adriana Romero","family":"Soriano","sequence":"additional","affiliation":[{"name":"Meta, Canada"}]},{"given":"Caner","family":"Hazirbas","sequence":"additional","affiliation":[{"name":"Meta, USA"}]},{"given":"Levent","family":"Sagun","sequence":"additional","affiliation":[{"name":"Meta, France"}]},{"given":"Nicolas","family":"Usunier","sequence":"additional","affiliation":[{"name":"Meta, France"}]}],"member":"320","published-online":{"date-parts":[[2022,6,20]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Accuracy-Fairness Trade-Off. Proc. ACM Hum.-Comput. Interact. 4, CSCW3 (Jan","author":"Barlas Pinar","year":"2021","unstructured":"Pinar Barlas, Kyriakos Kyriakou, Olivia Guest, Styliani Kleanthous, and Jahna Otterbacher. 2021. 
To \u201cSee\u201d is to Stereotype: Image Tagging Algorithms, Gender Recognition, and the Accuracy-Fairness Trade-Off. Proc. ACM Hum.-Comput. Interact. 4, CSCW3 (Jan 2021)."},{"key":"e_1_3_2_1_2_1","unstructured":"Samy Bengio Inioluwa\u00a0Deborah Raji Alina Beygelzimer Yann Dauphin Percy Liang and Jennifer\u00a0Wortman Vaughan. 2021. A Retrospective on the NeurIPS 2021 Ethics Review Process. https:\/\/blog.neurips.cc\/2021\/12\/03\/a-retrospective-on-the-neurips-2021-ethics-review-process\/ Retrieved: 2021-12-09."},{"key":"e_1_3_2_1_3_1","unstructured":"Maxim Berman Herv\u00e9 J\u00e9gou Andrea Vedaldi Iasonas Kokkinos and Matthijs Douze. 2019. MultiGrain: a unified image embedding for classes and instances. arxiv:1902.05509\u00a0[cs.CV]"},{"key":"e_1_3_2_1_4_1","unstructured":"Shruti Bhargava and David Forsyth. 2019. Exposing and Correcting the Gender Bias in Image Captioning Datasets and Models. arxiv:1912.00578\u00a0[cs.CV]"},{"key":"e_1_3_2_1_5_1","volume-title":"ACM Conference on Fairness, Accountability, and Transparency.","author":"Buolamwini Joy","year":"2018","unstructured":"Joy Buolamwini and Timnit Gebru. 2018. Gender shades: Intersectional accuracy disparities in commercial gender classification. In ACM Conference on Fairness, Accountability, and Transparency."},{"key":"e_1_3_2_1_6_1","unstructured":"Mathilde Caron Ishan Misra Julien Mairal Priya Goyal Piotr Bojanowski and Armand Joulin. 2020. Unsupervised Learning of Visual Features by Contrasting Cluster Assignments. In Advances in Neural Information Processing Systems."},{"key":"e_1_3_2_1_7_1","volume-title":"Fair prediction with disparate impact: A study of bias in recidivism prediction instruments. Big data 5, 2","author":"Chouldechova Alexandra","year":"2017","unstructured":"Alexandra Chouldechova. 2017. Fair prediction with disparate impact: A study of bias in recidivism prediction instruments. Big data 5, 2 (2017)."},{"key":"e_1_3_2_1_8_1","unstructured":"Sam Corbett-Davies and Sharad Goel. 
2018. The Measure and Mismeasure of Fairness: A Critical Review of Fair Machine Learning. arxiv:1808.00023\u00a0[cs.CY]"},{"key":"e_1_3_2_1_9_1","volume-title":"The Atlas of AI","author":"Crawford Kate","unstructured":"Kate Crawford. 2021. The Atlas of AI. Yale University Press."},{"key":"e_1_3_2_1_10_1","volume-title":"Excavating AI: The Politics of Training Sets for Machine Learning. https:\/\/excavating.ai\/ Retrieved: 2021-12-01.","author":"Crawford Kate","year":"2019","unstructured":"Kate Crawford and Trevor Paglen. 2019. Excavating AI: The Politics of Training Sets for Machine Learning. https:\/\/excavating.ai\/ Retrieved: 2021-12-01."},{"key":"e_1_3_2_1_11_1","volume-title":"IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops.","author":"De\u00a0Vries Terrance","year":"2019","unstructured":"Terrance De\u00a0Vries, Ishan Misra, Changhan Wang, and Laurens Van Der\u00a0Maaten. 2019. Does object recognition work for everyone?. In IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops."},{"key":"e_1_3_2_1_12_1","volume-title":"ImageNet: A Large-Scale Hierarchical Image Database. In IEEE\/CVF Conference on Computer Vision and Pattern Recognition.","author":"Deng J.","unstructured":"J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. 2009. ImageNet: A Large-Scale Hierarchical Image Database. In IEEE\/CVF Conference on Computer Vision and Pattern Recognition."},{"key":"e_1_3_2_1_13_1","unstructured":"Emily Denton and Timnit Gebru. 2020. Tutorial on Fairness Accountability Transparency and Ethics in Computer Vision."},{"key":"e_1_3_2_1_15_1","volume-title":"International conference on machine learning.","author":"Donahue Jeff","year":"2014","unstructured":"Jeff Donahue, Yangqing Jia, Oriol Vinyals, Judy Hoffman, Ning Zhang, Eric Tzeng, and Trevor Darrell. 2014. Decaf: A deep convolutional activation feature for generic visual recognition. 
In International conference on machine learning."},{"key":"e_1_3_2_1_16_1","volume-title":"International Conference on Learning Representations.","author":"Dosovitskiy Alexey","year":"2021","unstructured":"Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. 2021. An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_17_1","volume-title":"IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops.","author":"Dulhanty Chris","year":"2019","unstructured":"Chris Dulhanty and Alexander Wong. 2019. Auditing ImageNet: Towards a Model-driven Framework for Annotating Demographic Attributes of Large-Scale Image Datasets. In IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops."},{"key":"e_1_3_2_1_18_1","volume-title":"On the Foundations of Noise-free Selective Classification.Journal of Machine Learning Research 11, 5","author":"El-Yaniv Ran","year":"2010","unstructured":"Ran El-Yaniv and Yair Wiener. 2010. On the Foundations of Noise-free Selective Classification.Journal of Machine Learning Research 11, 5 (2010)."},{"key":"e_1_3_2_1_19_1","volume-title":"Automating inequality: How high-tech tools profile, police, and punish the poor","author":"Eubanks Virginia","unstructured":"Virginia Eubanks. 2018. Automating inequality: How high-tech tools profile, police, and punish the poor. St. Martin\u2019s Press."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"crossref","unstructured":"Christiane Fellbaum (Ed.). 1998. WordNet: An electronic lexical database. MIT press.","DOI":"10.7551\/mitpress\/7287.001.0001"},{"key":"e_1_3_2_1_21_1","first-page":"33","article-title":"Soleil et peau","volume":"2","author":"Fitzpatrick B.","year":"1975","unstructured":"Thomas\u00a0B. Fitzpatrick. 1975. 
\u201cSoleil et peau\u201d [Sun and skin]. Journal de M\u00e9decine Esth\u00e9tique (in French) 2 (1975), 33\u201334.","journal-title":"Journal de M\u00e9decine Esth\u00e9tique (in French)"},{"key":"e_1_3_2_1_22_1","first-page":"38","article-title":"False Positives, False Negatives, and False Analyses: A Rejoinder to Machine Bias: There\u2019s Software Used across the Country to Predict Future Criminals. And It\u2019s Biased against Blacks","volume":"80","author":"Flores W","year":"2016","unstructured":"Anthony\u00a0W Flores, Kristin Bechtel, and Christopher\u00a0T Lowenkamp. 2016. False Positives, False Negatives, and False Analyses: A Rejoinder to Machine Bias: There\u2019s Software Used across the Country to Predict Future Criminals. And It\u2019s Biased against Blacks. Fed. Probation 80(2016), 38.","journal-title":"Fed. Probation"},{"key":"e_1_3_2_1_23_1","unstructured":"Gapminder. 2021. Dollar Street Dataset. https:\/\/www.gapminder.org\/dollar-street. Retrieved: 2021-11-05."},{"key":"e_1_3_2_1_24_1","unstructured":"Yonatan Geifman and Ran El-Yaniv. 2017. Selective Classification for Deep Neural Networks. In Advances in Neural Information Processing Systems."},{"key":"e_1_3_2_1_25_1","volume-title":"Conference on Neural Information Processing Systems.","author":"G\u00f6lz Paul","year":"2019","unstructured":"Paul G\u00f6lz, Anson Kahng, and Ariel\u00a0D Procaccia. 2019. Paradoxes in fair machine learning. In Conference on Neural Information Processing Systems."},{"key":"e_1_3_2_1_26_1","unstructured":"Priya Goyal Mathilde Caron Benjamin Lefaudeux Min Xu Pengchao Wang Vivek Pai Mannat Singh Vitaliy Liptchinsky Ishan Misra Armand Joulin and Piotr Bojanowski. 2021. Self-supervised Pretraining of Visual Features in the Wild. 
arxiv:2103.01988\u00a0[cs.CV]"},{"key":"e_1_3_2_1_27_1","volume-title":"ACM Conference on Human Factors in Computing Systems.","author":"Hamidi Foad","year":"2018","unstructured":"Foad Hamidi, Morgan\u00a0Klaus Scheuerman, and Stacy\u00a0M Branham. 2018. Gender recognition or gender reductionism? The social implications of embedded gender recognition systems. In ACM Conference on Human Factors in Computing Systems."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351095.3372826"},{"key":"e_1_3_2_1_29_1","unstructured":"Drew Harwell. 2019. Federal study confirms racial bias of many facial-recognition systems casts doubt on their expanding use. https:\/\/www.washingtonpost.com\/technology\/2019\/12\/19\/federal-study-confirms-racial-bias-many-facial-recognition-systems-casts-doubt-their-expanding-use\/ Retrieved: 2021-11-05."},{"key":"e_1_3_2_1_30_1","volume-title":"Towards Measuring Fairness in AI: the Casual Conversations Dataset","author":"Hazirbas Caner","year":"2021","unstructured":"Caner Hazirbas, Joanna Bitton, Brian Dolhansky, Jacqueline Pan, Albert Gordo, and Cristian\u00a0Canton Ferrer. 2021. Towards Measuring Fairness in AI: the Casual Conversations Dataset. IEEE Transactions on Biometrics, Behavior, and Identity Science (2021)."},{"key":"e_1_3_2_1_31_1","volume-title":"Deep Residual Learning for Image Recognition. In IEEE\/CVF Conference on Computer Vision and Pattern Recognition.","author":"He Kaiming","year":"2016","unstructured":"Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep Residual Learning for Image Recognition. In IEEE\/CVF Conference on Computer Vision and Pattern Recognition."},{"key":"e_1_3_2_1_32_1","volume-title":"Classification with reject option. The Canadian Journal of Statistics\/La Revue Canadienne de Statistique","author":"Herbei Radu","year":"2006","unstructured":"Radu Herbei and Marten\u00a0H Wegkamp. 2006. Classification with reject option. 
The Canadian Journal of Statistics\/La Revue Canadienne de Statistique (2006)."},{"key":"e_1_3_2_1_33_1","unstructured":"Minyoung Huh Pulkit Agrawal and Alexei\u00a0A. Efros. 2016. What makes ImageNet good for transfer learning?arxiv:1608.08614\u00a0[cs.CV]"},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1145\/3287560.3287600"},{"key":"e_1_3_2_1_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445918"},{"key":"e_1_3_2_1_36_1","volume-title":"Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision. In International Conference on Machine Learning.","author":"Jia Chao","year":"2021","unstructured":"Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc\u00a0V. Le, Yunhsuan Sung, Zhen Li, and Tom Duerig. 2021. Scaling Up Visual and Vision-Language Representation Learning With Noisy Text Supervision. In International Conference on Machine Learning."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46478-7_5"},{"key":"e_1_3_2_1_38_1","volume-title":"Don\u2019t ask if artificial intelligence is good or fair, ask how it shifts power. Nature 583, 7815","author":"Kalluri Pratyusha","year":"2020","unstructured":"Pratyusha Kalluri. 2020. Don\u2019t ask if artificial intelligence is good or fair, ask how it shifts power. Nature 583, 7815 (2020), 169\u2013169."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00159"},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1145\/3274357"},{"key":"e_1_3_2_1_41_1","unstructured":"Douwe Kiela Hamed Firooz Aravind Mohan Vedanuj Goswami Amanpreet Singh Pratik Ringshia and Davide Testuggine. 2020. The Hateful Memes Challenge: Detecting Hate Speech in Multimodal Memes. arXiv preprint arXiv:2005.04790(2020)."},{"key":"e_1_3_2_1_42_1","unstructured":"Douwe Kiela Hamed Firooz Aravind Mohan Vedanuj Goswami Amanpreet Singh Pratik Ringshia and Davide Testuggine. 2021. 
The Hateful Memes Challenge: Detecting Hate Speech in Multimodal Memes. arxiv:2005.04790\u00a0[cs.AI]"},{"key":"e_1_3_2_1_43_1","article-title":"Dlib-Ml: A Machine Learning Toolkit","author":"King E.","year":"2009","unstructured":"Davis\u00a0E. King. 2009. Dlib-Ml: A Machine Learning Toolkit. Journal of Machine Learning Research (Dec. 2009).","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298803"},{"key":"e_1_3_2_1_45_1","first-page":"1","article-title":"Inherent Trade-Offs in Algorithmic Fairness","volume":"46","author":"Kleinberg Jon","year":"2018","unstructured":"Jon Kleinberg. 2018. Inherent Trade-Offs in Algorithmic Fairness. SIGMETRICS Performance Evaluation Review 46, 1 (jun 2018).","journal-title":"SIGMETRICS Performance Evaluation Review"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58558-7_29"},{"key":"e_1_3_2_1_47_1","volume-title":"ImageNet Classification with Deep Convolutional Neural Networks. In Conference on Neural Information Processing Systems.","author":"Krizhevsky Alex","year":"2012","unstructured":"Alex Krizhevsky, Ilya Sutskever, and Geoffrey\u00a0E Hinton. 2012. ImageNet Classification with Deep Convolutional Neural Networks. In Conference on Neural Information Processing Systems."},{"key":"e_1_3_2_1_48_1","volume-title":"International Conference on Machine Learning Workshops.","author":"Kulynych Bogdan","year":"2020","unstructured":"Bogdan Kulynych, David Madras, Smitha Milli, Inioluwa\u00a0Deborah Raji, Angela Zhou, and Richard Zemel. 2020. Participatory Approaches to Machine Learning. In International Conference on Machine Learning Workshops."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"crossref","unstructured":"Alina Kuznetsova Hassan Rom Neil Alldrin Jasper Uijlings Ivan Krasin Jordi Pont-Tuset Shahab Kamali Stefan Popov Matteo Malloci Alexander Kolesnikov Tom Duerig and Vittorio Ferrari. 2020. 
The Open Images Dataset V4. International Journal of Computer Vision(2020).","DOI":"10.1007\/s11263-020-01316-z"},{"key":"e_1_3_2_1_50_1","unstructured":"Jeff Larson Surya Mattu Lauren Kirchner and Julia Angwin. 2016. How We Analyzed the COMPAS Recidivism Algorithm. https:\/\/www.propublica.org\/article\/how-we-analyzed-the-compas-recidivism-algorithm."},{"key":"e_1_3_2_1_51_1","volume-title":"Backpropagation Applied to Handwritten Zip Code Recognition. Neural Computation 1, 4","author":"LeCun Y.","year":"1989","unstructured":"Y. LeCun, B. Boser, J.\u00a0S. Denker, D. Henderson, R.\u00a0E. Howard, W. Hubbard, and L.\u00a0D. Jackel. 1989. Backpropagation Applied to Handwritten Zip Code Recognition. Neural Computation 1, 4 (1989)."},{"key":"e_1_3_2_1_52_1","volume-title":"Conference on Neural Information Processing Systems.","author":"Lee Kimin","year":"2018","unstructured":"Kimin Lee, Kibok Lee, Honglak Lee, and Jinwoo Shin. 2018. A simple unified framework for detecting out-of-distribution samples and adversarial attacks. In Conference on Neural Information Processing Systems."},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2015.7301352"},{"key":"e_1_3_2_1_54_1","volume-title":"Microsoft COCO: Common Objects in Context. In European Conference on Computer Vision.","author":"Lin Tsung-Yi","year":"2014","unstructured":"Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Doll\u00e1r, and C.\u00a0Lawrence Zitnick. 2014. Microsoft COCO: Common Objects in Context. In European Conference on Computer Vision."},{"key":"e_1_3_2_1_55_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01216-8_12"},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.4324\/9780429341175"},{"key":"e_1_3_2_1_57_1","volume-title":"Mitigating Bias in Set Selection with Noisy Protected Attributes. 
In ACM Conference on Fairness, Accountability, and Transparency.","author":"Mehrotra Anay","year":"2021","unstructured":"Anay Mehrotra and L.\u00a0Elisa Celis. 2021. Mitigating Bias in Set Selection with Noisy Protected Attributes. In ACM Conference on Fairness, Accountability, and Transparency."},{"key":"e_1_3_2_1_58_1","volume-title":"Model Cards for Model Reporting. In ACM Conference on Fairness, Accountability, and Transparency.","author":"Mitchell Margaret","year":"2019","unstructured":"Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa\u00a0Deborah Raji, and Timnit Gebru. 2019. Model Cards for Model Reporting. In ACM Conference on Fairness, Accountability, and Transparency."},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-021-00298-y"},{"key":"e_1_3_2_1_60_1","volume-title":"Learning Transferable Visual Models From Natural Language Supervision. In International Conference on Machine Learning.","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong\u00a0Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning Transferable Visual Models From Natural Language Supervision. In International Conference on Machine Learning."},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01044"},{"key":"e_1_3_2_1_62_1","unstructured":"Nitya Rajan. 2015. Google Photo Algorithm Mislabels African Americans As \u2019Gorillas\u2019. 
https:\/\/www.huffingtonpost.co.uk\/2015\/06\/30\/goole-photo-algorithm-labels-african-american-as-gorillas_n_7696000.html\/ Retrieved: 2021-12-01."},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.1145\/3306618.3314244"},{"key":"e_1_3_2_1_64_1","series-title":"Round 2","volume-title":"Neural Information Processing Systems Datasets and Benchmarks Track","author":"Raji Inioluwa\u00a0Deborah","unstructured":"Inioluwa\u00a0Deborah Raji, Emily Denton, Emily\u00a0M. Bender, Alex Hanna, and Amandalynne Paullada. 2021. AI and the Everything in the Whole Wide World Benchmark. In Neural Information Processing Systems Datasets and Benchmarks Track (Round 2)."},{"key":"e_1_3_2_1_65_1","volume-title":"Saving Face: Investigating the Ethical Concerns of Facial Recognition Auditing. In AAAI\/ACM Conference on AI, Ethics, and Society.","author":"Raji Inioluwa\u00a0Deborah","year":"2020","unstructured":"Inioluwa\u00a0Deborah Raji, Timnit Gebru, Margaret Mitchell, Joy Buolamwini, Joonseok Lee, and Emily Denton. 2020. Saving Face: Investigating the Ethical Concerns of Facial Recognition Auditing. In AAAI\/ACM Conference on AI, Ethics, and Society."},{"key":"e_1_3_2_1_66_1","doi-asserted-by":"crossref","unstructured":"Olga Russakovsky Jia Deng Hao Su Jonathan Krause Sanjeev Satheesh Sean Ma Zhiheng Huang Andrej Karpathy Aditya Khosla Michael Bernstein Alexander\u00a0C. Berg and Li Fei-Fei. 2015. ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision(2015).","DOI":"10.1007\/s11263-015-0816-y"},{"key":"e_1_3_2_1_67_1","unstructured":"Nicolas Schreuder and Evgenii Chzhen. 2021. Classification with abstention but without disparities. arxiv:2102.12258\u00a0[stat.ML]"},{"key":"e_1_3_2_1_68_1","doi-asserted-by":"publisher","DOI":"10.1145\/3461702.3462594"},{"key":"e_1_3_2_1_69_1","doi-asserted-by":"crossref","unstructured":"Carsten Schwemmer Carly Knight Emily\u00a0D. Bello-Pardo Stan Oklobdzija Martijn Schoonvelde and Jeffrey\u00a0W. 
Lockhart. 2020. Diagnosing Gender Bias in Image Recognition Systems. Socius 6(2020).","DOI":"10.1177\/2378023120967171"},{"key":"e_1_3_2_1_70_1","volume-title":"NeurIPS 2017 workshop: Machine Learning for the Developing World.","author":"Shankar Shreya","unstructured":"Shreya Shankar, Yoni Halpern, Eric Breck, James Atwood, Jimbo Wilson, and D. Sculley. 2017. No Classification without Representation: Assessing Geodiversity Issues in Open Data Sets for the Developing World. In NeurIPS 2017 workshop: Machine Learning for the Developing World."},{"key":"e_1_3_2_1_71_1","volume-title":"IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops.","author":"Sharif\u00a0Razavian Ali","year":"2014","unstructured":"Ali Sharif\u00a0Razavian, Hossein Azizpour, Josephine Sullivan, and Stefan Carlsson. 2014. CNN features off-the-shelf: an astounding baseline for recognition. In IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops."},{"key":"e_1_3_2_1_72_1","volume-title":"Image Representations Learned With Unsupervised Pre-Training Contain Human-like Biases. In ACM Conference on Fairness, Accountability, and Transparency.","author":"Steed Ryan","year":"2021","unstructured":"Ryan Steed and Aylin Caliskan. 2021. Image Representations Learned With Unsupervised Pre-Training Contain Human-like Biases. In ACM Conference on Fairness, Accountability, and Transparency."},{"key":"e_1_3_2_1_73_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01231-1_31"},{"key":"e_1_3_2_1_74_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.97"},{"key":"e_1_3_2_1_75_1","unstructured":"Alexander Tong. 2016. Full ImageNet Taxonomy. 
https:\/\/github.com\/atong01\/Imagenet-Tensorflow\/blob\/master\/model\/imagenet_synset_to_human_label_map.txt Retrieved: 2022-01-14."},{"key":"e_1_3_2_1_76_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00654"},{"key":"e_1_3_2_1_77_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351095.3375709"},{"key":"e_1_3_2_1_78_1","volume-title":"Conference on Learning Theory.","author":"Zhang Chicheng","year":"2016","unstructured":"Chicheng Zhang and Kamalika Chaudhuri. 2016. The extended Littlestone\u2019s dimension for learning with mistakes and abstentions. In Conference on Learning Theory."},{"key":"e_1_3_2_1_79_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.463"},{"key":"e_1_3_2_1_80_1","unstructured":"Shoshana Zuboff. 2020. The Age of Surveillance Capitalism: The Fight for a Human Future at the New Frontier of Power."}],"event":{"name":"FAccT '22: 2022 ACM Conference on Fairness, Accountability, and Transparency","location":"Seoul Republic of Korea","acronym":"FAccT '22","sponsor":["ACM Association for Computing Machinery"]},"container-title":["2022 ACM Conference on Fairness Accountability and 
Transparency"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3531146.3533074","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3531146.3533074","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T19:02:10Z","timestamp":1750186930000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3531146.3533074"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,20]]},"references-count":79,"alternative-id":["10.1145\/3531146.3533074","10.1145\/3531146"],"URL":"https:\/\/doi.org\/10.1145\/3531146.3533074","relation":{},"subject":[],"published":{"date-parts":[[2022,6,20]]},"assertion":[{"value":"2022-06-20","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}