{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T23:12:42Z","timestamp":1776121962042,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":147,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,8,8]],"date-time":"2023-08-08T00:00:00Z","timestamp":1691452800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,8,8]]},"DOI":"10.1145\/3600211.3604695","type":"proceedings-article","created":{"date-parts":[[2023,8,29]],"date-time":"2023-08-29T18:41:37Z","timestamp":1693334497000},"page":"691-704","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":21,"title":["Disambiguating Algorithmic Bias: From Neutrality to Justice"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0382-9458","authenticated-orcid":false,"given":"Elizabeth","family":"Edenberg","sequence":"first","affiliation":[{"name":"Department of Philosophy, Baruch College, The City University of New York, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1122-9655","authenticated-orcid":false,"given":"Alexandra","family":"Wood","sequence":"additional","affiliation":[{"name":"Berkman Klein Center for Internet &amp; Society, Harvard University, USA"}]}],"member":"320","published-online":{"date-parts":[[2023,8,29]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"1964. Section 2000e-3(b) of Title VII of the Civil Rights Act of 1964. 42 U.S.C. \u00a7 2000e-3(b)."},{"key":"e_1_3_2_1_2_1","volume-title":"Title VII of the Civil Rights Act of","year":"1964","unstructured":"1964. Title VII of the Civil Rights Act of 1964. 42 U.S.C. \u00a7 2000e et seq."},{"key":"e_1_3_2_1_3_1","unstructured":"1967. Section 623(e) of Age Discrimination in Employment Act of 1967. 29 U.S.C. \u00a7 623(e)."},{"key":"e_1_3_2_1_4_1","unstructured":"1968. Fair Housing Act. 42 U.S.C. \u00a7 3601 et seq."},{"key":"e_1_3_2_1_5_1","unstructured":"1968. Section 3604(c) of the Fair Housing Act. 42 U.S.C. \u00a7 3604(c)."},{"key":"e_1_3_2_1_6_1","volume-title":"Griggs v","year":"1971","unstructured":"1971. Griggs v. Duke Power Co.401 U.S. 424 (1971)."},{"key":"e_1_3_2_1_7_1","unstructured":"1974. Equal Credit Opportunity Act. 15 U.S.C. \u00a7 1691 et seq."},{"key":"e_1_3_2_1_8_1","volume-title":"Washington v. Davis. 426 U.S. 229","year":"1976","unstructured":"1976. Washington v. Davis. 426 U.S. 229 (1976)."},{"key":"e_1_3_2_1_9_1","unstructured":"1986. Public Order Act 1986 (c 64). Parts III and 3A (UK)."},{"key":"e_1_3_2_1_10_1","unstructured":"1995. Adarand Constructors Inc. v. Pena. 515 U.S. 200 (1995)."},{"key":"e_1_3_2_1_11_1","volume-title":"The media and the Rwanda genocide","unstructured":"2007. The media and the Rwanda genocide. Pluto Press ; Fountain Publishers ; International Development Research Centre, London ; Ann Arbor, MI : Kampala, Uganda : Ottawa."},{"key":"e_1_3_2_1_12_1","volume-title":"Ricci v. DeStefano. 557 U.S. 557","year":"2009","unstructured":"2009. Ricci v. DeStefano. 557 U.S. 557 (2009)."},{"key":"e_1_3_2_1_13_1","unstructured":"2015. Criminal Code of Germany. \u00a7 130 (Volksverhetzung) (Germany)."},{"key":"e_1_3_2_1_14_1","volume-title":"Algorithmic Accountability Act of","year":"2019","unstructured":"2019. Algorithmic Accountability Act of 2019. 
S.1108, 116th Cong."},{"key":"e_1_3_2_1_15_1","volume-title":"Data Accountability and Transparency Act of","year":"2020","unstructured":"2020. Data Accountability and Transparency Act of 2020. S.____, 116th Cong. (Discussion Draft)."},{"key":"e_1_3_2_1_16_1","unstructured":"2021. Algorithmic Bias in Education. 1052\u20131092\u00a0pages."},{"key":"e_1_3_2_1_17_1","volume-title":"European Commission. Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence","year":"2021","unstructured":"2021. European Commission. Proposal for a Regulation of the European Parliament and of the Council Laying Down Harmonised Rules on Artificial Intelligence (Artificial Intelligence Act) and Amending Certain Union Legislative Acts (COM\/2021\/206 final)."},{"key":"e_1_3_2_1_18_1","volume-title":"The New Jim Crow: Mass Incarceration in the Age of Colorblindness","author":"Alexander Michelle","unstructured":"Michelle Alexander. 2010. The New Jim Crow: Mass Incarceration in the Age of Colorblindness. The New Press, New York, NY."},{"key":"e_1_3_2_1_19_1","unstructured":"American Civil Liberties Union. 2019. In Historic Decision on Digital Bias EEOC Finds Employers Violated Federal Law when they Excluded Women and Older Workers from Facebook Ads. https:\/\/www.aclu.org\/press-releases\/historic-decision-digital-bias-eeoc-finds-employers-violated-federal-law-when-they Press release."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1037\/0022-3514.91.4.652"},{"key":"e_1_3_2_1_21_1","volume-title":"Machine Bias. ProPublica (23","author":"Angwin Julia","year":"2016","unstructured":"Julia Angwin, Jeff Larson, Surya Mattu, and Lauren Kirchner. 2016. Machine Bias. ProPublica (23 May 2016). https:\/\/www.propublica.org\/article\/machine-bias-risk-assessments-in-criminal-sentencing"},{"key":"e_1_3_2_1_22_1","volume-title":"Facebook Lets Advertisers Exclude Users by Race. ProPublica (28","author":"Angwin Julia","year":"2016","unstructured":"Julia Angwin and Terry Parris, Jr.2016. Facebook Lets Advertisers Exclude Users by Race. ProPublica (28 October 2016). https:\/\/www.propublica.org\/article\/facebook-lets-advertisers-exclude-users-by-race"},{"key":"e_1_3_2_1_23_1","volume-title":"What is the Problem to Which Fair Machine Learning is the Solution?. Presentation at AI Now. (10","author":"Barocas Solon","year":"2017","unstructured":"Solon Barocas. 2017. What is the Problem to Which Fair Machine Learning is the Solution?. Presentation at AI Now. (10 July 2017). https:\/\/ainowinstitute.org\/symposia\/videos\/what-is-the-problem-to-which-fair-machine-learning-is-the-solution.html"},{"key":"e_1_3_2_1_24_1","volume-title":"The problem with bias: from allocative to representational harms in machine learning","author":"Barocas Solon","year":"2017","unstructured":"Solon Barocas, Kate Crawford, Aaron Shapiro, and Hanna Wallach. 2017. The problem with bias: from allocative to representational harms in machine learning. Special Interest Group for Computing, Information and Society (2017)."},{"key":"e_1_3_2_1_25_1","unstructured":"Solon Barocas Moritz Hardt and Arvind Narayanan. 2019. Fairness and Machine Learning: Limitations and Opportunities. fairmlbook.org. http:\/\/www.fairmlbook.org."},{"key":"e_1_3_2_1_26_1","unstructured":"Marion Bartl Malvina Nissim and Albert Gatt. 2020. Unmasking Contextual Stereotypes: Measuring and Mitigating BERT\u2019s Gender Bias. 
arxiv:2010.14534\u00a0[cs.CL]"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445922"},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1093\/oso\/9780190923624.001.0001"},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"crossref","unstructured":"Federico Bianchi Pratyusha Kalluri Esin Durmus Faisal Ladhak Myra Cheng Debora Nozza Tatsunori Hashimoto Dan Jurafsky James Zou and Aylin Caliskan. 2022. Easily Accessible Text-to-Image Generation Amplifies Demographic Stereotypes at Large Scale. https:\/\/arxiv.org\/abs\/2211.03759","DOI":"10.1145\/3593013.3594095"},{"key":"e_1_3_2_1_30_1","volume-title":"Proceedings of the 1st Conference on Fairness, Accountability and Transparency","author":"Binns Reuben","year":"2018","unstructured":"Reuben Binns. 2018. Fairness in machine learning: lessons from political philosophy. Proceedings of the 1st Conference on Fairness, Accountability and Transparency (2018)."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"crossref","unstructured":"Abeba Birhane Pratyusha Kalluri Dallas Card William Agnew Ravit Dotan and Michelle Bao. 2022. The Values Encoded in Machine Learning Research. arxiv:2106.15590\u00a0[cs.LG]","DOI":"10.1145\/3531146.3533083"},{"key":"e_1_3_2_1_32_1","volume-title":"Multimodal datasets: misogyny, pornography, and malignant stereotypes. CoRR abs\/2110.01963","author":"Birhane Abeba","year":"2021","unstructured":"Abeba Birhane, Vinay\u00a0Uday Prabhu, and Emmanuel Kahembwe. 2021. Multimodal datasets: misogyny, pornography, and malignant stereotypes. CoRR abs\/2110.01963 (2021). arXiv:2110.01963https:\/\/arxiv.org\/abs\/2110.01963"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"crossref","unstructured":"Su\u00a0Lin Blodgett Solon Barocas Hal Daum\u00e9 and Hanna Wallach. 2020. Language (Technology) is Power: A Critical Survey of \"Bias\" in NLP.","DOI":"10.18653\/v1\/2020.acl-main.485"},{"key":"e_1_3_2_1_34_1","unstructured":"Rishi Bommasani 2022. On the Opportunities and Risks of Foundation Models. arxiv:2108.07258"},{"key":"e_1_3_2_1_35_1","unstructured":"Rishi Bommasani Kathleen\u00a0A. Creel Ananya Kumar Dan Jurafsky and Percy Liang. 2022. Picking on the Same Person: Does Algorithmic Monoculture lead to Outcome Homogenization?arxiv:2211.13972"},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"crossref","unstructured":"Matthew\u00a0Le Bui and Safiya\u00a0Umoja Noble. 2020. We\u2019re Missing a Moral Framework of Justice in Artificial Intelligence: On the Limits Failings and Ethics of Fairness. In The Oxford Handbook of Ethics of AI.","DOI":"10.1093\/oxfordhb\/9780190067397.013.9"},{"key":"e_1_3_2_1_37_1","volume-title":"Proceedings of the 1st Conference on Fairness, Accountability and Transparency(Proceedings of Machine Learning Research, Vol.\u00a081)","author":"Buolamwini Joy","year":"2018","unstructured":"Joy Buolamwini and Timnit Gebru. 2018. Gender Shades: Intersectional Accuracy Disparities in Commercial Gender Classification. In Proceedings of the 1st Conference on Fairness, Accountability and Transparency(Proceedings of Machine Learning Research, Vol.\u00a081), Sorelle\u00a0A. Friedler and Christo Wilson (Eds.). PMLR, 77\u201391. https:\/\/proceedings.mlr.press\/v81\/buolamwini18a.html"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","unstructured":"Diana Burgess Yingmei Ding Margaret Hargreaves Michelle van Ryn and Sean Phelan. 2008. The Association between Perceived Discrimination and Underutilization of Needed Medical and Mental Health Care in a Multi-Ethnic Community Sample. 
Journal of health care for the poor and underserved 19 (09 2008) 894\u2013911. https:\/\/doi.org\/10.1353\/hpu.0.0063","DOI":"10.1353\/hpu.0.0063"},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-soc-090820-020800"},{"key":"e_1_3_2_1_40_1","volume-title":"Semantics derived automatically from language corpora necessarily contain human biases. CoRR abs\/1608.07187","author":"Caliskan Aylin","year":"2016","unstructured":"Aylin Caliskan, Joanna\u00a0J. Bryson, and Arvind Narayanan. 2016. Semantics derived automatically from language corpora necessarily contain human biases. CoRR abs\/1608.07187 (2016). arXiv:1608.07187http:\/\/arxiv.org\/abs\/1608.07187"},{"key":"e_1_3_2_1_41_1","volume-title":"The Racist Algorithm?Michigan Law Review 115","author":"Chander Anupam","year":"2017","unstructured":"Anupam Chander. 2017. The Racist Algorithm?Michigan Law Review 115 (2017), 1023\u20131045."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1089\/big.2016.0047"},{"key":"e_1_3_2_1_43_1","first-page":"1","article-title":"The Scored Society: Due Process for Automated Predictions","volume":"89","author":"Citron Danielle\u00a0Keats","year":"2014","unstructured":"Danielle\u00a0Keats Citron and Frank\u00a0A. Pasquale. 2014. The Scored Society: Due Process for Automated Predictions. Washington Law Review 89 (2014), 1\u201333.","journal-title":"Washington Law Review"},{"key":"e_1_3_2_1_44_1","volume-title":"Lensa\u2019s viral AI art creations were bound to hypersexualize users: AI-generated art is rife with issues. Polygon (20","author":"Clark Nicole","year":"2022","unstructured":"Nicole Clark. 2022. Lensa\u2019s viral AI art creations were bound to hypersexualize users: AI-generated art is rife with issues. Polygon (20 December 2022). https:\/\/www.polygon.com\/23513386\/ai-art-lensa-magic-avatars-artificial-intelligence-explained-stable-diffusion"},{"key":"e_1_3_2_1_45_1","volume-title":"Algorithmic decision making and the cost of fairness. CoRR abs\/1701.08230","author":"Corbett-Davies Sam","year":"2017","unstructured":"Sam Corbett-Davies, Emma Pierson, Avi Feller, Sharad Goel, and Aziz Huq. 2017. Algorithmic decision making and the cost of fairness. CoRR abs\/1701.08230 (2017). http:\/\/arxiv.org\/abs\/1701.08230"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/12255.001.0001"},{"key":"e_1_3_2_1_47_1","volume-title":"The Trouble with Bias. Keynote address. Neural Information Processing Systems","author":"Crawford Kate","year":"2017","unstructured":"Kate Crawford. 2017. The Trouble with Bias. Keynote address. Neural Information Processing Systems (2017). https:\/\/www.youtube.com\/watch?v=fMym_BKWQzk"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017"},{"key":"e_1_3_2_1_49_1","unstructured":"Anupam Datta Matt Fredrikson Gihyuk Ko Piotr Mardziel and Shayak Sen. 2017. Proxy Non-Discrimination in Data-Driven Systems. https:\/\/arxiv.org\/abs\/1707.08120"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1177\/20539517211044808"},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"crossref","unstructured":"Sunipa Dev Masoud Monajatipoor Anaelia Ovalle Arjun Subramonian Jeff\u00a0M Phillips and Kai-Wei Chang. 2021. Harms of Gender Exclusivity and Challenges in Non-Binary Representation in Language Technologies. 
arxiv:2108.12084\u00a0[cs.CL]","DOI":"10.18653\/v1\/2021.emnlp-main.150"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-aacl.24"},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445924"},{"key":"e_1_3_2_1_54_1","volume-title":"Data Feminism","author":"D\u2019Ignazio Catherine","unstructured":"Catherine D\u2019Ignazio and Lauren\u00a0F. Klein. 2020. Data Feminism. MIT Press, Cambridge, MA."},{"key":"e_1_3_2_1_55_1","volume-title":"Fairness Through Awareness. CoRR abs\/1104.3913","author":"Dwork Cynthia","year":"2011","unstructured":"Cynthia Dwork, Moritz Hardt, Toniann Pitassi, Omer Reingold, and Richard\u00a0S. Zemel. 2011. Fairness Through Awareness. CoRR abs\/1104.3913 (2011). arXiv:1104.3913http:\/\/arxiv.org\/abs\/1104.3913"},{"key":"e_1_3_2_1_56_1","volume-title":"Automating Inequality: How High-Tech Tools Profile, Police, and Punish the Poor","author":"Eubanks Virginia","year":"2017","unstructured":"Virginia Eubanks. 2017. Automating Inequality: How High-Tech Tools Profile, Police, and Punish the Poor. St. Martin\u2019s Press, New York, NY."},{"key":"e_1_3_2_1_57_1","volume-title":"Automating Inequality: How High-Tech Tools Profile, Police, and Punish the Poor","author":"Eubanks Virginia","year":"2018","unstructured":"Virginia Eubanks. 2018. Automating Inequality: How High-Tech Tools Profile, Police, and Punish the Poor. St. Martin\u2019s Press, Inc., USA."},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.1111\/phc3.12760"},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.1093\/acprof:oso\/9780198237907.001.0001"},{"key":"e_1_3_2_1_60_1","doi-asserted-by":"publisher","DOI":"10.1145\/3433949"},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1145\/230538.230561"},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"crossref","unstructured":"Vinitha Gadiraju Shaun Kane Sunipa Dev Alex Taylor Ding Wang Emily Denton and Robin Brewer. 2023. \"I wouldn\u2019t say offensive but...\": Disability-Centered Perspectives on Large Language Models.","DOI":"10.1145\/3593013.3593989"},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.1145\/3287560.3287573"},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"publisher","DOI":"10.1037\/0022-3514.94.2.292"},{"key":"e_1_3_2_1_65_1","doi-asserted-by":"publisher","unstructured":"Seraphina Goldfarb-Tarrant Rebecca Marchant Ricardo S\u00e1nchez Mugdha Pandya and Adam Lopez. 2021. Intrinsic Bias Metrics Do Not Correlate with Application Bias. 1926\u20131940. https:\/\/doi.org\/10.18653\/v1\/2021.acl-long.150","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_66_1","unstructured":"Government of Canada. 2021. Directive on automated decision-making. https:\/\/www.tbs-sct.gc.ca\/pol\/doc-eng.aspx?id=32592"},{"key":"e_1_3_2_1_67_1","volume-title":"Putting the J(ustice) in FAT","author":"Green Ben","year":"2018","unstructured":"Ben Green. 2018. Putting the J(ustice) in FAT. Berkman Klein Center Collection - Medium (26 February 2018). https:\/\/medium.com\/berkman-klein-center\/putting-the-j-ustice-in-fat-28da2b8eae6d"},{"key":"e_1_3_2_1_68_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351095.3372869"},{"key":"e_1_3_2_1_69_1","volume-title":"Escaping the Impossibility of Fairness: From Formal to Substantive Algorithmic Fairness. Philosophy & Technology 35, 90","author":"Green Ben","year":"2022","unstructured":"Ben Green. 2022. Escaping the Impossibility of Fairness: From Formal to Substantive Algorithmic Fairness. 
Philosophy & Technology 35, 90 (2022)."},{"key":"e_1_3_2_1_70_1","volume-title":"Machine Learning: The Debates workshop at the 35th International Conference on Machine Learning","author":"Green Ben","year":"2018","unstructured":"Ben Green and Lily Hu. 2018. The myth in the methodology: towards a recontextualization of fairness in machine learning. Machine Learning: The Debates workshop at the 35th International Conference on Machine Learning (2018)."},{"key":"e_1_3_2_1_71_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351095.3372840"},{"key":"e_1_3_2_1_72_1","doi-asserted-by":"publisher","DOI":"10.1145\/3461702.3462536"},{"key":"e_1_3_2_1_73_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1"},{"key":"e_1_3_2_1_74_1","volume-title":"How AI Avatars And Face Filters Are Altering Our Conception Of Beauty. Forbes (19","author":"Haines Anna","year":"2022","unstructured":"Anna Haines. 2022. How AI Avatars And Face Filters Are Altering Our Conception Of Beauty. Forbes (19 December 2022). https:\/\/www.forbes.com\/sites\/annahaines\/2022\/12\/19\/how-ai-avatars-and-face-filters-are-affecting-our-conception-of-beauty\/"},{"key":"e_1_3_2_1_75_1","volume-title":"Advances in Neural Information Processing Systems, D.\u00a0Lee, M.\u00a0Sugiyama, U.\u00a0Luxburg, I.\u00a0Guyon, and R.\u00a0Garnett (Eds.). Vol.\u00a029. Curran Associates","author":"Hardt Moritz","year":"2016","unstructured":"Moritz Hardt, Eric Price, and Nati Srebro. 2016. Equality of Opportunity in Supervised Learning. In Advances in Neural Information Processing Systems, D.\u00a0Lee, M.\u00a0Sugiyama, U.\u00a0Luxburg, I.\u00a0Guyon, and R.\u00a0Garnett (Eds.). Vol.\u00a029. Curran Associates, Inc.https:\/\/proceedings.neurips.cc\/paper\/2016\/file\/9d2682367c3935defcb1f9e247a97c0d-Paper.pdf"},{"key":"e_1_3_2_1_76_1","volume-title":"The Algorithm: AI-generated art raises tricky questions about ethics, copyright, and security. MIT Technology Review (20","author":"Heikkil\u00e4 Melissa","year":"2022","unstructured":"Melissa Heikkil\u00e4. 2022. The Algorithm: AI-generated art raises tricky questions about ethics, copyright, and security. MIT Technology Review (20 September 2022). https:\/\/www.technologyreview.com\/2022\/09\/20\/1059792\/the-algorithm-ai-generated-art-raises-tricky-questions-about-ethics-copyright-and-security\/"},{"key":"e_1_3_2_1_77_1","volume-title":"The viral AI avatar app Lensa undressed me\u2014without my consent. MIT Technology Review (12","author":"Heikkil\u00e4 Melissa","year":"2022","unstructured":"Melissa Heikkil\u00e4. 2022. The viral AI avatar app Lensa undressed me\u2014without my consent. MIT Technology Review (12 December 2022). https:\/\/www.technologyreview.com\/2022\/12\/12\/1064751\/the-viral-ai-avatar-app-lensa-undressed-me-without-my-consent\/"},{"key":"e_1_3_2_1_78_1","volume-title":"AI image generator Midjourney blocks porn by banning words about the human reproductive system. MIT Technology Review (24","author":"Heikkil\u00e4 Melissa","year":"2023","unstructured":"Melissa Heikkil\u00e4. 2023. AI image generator Midjourney blocks porn by banning words about the human reproductive system. MIT Technology Review (24 February 2023). https:\/\/www.technologyreview.com\/2023\/02\/24\/1069093\/ai-image-generator-midjourney-blocks-porn-by-banning-words-about-the-human-reproductive-system\/"},{"key":"e_1_3_2_1_79_1","first-page":"811","article-title":"Measuring Algorithmic Fairness","volume":"106","author":"Hellman Deborah","year":"2020","unstructured":"Deborah Hellman. 2020. 
Measuring Algorithmic Fairness. Virginia Law Review 106 (2020), 811\u2013866. https:\/\/virginialawreview.org\/articles\/measuring-algorithmic-fairness\/","journal-title":"Virginia Law Review"},{"key":"e_1_3_2_1_80_1","volume-title":"Researchers Find Stable Diffusion Amplifies Stereotypes","author":"Hendrix Justin","year":"2022","unstructured":"Justin Hendrix. 2022. Researchers Find Stable Diffusion Amplifies Stereotypes. Tech Policy Press (9 November 2022)."},{"key":"e_1_3_2_1_81_1","doi-asserted-by":"publisher","DOI":"10.1080\/1369118X.2019.1573912"},{"key":"e_1_3_2_1_82_1","doi-asserted-by":"publisher","DOI":"10.1111\/lnc3.12432"},{"key":"e_1_3_2_1_83_1","first-page":"405","article-title":"Antidiscrimination Law in the Administrative State","volume":"2006","author":"Suk Julie\u00a0Chi-hye","year":"2006","unstructured":"Julie\u00a0Chi-hye Suk. 2006. Antidiscrimination Law in the Administrative State. University of Illinois Law Review 2006 (2006), 405\u2013474.","journal-title":"University of Illinois Law Review"},{"key":"e_1_3_2_1_84_1","volume-title":"Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency (FAccT \u201921)","author":"Jacobs Abigail\u00a0Z.","year":"2021","unstructured":"Abigail\u00a0Z. Jacobs and Hanna Wallach. 2021. Measurement and Fairness. Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency (FAccT \u201921) (2021), 375\u2013385."},{"key":"e_1_3_2_1_85_1","volume-title":"Aiming for truth, fairness, and equity in your company\u2019s use of AI. Federal Trade Commission Business Blog (19","author":"Jillson Elisa","year":"2021","unstructured":"Elisa Jillson. 2021. Aiming for truth, fairness, and equity in your company\u2019s use of AI. Federal Trade Commission Business Blog (19 April 2021)."},{"key":"e_1_3_2_1_86_1","first-page":"499","article-title":"Artificial Intelligence, Machine Learning, and Bias In Finance: Toward Responsible Innovation","volume":"88","author":"Johnson Kristin","year":"2019","unstructured":"Kristin Johnson, Frank Pasquale, and Jennifer Chapman. 2019. Artificial Intelligence, Machine Learning, and Bias In Finance: Toward Responsible Innovation. Fordham Law Review 88, 2 (2019), 499\u2013529.","journal-title":"Fordham Law Review"},{"key":"e_1_3_2_1_87_1","doi-asserted-by":"publisher","DOI":"10.1093\/jla\/laz001"},{"key":"e_1_3_2_1_88_1","volume-title":"Don\u2019t ask if artificial intelligence is good or fair, ask how it shifts power. Nature (7","author":"Kalluri Pratyusha","year":"2020","unstructured":"Pratyusha Kalluri. 2020. Don\u2019t ask if artificial intelligence is good or fair, ask how it shifts power. Nature (7 July 2020). https:\/\/www.nature.com\/articles\/d41586-020-02003-2"},{"key":"e_1_3_2_1_89_1","volume-title":"It\u2019s way too easy to trick Lensa AI into making NSFW images. TechCrunch (6","author":"Kamps Haje\u00a0Jan","year":"2022","unstructured":"Haje\u00a0Jan Kamps. 2022. It\u2019s way too easy to trick Lensa AI into making NSFW images. TechCrunch (6 December 2022). https:\/\/techcrunch.com\/2022\/12\/06\/lensa-goes-nsfw"},{"key":"e_1_3_2_1_90_1","volume-title":"Representational Harms in Image Tagging. In Beyond Fair Computer Vision Workshop at CVPR","author":"Katzman Jared","year":"2021","unstructured":"Jared Katzman, Solon Barocas, Su\u00a0Lin Blodgett, Kristen Laird, Morgan\u00a0Klaus Scheuerman, and Hanna Wallach. 2021. Representational Harms in Image Tagging. 
In Beyond Fair Computer Vision Workshop at CVPR 2021."},{"key":"e_1_3_2_1_91_1","first-page":"867","article-title":"Manipulating Opportunity","volume":"106","author":"Kim T.","year":"2020","unstructured":"Pauline\u00a0T. Kim. 2020. Manipulating Opportunity. Virginia Law Review 106 (2020), 867\u2013935.","journal-title":"Virginia Law Review"},{"key":"e_1_3_2_1_92_1","first-page":"93","article-title":"Discrimination in Online Employment Recruiting","volume":"63","author":"Kim T.","year":"2018","unstructured":"Pauline\u00a0T. Kim and Sharion Scott. 2018. Discrimination in Online Employment Recruiting. St. Louis University Law Journal 63 (2018), 93\u2013118.","journal-title":"St. Louis University Law Journal"},{"key":"e_1_3_2_1_93_1","doi-asserted-by":"publisher","DOI":"10.1145\/3593013.3594015"},{"key":"e_1_3_2_1_94_1","unstructured":"Hannah Kirk Yennie Jun Haider Iqbal Elias Benussi Filippo Volpin Frederic\u00a0A. Dreyer Aleksandar Shtedritski and Yuki\u00a0M. Asano. 2021. Bias Out-of-the-Box: An Empirical Analysis of Intersectional Occupational Biases in Popular Generative Language Models. arxiv:2102.04130\u00a0[cs.CL]"},{"key":"e_1_3_2_1_95_1","volume-title":"Might Make You Feel Worse. Rolling Stone (12","author":"Klee Miles","year":"2022","unstructured":"Miles Klee. 2022. A Psychologist Explains Why Your \u2018Hot AI Selfies\u2019 Might Make You Feel Worse. Rolling Stone (12 December 2022). https:\/\/www.rollingstone.com\/culture\/culture-features\/lensa-app-hot-ai-selfie-self-esteem-1234644965\/"},{"key":"e_1_3_2_1_96_1","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.2018340118"},{"key":"e_1_3_2_1_97_1","volume-title":"Inherent Trade-Offs in the Fair Determination of Risk Scores. CoRR abs\/1609.05807","author":"Kleinberg M.","year":"2016","unstructured":"Jon\u00a0M. Kleinberg, Sendhil Mullainathan, and Manish Raghavan. 2016. Inherent Trade-Offs in the Fair Determination of Risk Scores. CoRR abs\/1609.05807 (2016). arXiv:1609.05807http:\/\/arxiv.org\/abs\/1609.05807"},{"key":"e_1_3_2_1_98_1","doi-asserted-by":"publisher","DOI":"10.3389\/fsoc.2022.883999"},{"key":"e_1_3_2_1_99_1","volume-title":"Lensa\u2019s Magic Avatars Explained. Live FAQ","author":"Labs Prisma","year":"2023","unstructured":"Prisma Labs. 2023. Lensa\u2019s Magic Avatars Explained. Live FAQ (2023). https:\/\/prismalabs.notion.site\/prismalabs\/Lensa-s-Magic-Avatars-Explained-c08c3c34f75a42518b8621cc89fd3d3f [https:\/\/perma.cc\/E65L-YT3A] (last visited Mar. 6, 2023)."},{"key":"e_1_3_2_1_100_1","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-021-00067-y"},{"key":"e_1_3_2_1_101_1","unstructured":"Paul\u00a0Pu Liang Chiyu Wu Louis-Philippe Morency and Ruslan Salakhutdinov. 2021. Towards Understanding and Mitigating Social Biases in Language Models. arxiv:2106.13219\u00a0[cs.CL]"},{"key":"e_1_3_2_1_102_1","doi-asserted-by":"crossref","unstructured":"Ishani Maitra and Mary\u00a0Kate McGowan (Eds.). 2012. Speech and Harm: Controversies over Free Speech. Oxford University Press.","DOI":"10.1093\/acprof:oso\/9780199236282.001.0001"},{"key":"e_1_3_2_1_103_1","unstructured":"Ninareh Mehrabi Fred Morstatter Nripsuta Saxena Kristina Lerman and Aram Galstyan. 2019. A Survey on Bias and Fairness in Machine Learning."},{"key":"e_1_3_2_1_104_1","volume-title":"Why Do All My AI Avatars Have Huge Boobs?The Cut (12","author":"Mercado Mia","year":"2022","unstructured":"Mia Mercado. 2022. Why Do All My AI Avatars Have Huge Boobs?The Cut (12 December 2022). 
https:\/\/www.thecut.com\/2022\/12\/ai-avatars-lensa-beauty-boobs.html"},{"key":"e_1_3_2_1_105_1","volume-title":"Want Less-Biased Decisions? Use Algorithms. Harvard Business Review (26","author":"Miller P.","year":"2018","unstructured":"Alex\u00a0P. Miller. 2018. Want Less-Biased Decisions? Use Algorithms. Harvard Business Review (26 July 2018). https:\/\/hbr.org\/2018\/07\/want-less-biased-decisions-use-algorithms"},{"key":"e_1_3_2_1_106_1","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-statistics-042720-125902"},{"key":"e_1_3_2_1_107_1","volume-title":"Whistleblower Testimony and Facebook Papers Trigger Lawmaker Calls for Regulation. Washington Post (25","author":"Morris Loveday","year":"2021","unstructured":"Loveday Morris, Elizabeth Dwoskin, and Hamza Shaban. 2021. Whistleblower Testimony and Facebook Papers Trigger Lawmaker Calls for Regulation. Washington Post (25 October 2021). https:\/\/www.washingtonpost.com\/technology\/2021\/10\/25\/facebook-papers-live-updates"},{"key":"e_1_3_2_1_108_1","doi-asserted-by":"publisher","DOI":"10.1145\/3359221"},{"key":"e_1_3_2_1_109_1","volume-title":"Big Data: A Report on Algorithmic Systems, Opportunity, and Civil Rights. Technical Report. Executive Office of the President","author":"Mu\u00f1oz Cecilia","year":"2016","unstructured":"Cecilia Mu\u00f1oz, Megan Smith, and DJ Patil. 2016. Big Data: A Report on Algorithmic Systems, Opportunity, and Civil Rights. Technical Report. Executive Office of the President, Washington, DC. https:\/\/obamawhitehouse.archives.gov\/sites\/default\/files\/microsites\/ostp\/2016_0504_data_discrimination.pdf"},{"key":"e_1_3_2_1_110_1","doi-asserted-by":"publisher","DOI":"10.1145\/3461702.3462608"},{"key":"e_1_3_2_1_111_1","volume-title":"21 Fairness Definitions and Their Politics. Tutorial for Conf. Fairness, Accountability & Transparency (23","author":"Narayanan Arvind","year":"2018","unstructured":"Arvind Narayanan. 2018. 21 Fairness Definitions and Their Politics. Tutorial for Conf. Fairness, Accountability & Transparency (23 February 2018). https:\/\/www.youtube.com\/watch?v=jIXIuYdnyyk"},{"key":"e_1_3_2_1_112_1","volume-title":"The limits of the quantitative approach to discrimination. 2022 James Baldwin lecture","author":"Narayanan Arvind","year":"2022","unstructured":"Arvind Narayanan. 2022. The limits of the quantitative approach to discrimination. 2022 James Baldwin lecture, Princeton University (11 October 2022). https:\/\/www.cs.princeton.edu\/\u00a0arvindn\/talks\/baldwin-discrimination\/baldwin-discrimination-transcript.pdf"},{"key":"e_1_3_2_1_113_1","doi-asserted-by":"publisher","DOI":"10.1098\/rsta.2017.0358"},{"key":"e_1_3_2_1_114_1","doi-asserted-by":"publisher","unstructured":"K. Nissim and A. Wood. 2021. Foundations for Robust Data Protection: Co-designing Law and Computer Science. In 2021 Third IEEE International Conference on Trust Privacy and Security in Intelligent Systems and Applications (TPS-ISA). IEEE Computer Society Los Alamitos CA USA 235\u2013242. https:\/\/doi.org\/10.1109\/TPSISA52974.2021.00026","DOI":"10.1109\/TPSISA52974.2021.00026"},{"key":"e_1_3_2_1_115_1","volume-title":"Algorithms of oppression. How search engines reinforce racism","author":"Noble Safiya\u00a0Umoja","unstructured":"Safiya\u00a0Umoja Noble. 2018. Algorithms of oppression. How search engines reinforce racism. New York University Press, New York. http:\/\/algorithmsofoppression.com\/"},{"key":"e_1_3_2_1_116_1","volume-title":"The Long History of Algorithmic Fairness. 
Phenomenal World (30","author":"Ochigame Rodrigo","year":"2020","unstructured":"Rodrigo Ochigame. 2020. The Long History of Algorithmic Fairness. Phenomenal World (30 January 2020)."},{"key":"e_1_3_2_1_117_1","volume-title":"International Conference on Machine Learning","author":"Ochigame Rodrigo","year":"2018","unstructured":"Rodrigo Ochigame, Chelsea Barabas, Karthik Dinakar, Madars Virza, and Joichi Ito. 2018. Beyond Legitimation: Rethinking Fairness, Interpretability, and Accuracy in Machine Learning. International Conference on Machine Learning (2018)."},{"key":"e_1_3_2_1_118_1","volume-title":"Awarding GCSE, AS, A Level, Advanced Extension Awards and Extended Project Qualifications in Summer 2020: Interim Report","author":"Ofqual","year":"2020","unstructured":"Ofqual. 2020. Awarding GCSE, AS, A Level, Advanced Extension Awards and Extended Project Qualifications in Summer 2020: Interim Report. (2020)."},{"key":"e_1_3_2_1_119_1","volume-title":"Weapons of Math Destruction: How Big Data Increases Inequality and Threatens Democracy","author":"O\u2019Neil Cathy","unstructured":"Cathy O\u2019Neil. 2016. Weapons of Math Destruction: How Big Data Increases Inequality and Threatens Democracy. Crown, New York, NY."},{"key":"e_1_3_2_1_121_1","volume-title":"How should AI systems behave, and who should decide? OpenAI Blog (16","author":"OpenAI","year":"2023","unstructured":"OpenAI. 2023. How should AI systems behave, and who should decide? OpenAI Blog (16 February 2023). https:\/\/openai.com\/blog\/how-should-ai-systems-behave"},{"key":"e_1_3_2_1_122_1","volume-title":"The Black Box Society: The Secret Algorithms that Control Money and Information","author":"Pasquale Frank","unstructured":"Frank Pasquale. 2015. The Black Box Society: The Secret Algorithms that Control Money and Information. Harvard University Press, Cambridge, MA."},{"key":"e_1_3_2_1_123_1","volume-title":"The seductive diversion of \u2018solving\u2019 bias in artificial intelligence","author":"Powles Julia","year":"2018","unstructured":"Julia Powles and Helen Nissenbaum. 2018. The seductive diversion of \u2018solving\u2019 bias in artificial intelligence. Medium (7 December 2018). https:\/\/onezero.medium.com\/the-seductive-diversion-of-solving-bias-in-artificial-intelligence-890df5e5ef53"},{"key":"e_1_3_2_1_124_1","author":"Raji Inioluwa\u00a0Deborah","year":"2020","unstructured":"Inioluwa\u00a0Deborah Raji, Andrew Smart, Rebecca\u00a0N. White, Margaret Mitchell, Timnit Gebru, Ben Hutchinson, Jamila Smith-Loud, Daniel Theron, and Parker Barnes. 2020. Closing the AI Accountability Gap: Defining an End-to-End Framework for Internal Algorithmic Auditing. arxiv:2001.00973\u00a0[cs.CY]"},{"key":"e_1_3_2_1_125_1","volume-title":"A Theory of Justice","author":"Rawls John","unstructured":"John Rawls. 1971. A Theory of Justice. Belknap Press of Harvard University Press, Cambridge, Mass."},{"key":"e_1_3_2_1_126_1","volume-title":"Political Liberalism: Expanded Edition","author":"Rawls John","year":"2005","unstructured":"John Rawls. 2005. Political Liberalism: Expanded Edition. Columbia University Press, New York."},{"key":"e_1_3_2_1_127_1","doi-asserted-by":"crossref","unstructured":"Beatrice Savoldi Marco Gaido Luisa Bentivogli Matteo Negri and Marco Turchi. 2021. Gender Bias in Machine Translation. 
arxiv:2104.06001\u00a0[cs.CL]","DOI":"10.1162\/tacl_a_00401"},{"key":"e_1_3_2_1_128_1","doi-asserted-by":"publisher","unstructured":"Reva Schwartz Apostol Vassilev Kristen\u00a0K. Greene Lori Perine Andrew Burt and Patrick Hall. 2022. Towards a Standard for Identifying and Managing Bias in Artificial Intelligence. https:\/\/doi.org\/10.6028\/NIST.SP.1270","DOI":"10.6028\/NIST.SP.1270"},{"key":"e_1_3_2_1_129_1","doi-asserted-by":"publisher","DOI":"10.1145\/3287560.3287598"},{"key":"e_1_3_2_1_130_1","doi-asserted-by":"publisher","DOI":"10.1145\/3479577"},{"key":"e_1_3_2_1_131_1","volume-title":"When reality monitoring fails: The role of imagination in stereotype maintenance. Journal of Personality and Social Psychology 52 (04","author":"Slusher P.","year":"1987","unstructured":"Morgan\u00a0P. Slusher and Craig\u00a0A. Anderson. 1987. When reality monitoring fails: The role of imagination in stereotype maintenance. Journal of Personality and Social Psychology 52 (04 1987), 653\u2013662. https:\/\/doi.org\/10.1037\/\/0022-3514.52.4.653"},{"key":"e_1_3_2_1_132_1","volume-title":"Using Artificial Intelligence and Algorithms. Federal Trade Commission Business Blog (8","author":"Smith Andrew","year":"2020","unstructured":"Andrew Smith. 2020. Using Artificial Intelligence and Algorithms. Federal Trade Commission Business Blog (8 April 2020)."},{"key":"e_1_3_2_1_133_1","volume-title":"App Lensa Generated Nudes From My Childhood Photos. Wired (7","author":"Snow Olivia","year":"2022","unstructured":"Olivia Snow. 2022. \u2018Magic Avatar\u2019 App Lensa Generated Nudes From My Childhood Photos. Wired (7 December 2022). https:\/\/www.wired.com\/story\/lensa-artificial-intelligence-csem\/"},{"key":"e_1_3_2_1_134_1","volume-title":"What to know about Lensa, the AI portrait app all over social media. CNN Style (11","author":"Sottile Zoe","year":"2022","unstructured":"Zoe Sottile. 2022. What to know about Lensa, the AI portrait app all over social media. CNN Style (11 December 2022). https:\/\/www.cnn.com\/style\/article\/lensa-ai-app-art-explainer-trnd\/index.html"},{"key":"e_1_3_2_1_135_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445932"},{"key":"e_1_3_2_1_136_1","doi-asserted-by":"publisher","DOI":"10.1145\/3465416.3483305"},{"key":"e_1_3_2_1_137_1","doi-asserted-by":"publisher","DOI":"10.1145\/2460276.2460278"},{"key":"e_1_3_2_1_138_1","doi-asserted-by":"publisher","DOI":"10.1214\/aoms"},{"key":"e_1_3_2_1_139_1","first-page":"83","article-title":"An FDA for Algorithms","volume":"69","author":"Tutt Andrew","year":"2017","unstructured":"Andrew Tutt. 2017. An FDA for Algorithms. Administrative Law Review 69 (2017), 83\u2013123.","journal-title":"Administrative Law Review"},{"key":"e_1_3_2_1_140_1","unstructured":"U.S. Department of Housing and Urban Development. 2019. Charge of Discrimination FHEO No. 01-18-0323-8."},{"key":"e_1_3_2_1_141_1","volume-title":"Ethical and social risks of harm from Language Models. DeepMind Report","author":"Laura Weidinger","year":"2021","unstructured":"Laura Weidinger 2021. Ethical and social risks of harm from Language Models. DeepMind Report (2021)."},{"key":"e_1_3_2_1_142_1","volume-title":"How Machine Learning Pushes Us to Define Fairness. Harvard Business Review (6","author":"Weinberger David","year":"2019","unstructured":"David Weinberger. 2019. How Machine Learning Pushes Us to Define Fairness. Harvard Business Review (6 November 2019). 
https:\/\/hbr.org\/2019\/11\/how-machine-learning-pushes-us-to-define-fairness"},{"key":"e_1_3_2_1_143_1","unstructured":"White House Office of Science and Technology Policy. 2022. Blueprint for an AI Bill of Rights: Making Automated Systems Work for the American People. https:\/\/www.whitehouse.gov\/wp-content\/uploads\/2022\/10\/Blueprint-for-an-AI-Bill-of-Rights.pdf"},{"key":"e_1_3_2_1_144_1","volume-title":"Do Artifacts Have Politics? Daedalus 109, 1","author":"Winner Langdon","year":"1980","unstructured":"Langdon Winner. 1980. Do Artifacts Have Politics? Daedalus 109, 1 (1980), 121\u2013136."},{"key":"e_1_3_2_1_145_1","doi-asserted-by":"publisher","DOI":"10.1145\/3531146.3533183"},{"key":"e_1_3_2_1_146_1","unstructured":"Robert Wolfe Yiwei Yang Bill Howe and Aylin Caliskan. 2022. Contrastive Language-Vision AI Models Pretrained on Web-Scraped Multimodal Data Exhibit Sexual Objectification Bias. (2022). https:\/\/arxiv.org\/abs\/2212.11261"},{"key":"e_1_3_2_1_147_1","author":"Yu Kun-Hsing","year":"2018","unstructured":"Kun-Hsing Yu, Andrew\u00a0L Beam, and Isaac\u00a0S Kohane. 2018. Artificial intelligence in healthcare. Nature biomedical engineering 2, 10 (2018), 719."},{"key":"e_1_3_2_1_148_1","doi-asserted-by":"crossref","unstructured":"Dora Zhao Angelina Wang and Olga Russakovsky. 2021. Understanding and Evaluating Racial Biases in Image Captioning. arxiv:2106.08503\u00a0[cs.CV]","DOI":"10.1109\/ICCV48922.2021.01456"}],"event":{"name":"AIES '23: AAAI\/ACM Conference on AI, Ethics, and Society","location":"Montr\u00e9al QC Canada","acronym":"AIES '23","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence"]},"container-title":["Proceedings of the 2023 AAAI\/ACM Conference on AI, Ethics, and Society"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3600211.3604695","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3600211.3604695","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T16:37:39Z","timestamp":1750178259000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3600211.3604695"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,8]]},"references-count":147,"alternative-id":["10.1145\/3600211.3604695","10.1145\/3600211"],"URL":"https:\/\/doi.org\/10.1145\/3600211.3604695","relation":{},"subject":[],"published":{"date-parts":[[2023,8,8]]},"assertion":[{"value":"2023-08-29","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
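The object above is a single Crossref REST API work record; the bibliographic payload sits under its "message" key. A minimal sketch of retrieving and flattening such a record, assuming Python with the third-party `requests` package and network access to api.crossref.org (the DOI used is the one in the record itself; the same fields could equally be read straight from the JSON above without a network call):

```python
# Fetch the Crossref work record shown above and print a short citation line.
# Assumptions: `requests` is installed; api.crossref.org is reachable.
import requests

DOI = "10.1145/3600211.3604695"  # "DOI" field of the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object mirrors the record above

title = work["title"][0]  # "title" is a list, usually with one entry
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in work.get("author", [])
)
year = work["issued"]["date-parts"][0][0]  # e.g. [[2023, 8, 8]] -> 2023

print(f"{authors} ({year}). {title}. https://doi.org/{work['DOI']}")
print(f"References deposited: {len(work.get('reference', []))}")  # 147 here
```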