{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,15]],"date-time":"2025-07-15T03:20:42Z","timestamp":1752549642934,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":65,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,8,8]],"date-time":"2023-08-08T00:00:00Z","timestamp":1691452800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,8,8]]},"DOI":"10.1145\/3600211.3604707","type":"proceedings-article","created":{"date-parts":[[2023,8,29]],"date-time":"2023-08-29T18:41:37Z","timestamp":1693334497000},"page":"679-690","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":3,"title":["When Fair Classification Meets Noisy Protected Attributes"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8540-3698","authenticated-orcid":false,"given":"Avijit","family":"Ghosh","sequence":"first","affiliation":[{"name":"Khoury College of Computer Sciences, Northeastern University, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-1412-9157","authenticated-orcid":false,"given":"Pablo","family":"Kvitca","sequence":"additional","affiliation":[{"name":"Khoury College of Computer Sciences, Northeastern University, USA and Amazon, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5268-004X","authenticated-orcid":false,"given":"Christo","family":"Wilson","sequence":"additional","affiliation":[{"name":"Khoury College of Computer Sciences, Northeastern University, USA"}]}],"member":"320","published-online":{"date-parts":[[2023,8,29]]},"reference":[{"volume-title":"Using publicly available information to proxy for unidentified race and ethnicity: A methodology and 
assessment","year":"2014","key":"e_1_3_2_1_1_1","unstructured":"2014. Using publicly available information to proxy for unidentified race and ethnicity: A methodology and assessment. Consumer Financial Protection Bureau. https:\/\/files.consumerfinance.gov\/f\/201409_cfpb_report_proxy-methodology.pdf"},{"key":"e_1_3_2_1_2_1","volume-title":"International Conference on Machine Learning. PMLR, 60\u201369","author":"Agarwal Alekh","year":"2018","unstructured":"Alekh Agarwal, Alina Beygelzimer, Miroslav Dud\u00edk, John Langford, and Hanna Wallach. 2018. A reductions approach to fair classification. In International Conference on Machine Learning. PMLR, 60\u201369."},{"key":"e_1_3_2_1_3_1","volume-title":"International Conference on Machine Learning. PMLR, 120\u2013129","author":"Agarwal Alekh","year":"2019","unstructured":"Alekh Agarwal, Miroslav Dud\u00edk, and Zhiwei\u00a0Steven Wu. 2019. Fair regression: Quantitative definitions and reduction-based algorithms. In International Conference on Machine Learning. PMLR, 120\u2013129."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33011418"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445888"},{"key":"e_1_3_2_1_6_1","unstructured":"Julia Angwin Jeff Larson Surya Mattu and Lauren Kirchner. 2016. Machine bias. ProPublica. https:\/\/www.propublica.org\/article\/machine-bias-risk-assessments-in-criminal-sentencing."},{"key":"e_1_3_2_1_7_1","volume-title":"Effectiveness of equalized odds for fair classification under imperfect group information. arXiv preprint arXiv:1906.03284","author":"Awasthi Pranjal","year":"2019","unstructured":"Pranjal Awasthi, Matth\u00e4us Kleindessner, and Jamie Morgenstern. 2019. Effectiveness of equalized odds for fair classification under imperfect group information. 
arXiv preprint arXiv:1906.03284 (2019)."},{"key":"e_1_3_2_1_8_1","unstructured":"Sid Basu Ruthie Berman Adam Bloomston John Campbell Anne Diaz Nanako Era Benjamin Evans Sukhada Palkar and Skyler Wharton. 2020. Measuring discrepancies in Airbnb guest acceptance rates using anonymized demographic data. AirBNB. https:\/\/news.airbnb.com\/wp-content\/uploads\/sites\/4\/2020\/06\/Project-Lighthouse-Airbnb-2020-06-12.pdf."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","unstructured":"Rachel K.\u00a0E. Bellamy Kuntal Dey Michael Hind Samuel\u00a0C. Hoffman Stephanie Houde Kalapriya Kannan Pranay Lohia Jacquelyn Martino Sameep Mehta Aleksandra Mojsilovic Seema Nagar Karthikeyan\u00a0Natesan Ramamurthy John Richards Diptikalyan Saha Prasanna Sattigeri Moninder Singh Kush\u00a0R. Varshney and Yunfeng Zhang. 2018. AI Fairness 360: An Extensible Toolkit for Detecting Understanding and Mitigating Unwanted Algorithmic Bias. https:\/\/doi.org\/10.48550\/ARXIV.1810.01943","DOI":"10.48550\/ARXIV.1810.01943"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351095.3372877"},{"key":"e_1_3_2_1_11_1","volume-title":"Conference on fairness, accountability and transparency. 77\u201391","author":"Buolamwini Joy","year":"2018","unstructured":"Joy Buolamwini and Timnit Gebru. 2018. Gender shades: Intersectional accuracy disparities in commercial gender classification. In Conference on fairness, accountability and transparency. 77\u201391."},{"key":"e_1_3_2_1_12_1","volume-title":"Optimized pre-processing for discrimination prevention. Advances in neural information processing systems 30","author":"Calmon Flavio","year":"2017","unstructured":"Flavio Calmon, Dennis Wei, Bhanukiran Vinzamuri, Karthikeyan Natesan\u00a0Ramamurthy, and Kush\u00a0R Varshney. 2017. Optimized pre-processing for discrimination prevention. Advances in neural information processing systems 30 (2017)."},{"key":"e_1_3_2_1_13_1","volume-title":"International Conference on Machine Learning. 
PMLR, 1349\u20131361","author":"Celis L\u00a0Elisa","year":"2021","unstructured":"L\u00a0Elisa Celis, Lingxiao Huang, Vijay Keswani, and Nisheeth\u00a0K Vishnoi. 2021. Fair classification with noisy protected attributes: A framework with provable guarantees. In International Conference on Machine Learning. PMLR, 1349\u20131361."},{"volume-title":"Ethics of Data and Analytics","author":"Dastin Jeffrey","key":"e_1_3_2_1_14_1","unstructured":"Jeffrey Dastin. 2018. Amazon scraps secret AI recruiting tool that showed bias against women. In Ethics of Data and Analytics. Auerbach Publications, 296\u2013299."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00449"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"e_1_3_2_1_17_1","volume-title":"Retiring adult: New datasets for fair machine learning. Advances in neural information processing systems 34","author":"Ding Frances","year":"2021","unstructured":"Frances Ding, Moritz Hardt, John Miller, and Ludwig Schmidt. 2021. Retiring adult: New datasets for fair machine learning. Advances in neural information processing systems 34 (2021), 6478\u20136490."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/2090236.2090255"},{"key":"e_1_3_2_1_19_1","volume-title":"Assessing and mitigating unfairness in credit models with Fairlearn. https:\/\/www.ey.com\/en_ca\/financial-services\/assessing-and-mitigating-unfairness-in-credit-models. [Accessed: March 16th","author":"EY.","year":"2023","unstructured":"EY. 2020. Assessing and mitigating unfairness in credit models with Fairlearn. https:\/\/www.ey.com\/en_ca\/financial-services\/assessing-and-mitigating-unfairness-in-credit-models. 
[Accessed: March 16th, 2023]."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/2783258.2783311"},{"key":"e_1_3_2_1_21_1","volume-title":"Principled artificial intelligence: Mapping consensus in ethical and rights-based approaches to principles for AI","author":"Fjeld Jessica","year":"2020","unstructured":"Jessica Fjeld, Nele Achten, Hannah Hilligoss, Adam Nagy, and Madhulika Srikumar. 2020. Principled artificial intelligence: Mapping consensus in ethical and rights-based approaches to principles for AI. Berkman Klein Center Research Publication2020-1 (2020)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1145\/3287560.3287589"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/3506803"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3531146.3533128"},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1145\/3514094.3534157"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1145\/3278721.3278722"},{"key":"e_1_3_2_1_27_1","volume-title":"Equality of opportunity in supervised learning. arXiv preprint arXiv:1610.02413","author":"Hardt Moritz","year":"2016","unstructured":"Moritz Hardt, Eric Price, and Nathan Srebro. 2016. Equality of opportunity in supervised learning. arXiv preprint arXiv:1610.02413 (2016)."},{"key":"e_1_3_2_1_28_1","volume-title":"International Conference on Machine Learning. PMLR","author":"Hashimoto Tatsunori","year":"2018","unstructured":"Tatsunori Hashimoto, Megha Srivastava, Hongseok Namkoong, and Percy Liang. 2018. Fairness without demographics in repeated loss minimization. In International Conference on Machine Learning. PMLR, 1929\u20131938."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1145\/3468264.3468565"},{"key":"e_1_3_2_1_30_1","unstructured":"IBM. 2022. AI Ethics: IBM\u2019s multidisciplinary multidimensional approach to trustworthy AI. 
https:\/\/www.ibm.com\/artificial-intelligence\/ethics."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1287\/mnsc.2020.3850"},{"key":"e_1_3_2_1_32_1","volume-title":"Data preprocessing techniques for classification without discrimination. Knowledge and information systems 33, 1","author":"Kamiran Faisal","year":"2012","unstructured":"Faisal Kamiran and Toon Calders. 2012. Data preprocessing techniques for classification without discrimination. Knowledge and information systems 33, 1 (2012), 1\u201333."},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICDM.2012.45"},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-33486-3_3"},{"key":"e_1_3_2_1_35_1","volume-title":"Fairface: Face attribute dataset for balanced race, gender, and age. arXiv preprint arXiv:1908.04913","author":"K\u00e4rkk\u00e4inen Kimmo","year":"2019","unstructured":"Kimmo K\u00e4rkk\u00e4inen and Jungseock Joo. 2019. Fairface: Face attribute dataset for balanced race, gender, and age. arXiv preprint arXiv:1908.04913 (2019)."},{"key":"e_1_3_2_1_36_1","first-page":"83","article-title":"Sulla determinazione empirica di una legge di distribuzione","volume":"4","author":"Kolmogorov Andrey","year":"1933","unstructured":"Andrey Kolmogorov. 1933. Sulla determinazione empirica di una legge di distribuzione. Inst. Ital. Attuari, Giorn. 4 (1933), 83\u201391.","journal-title":"Inst. Ital. Attuari, Giorn."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/3178876.3186133"},{"key":"e_1_3_2_1_38_1","volume-title":"International Conference on Machine Learning. PMLR, 5491\u20135500","author":"Kumar I\u00a0Elizabeth","year":"2020","unstructured":"I\u00a0Elizabeth Kumar, Suresh Venkatasubramanian, Carlos Scheidegger, and Sorelle Friedler. 2020. Problems with Shapley-value-based explanations as feature importance measures. In International Conference on Machine Learning. 
PMLR, 5491\u20135500."},{"key":"e_1_3_2_1_39_1","volume-title":"Chi","author":"Lahoti Preethi","year":"2020","unstructured":"Preethi Lahoti, Alex Beutel, Jilin Chen, Kang Lee, Flavien Prost, Nithum Thain, Xuezhi Wang, and Ed Chi. 2020. Fairness without demographics through adversarially reweighted learning. Advances in neural information processing systems 33 (2020), 728\u2013740."},{"key":"e_1_3_2_1_40_1","volume-title":"Noise-tolerant fair classification. Advances in neural information processing systems 32","author":"Lamy Alex","year":"2019","unstructured":"Alex Lamy, Ziyuan Zhong, Aditya\u00a0K Menon, and Nakul Verma. 2019. Noise-tolerant fair classification. Advances in neural information processing systems 32 (2019)."},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"e_1_3_2_1_42_1","volume-title":"LiFT: A Scalable Framework for Measuring Fairness in ML Applications. https:\/\/github.com\/linkedin\/LiFT. [Accessed: March 16th","author":"In.","year":"2023","unstructured":"LinkedIn. 2021. LiFT: A Scalable Framework for Measuring Fairness in ML Applications. https:\/\/github.com\/linkedin\/LiFT. [Accessed: March 16th, 2023]."},{"key":"e_1_3_2_1_43_1","volume-title":"A unified approach to interpreting model predictions. Advances in neural information processing systems 30","author":"Lundberg M","year":"2017","unstructured":"Scott\u00a0M Lundberg and Su-In Lee. 2017. A unified approach to interpreting model predictions. Advances in neural information processing systems 30 (2017)."},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-78818-6_5"},{"key":"e_1_3_2_1_45_1","volume-title":"On a test of whether one of two random variables is stochastically larger than the other. The annals of mathematical statistics","author":"Mann B","year":"1947","unstructured":"Henry\u00a0B Mann and Donald\u00a0R Whitney. 1947. 
On a test of whether one of two random variables is stochastically larger than the other. The annals of mathematical statistics (1947), 50\u201360."},{"key":"e_1_3_2_1_46_1","volume-title":"A survey on bias and fairness in machine learning. arXiv preprint arXiv:1908.09635","author":"Mehrabi Ninareh","year":"2019","unstructured":"Ninareh Mehrabi, Fred Morstatter, Nripsuta Saxena, Kristina Lerman, and Aram Galstyan. 2019. A survey on bias and fairness in machine learning. arXiv preprint arXiv:1908.09635 (2019)."},{"key":"e_1_3_2_1_47_1","unstructured":"Microsoft. 2022. Microsoft Responsible AI Standard v2. https:\/\/query.prod.cms.rt.microsoft.com\/cms\/api\/am\/binary\/RE4ZPmV."},{"key":"e_1_3_2_1_48_1","volume-title":"International Conference on Machine Learning. PMLR, 7066\u20137075","author":"Mozannar Hussein","year":"2020","unstructured":"Hussein Mozannar, Mesrob Ohannessian, and Nathan Srebro. 2020. Fair learning with private demographic data. In International Conference on Machine Learning. PMLR, 7066\u20137075."},{"key":"e_1_3_2_1_49_1","unstructured":"OECD. 2022. OECD AI Principles overview. https:\/\/oecd.ai\/en\/ai-principles."},{"key":"e_1_3_2_1_50_1","volume-title":"On fairness and calibration. Advances in neural information processing systems 30","author":"Pleiss Geoff","year":"2017","unstructured":"Geoff Pleiss, Manish Raghavan, Felix Wu, Jon Kleinberg, and Kilian\u00a0Q Weinberger. 2017. On fairness and calibration. Advances in neural information processing systems 30 (2017)."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1145\/2939672.2939778"},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.5555\/3396126.3396130"},{"key":"e_1_3_2_1_53_1","first-page":"3","article-title":"On the estimation of the discrepancy between empirical curves of distribution for two independent samples","volume":"2","author":"Smirnov V","year":"1939","unstructured":"Nikolai\u00a0V Smirnov. 1939. 
On the estimation of the discrepancy between empirical curves of distribution for two independent samples. Bull. Math. Univ. Moscou 2, 2 (1939), 3\u201314.","journal-title":"Bull. Math. Univ. Moscou"},{"key":"e_1_3_2_1_54_1","volume-title":"Impact of HbA1c measurement on hospital readmission rates: analysis of 70,000 clinical database patient records. BioMed research international 2014","author":"Strack Beata","year":"2014","unstructured":"Beata Strack, Jonathan\u00a0P DeShazo, Chris Gennings, Juan\u00a0L Olmo, Sebastian Ventura, Krzysztof\u00a0J Cios, and John\u00a0N Clore. 2014. Impact of HbA1c measurement on hospital readmission rates: analysis of 70,000 clinical database patient records. BioMed research international 2014 (2014)."},{"key":"e_1_3_2_1_55_1","volume-title":"International Conference on Machine Learning. PMLR, 3319\u20133328","author":"Sundararajan Mukund","year":"2017","unstructured":"Mukund Sundararajan, Ankur Taly, and Qiqi Yan. 2017. Axiomatic attribution for deep networks. In International Conference on Machine Learning. PMLR, 3319\u20133328."},{"key":"e_1_3_2_1_56_1","unstructured":"The White House. 2022. Blueprint for an AI Bill of Rights: Making Automated Systems work for the American People. https:\/\/www.whitehouse.gov\/ostp\/ai-bill-of-rights\/."},{"key":"e_1_3_2_1_57_1","unstructured":"UNESCO. 2022. Draft text of the Recommendation on the Ethics of Artificial Intelligence. https:\/\/unesdoc.unesco.org\/ark:\/48223\/pf0000377897."},{"key":"e_1_3_2_1_58_1","volume-title":"International Conference on Machine Learning. PMLR, 6373\u20136382","author":"Ustun Berk","year":"2019","unstructured":"Berk Ustun, Yang Liu, and David Parkes. 2019. Fairness without harm: Decoupled classifiers with preference guarantees. In International Conference on Machine Learning. PMLR, 6373\u20136382."},{"volume-title":"Optimal transport","author":"Villani C\u00e9dric","key":"e_1_3_2_1_59_1","unstructured":"C\u00e9dric Villani. 2009. 
The wasserstein distances. In Optimal transport. Springer, 93\u2013111."},{"key":"e_1_3_2_1_60_1","volume-title":"Robust optimization for fairness with noisy protected groups. Advances in neural information processing systems 33","author":"Wang Serena","year":"2020","unstructured":"Serena Wang, Wenshuo Guo, Harikrishna Narasimhan, Andrew Cotter, Maya Gupta, and Michael Jordan. 2020. Robust optimization for fairness with noisy protected groups. Advances in neural information processing systems 33 (2020), 5190\u20135203."},{"key":"e_1_3_2_1_61_1","unstructured":"Linda\u00a0F Wightman. 1998. LSAC National Longitudinal Bar Passage Study. LSAC Research Report Series. (1998)."},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442188.3445928"},{"key":"e_1_3_2_1_63_1","unstructured":"Muhammad\u00a0Bilal Zafar Isabel Valera Manuel\u00a0Gomez Rogriguez and Krishna\u00a0P Gummadi. 2017. Fairness constraints: Mechanisms for fair classification. In Artificial intelligence and statistics. PMLR 962\u2013970."},{"key":"e_1_3_2_1_64_1","volume-title":"International conference on machine learning. PMLR, 325\u2013333","author":"Zemel Rich","year":"2013","unstructured":"Rich Zemel, Yu Wu, Kevin Swersky, Toni Pitassi, and Cynthia Dwork. 2013. Learning fair representations. In International conference on machine learning. 
PMLR, 325\u2013333."},{"key":"e_1_3_2_1_65_1","doi-asserted-by":"publisher","DOI":"10.1145\/3278721.3278779"}],"event":{"name":"AIES '23: AAAI\/ACM Conference on AI, Ethics, and Society","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence"],"location":"Montr\u00e9al QC Canada","acronym":"AIES '23"},"container-title":["Proceedings of the 2023 AAAI\/ACM Conference on AI, Ethics, and Society"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3600211.3604707","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3600211.3604707","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T16:37:39Z","timestamp":1750178259000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3600211.3604707"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,8]]},"references-count":65,"alternative-id":["10.1145\/3600211.3604707","10.1145\/3600211"],"URL":"https:\/\/doi.org\/10.1145\/3600211.3604707","relation":{},"subject":[],"published":{"date-parts":[[2023,8,8]]},"assertion":[{"value":"2023-08-29","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}