{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,16]],"date-time":"2026-02-16T18:44:10Z","timestamp":1771267450484,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":48,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2026,2,22]]},"DOI":"10.1145\/3773966.3779376","type":"proceedings-article","created":{"date-parts":[[2026,2,16]],"date-time":"2026-02-16T17:50:01Z","timestamp":1771264201000},"page":"1140-1144","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["From Personalization to Prejudice: Bias and Discrimination in Memory-Enhanced AI Agents for Recruitment"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-9292-2662","authenticated-orcid":false,"given":"Himanshu","family":"Gharat","sequence":"first","affiliation":[{"name":"Phi Labs, Quantiphi Inc., Mumbai, Maharashtra, India"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-6448-300X","authenticated-orcid":false,"given":"Himanshi","family":"Agrawal","sequence":"additional","affiliation":[{"name":"Phi Labs, Quantiphi Inc., Bengaluru, Karnataka, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2435-6859","authenticated-orcid":false,"given":"Gourab K.","family":"Patro","sequence":"additional","affiliation":[{"name":"Phi Labs, Quantiphi Inc., Bengaluru, Karnataka, India"}]}],"member":"320","published-online":{"date-parts":[[2026,2,21]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Learning, and Ranking: Harnessing Users Limited Attention. arXiv preprint arXiv:2402.14013","author":"Agarwal Arpit","year":"2024","unstructured":"Arpit Agarwal, Rad Niazadeh, and Prathamesh Patil. 2024. Misalignment, Learning, and Ranking: Harnessing Users Limited Attention. 
arXiv preprint arXiv:2402.14013 (2024)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3460231.3473895"},{"key":"e_1_3_2_1_3_1","volume-title":"Are Emily and Greg more employable than Lakisha and Jamal? A field experiment on labor market discrimination. American economic review","author":"Bertrand Marianne","year":"2004","unstructured":"Marianne Bertrand and Sendhil Mullainathan. 2004. Are Emily and Greg more employable than Lakisha and Jamal? A field experiment on labor market discrimination. American economic review, Vol. 94, 4 (2004), 991-1013."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1145\/3460231.3473897"},{"key":"e_1_3_2_1_5_1","first-page":"198","volume-title":"Proceedings of the ACM on Web Conference","author":"Cai Hongru","year":"2025","unstructured":"Hongru Cai, Yongqi Li, Wenjie Wang, Fengbin Zhu, Xiaoyu Shen, Wenjie Li, and Tat-Seng Chua. 2025. Large language models empowered personalized web agents. In Proceedings of the ACM on Web Conference 2025. 198-215."},{"key":"e_1_3_2_1_6_1","volume-title":"An algorithmic framework to control bias in bandit-based personalization. arXiv preprint arXiv:1802.08674","author":"Celis L Elisa","year":"2018","unstructured":"L Elisa Celis, Sayash Kapoor, Farnood Salehi, and Nisheeth K Vishnoi. 2018. An algorithmic framework to control bias in bandit-based personalization. arXiv preprint arXiv:1802.08674 (2018)."},{"key":"e_1_3_2_1_7_1","volume-title":"Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 8301-8327","author":"Chen Guiming","year":"2024","unstructured":"Guiming Chen, Shunian Chen, Ziche Liu, Feng Jiang, and Benyou Wang. 2024a. Humans or LLMs as the Judge? A Study on Judgement Bias. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. 
8301-8327."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11280-024-01276-1"},{"key":"e_1_3_2_1_9_1","volume-title":"Biases in Opinion Dynamics in Multi-Agent Systems of Large Language Models: A Case Study on Funding Allocation. In Findings of the Association for Computational Linguistics: NAACL 2025","author":"Cisneros-Velarde Pedro","year":"2025","unstructured":"Pedro Cisneros-Velarde. 2025. Biases in Opinion Dynamics in Multi-Agent Systems of Large Language Models: A Case Study on Funding Allocation. In Findings of the Association for Computational Linguistics: NAACL 2025. 1889-1916."},{"key":"e_1_3_2_1_10_1","volume-title":"Unmasking Conversational Bias in AI Multiagent Systems. CoRR","author":"Coppolillo Erica","year":"2025","unstructured":"Erica Coppolillo, Giuseppe Manco, and Luca Maria Aiello. 2025. Unmasking Conversational Bias in AI Multiagent Systems. CoRR (2025)."},{"key":"e_1_3_2_1_11_1","volume-title":"Proxy non-discrimination in data-driven systems. arXiv preprint arXiv:1707.08120","author":"Datta Anupam","year":"2017","unstructured":"Anupam Datta, Matt Fredrikson, Gihyuk Ko, Piotr Mardziel, and Shayak Sen. 2017. Proxy non-discrimination in data-driven systems. arXiv preprint arXiv:1707.08120 (2017)."},{"key":"e_1_3_2_1_12_1","volume-title":"Michael Carl Tschantz, and Anupam Datta","author":"Datta Amit","year":"2014","unstructured":"Amit Datta, Michael Carl Tschantz, and Anupam Datta. 2014. Automated experiments on ad privacy settings: A tale of opacity, choice, and discrimination. arXiv preprint arXiv:1408.6491 (2014)."},{"key":"e_1_3_2_1_13_1","first-page":"120","article-title":"Bias in bios: A case study of semantic representation bias in a high-stakes setting","author":"De-Arteaga Maria","year":"2019","unstructured":"Maria De-Arteaga, Alexey Romanov, Hanna Wallach, Jennifer Chayes, Christian Borgs, Alexandra Chouldechova, Sahin Geyik, Krishnaram Kenthapadi, and Adam Tauman Kalai. 2019. 
Bias in bios: A case study of semantic representation bias in a high-stakes setting. In FAccT. 120-128.","journal-title":"FAccT."},{"key":"e_1_3_2_1_14_1","volume-title":"Cfairllm: Consumer fairness evaluation in large-language model recommender system. ACM Transactions on Intelligent Systems and Technology","author":"Deldjoo Yashar","year":"2025","unstructured":"Yashar Deldjoo and Tommaso Di Noia. 2025. Cfairllm: Consumer fairness evaluation in large-language model recommender system. ACM Transactions on Intelligent Systems and Technology (2025)."},{"key":"e_1_3_2_1_15_1","first-page":"10126","article-title":"Can LLM be a Personalized Judge?","volume":"2024","author":"Dong Yijiang","year":"2024","unstructured":"Yijiang Dong, Tiancheng Hu, and Nigel Collier. 2024. Can LLM be a Personalized Judge?. In Findings of EMNLP 2024. 10126-10141.","journal-title":"Findings of EMNLP"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/582415.582418"},{"key":"e_1_3_2_1_17_1","volume-title":"The hard proxy problem: proxies aren't intentional","author":"Johnson Gabbrielle M","year":"2025","unstructured":"Gabbrielle M Johnson. 2025. The hard proxy problem: proxies aren't intentional; they're intensional. Philosophical Studies (2025), 1-29."},{"key":"e_1_3_2_1_18_1","volume-title":"Proceedings of the 2018 AAAI\/ACM Conference on AI, Ethics, and Society. 158-163","author":"Joseph Matthew","year":"2018","unstructured":"Matthew Joseph, Michael Kearns, Jamie Morgenstern, Seth Neel, and Aaron Roth. 2018. Meritocratic fairness for infinite and contextual bandits. In Proceedings of the 2018 AAAI\/ACM Conference on AI, Ethics, and Society. 158-163."},{"key":"e_1_3_2_1_19_1","volume-title":"Fairness in learning: Classic and contextual bandits. Advances in neural information processing systems","author":"Joseph Matthew","year":"2016","unstructured":"Matthew Joseph, Michael Kearns, Jamie H Morgenstern, and Aaron Roth. 2016. 
Fairness in learning: Classic and contextual bandits. Advances in neural information processing systems, Vol. 29 (2016)."},{"key":"e_1_3_2_1_20_1","volume-title":"International conference on machine learning. PMLR","author":"Kearns Michael","year":"2017","unstructured":"Michael Kearns, Aaron Roth, and Zhiwei Steven Wu. 2017. Meritocratic fairness for cross-population selection. In International conference on machine learning. PMLR, 1828-1836."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-017-1051-8"},{"key":"e_1_3_2_1_22_1","volume-title":"A survey on popularity bias in recommender systems","author":"Klimashevskaia A","year":"2023","unstructured":"A Klimashevskaia, D Jannach, M Elahi, and C Trattner. [n.d.]. A survey on popularity bias in recommender systems (2023). CoRR, abs\/2308.01118 ([n.d.])."},{"key":"e_1_3_2_1_23_1","volume-title":"Embodied Agents Meet Personalization: Exploring Memory Utilization for Personalized Assistance. arXiv preprint arXiv:2505","author":"Kwon Taeyoon","year":"2025","unstructured":"Taeyoon Kwon, Dongwook Choi, Sunghwan Kim, Hyojun Kim, Seungjun Moon, Beong-woo Kwak, Kuan-Hao Huang, and Jinyoung Yeo. 2025. Embodied Agents Meet Personalization: Exploring Memory Utilization for Personalized Assistance. arXiv preprint arXiv:2505.16348 (2025)."},{"key":"e_1_3_2_1_24_1","volume-title":"Sahin Cem Geyik, and Krishnaram Kenthapadi","author":"Lal G Roshan","year":"2020","unstructured":"G Roshan Lal, Sahin Cem Geyik, and Krishnaram Kenthapadi. 2020. Fairness-aware online personalization. arXiv preprint arXiv:2007.15270 (2020)."},{"key":"e_1_3_2_1_25_1","volume-title":"Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing. 2757-2791","author":"Li Dawei","year":"2025","unstructured":"Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al., 2025. 
From generation to judgment: Opportunities and challenges of llm-as-a-judge. In Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing. 2757-2791."},{"key":"e_1_3_2_1_26_1","volume-title":"Llms-as-judges: a comprehensive survey on llm-based evaluation methods. arXiv preprint arXiv:2412.05579","author":"Li Haitao","year":"2024","unstructured":"Haitao Li, Qian Dong, Junjie Chen, Huixue Su, Yujia Zhou, Qingyao Ai, Ziyi Ye, and Yiqun Liu. 2024. Llms-as-judges: a comprehensive survey on llm-based evaluation methods. arXiv preprint arXiv:2412.05579 (2024)."},{"key":"e_1_3_2_1_27_1","volume-title":"Fairness in AI-driven recruitment: Challenges, metrics, methods, and future directions. arXiv preprint arXiv:2405.19699","author":"Mujtaba Dena F","year":"2024","unstructured":"Dena F Mujtaba and Nihar R Mahapatra. 2024. Fairness in AI-driven recruitment: Challenges, metrics, methods, and future directions. arXiv preprint arXiv:2405.19699 (2024)."},{"key":"e_1_3_2_1_28_1","volume-title":"Understanding Bias Reinforcement in LLM Agents Debate. arXiv preprint arXiv:2503.16814","author":"Oh Jihwan","year":"2025","unstructured":"Jihwan Oh, Minchan Jeong, Jongwoo Ko, and Se-Young Yun. 2025. Understanding Bias Reinforcement in LLM Agents Debate. arXiv preprint arXiv:2503.16814 (2025)."},{"key":"e_1_3_2_1_29_1","first-page":"5073","article-title":"Don't just clean it, proxy clean it: Mitigating bias by proxy in pre-trained models","volume":"2022","author":"Panda Swetasudha","year":"2022","unstructured":"Swetasudha Panda, Ari Kobren, Michael Wick, and Qinlan Shen. 2022. Don't just clean it, proxy clean it: Mitigating bias by proxy in pre-trained models. In Findings of the Association for Computational Linguistics: EMNLP 2022. 
5073-5085.","journal-title":"Findings of the Association for Computational Linguistics: EMNLP"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/3531146.3533238"},{"key":"e_1_3_2_1_31_1","volume-title":"PersonaAgent: When Large Language Model Agents Meet Personalization at Test Time. ([n.d.]).","author":"AGENTS MEET PERSONALIZATION.","unstructured":"AGENTS MEET PERSONALIZATION. [n.d.]. PersonaAgent: When Large Language Model Agents Meet Personalization at Test Time. ([n.d.])."},{"key":"e_1_3_2_1_32_1","first-page":"469","article-title":"Mitigating bias in algorithmic hiring: Evaluating claims and practices","author":"Raghavan Manish","year":"2020","unstructured":"Manish Raghavan, Solon Barocas, Jon Kleinberg, and Karen Levy. 2020. Mitigating bias in algorithmic hiring: Evaluating claims and practices. In FAccT. 469-481.","journal-title":"FAccT."},{"key":"e_1_3_2_1_33_1","first-page":"68539","article-title":"Toolformer: Language models can teach themselves to use tools","volume":"36","author":"Schick Timo","year":"2023","unstructured":"Timo Schick, Jane Dwivedi-Yu, Roberto Dess`i, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, Vol. 36 (2023), 68539-68551.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_34_1","article-title":"Enhancing Customer Experience Personalization through AI: Leveraging Collaborative Filtering, Neural Networks, and Natural Language Processing","volume":"11","author":"Sharma Deepa","year":"2022","unstructured":"Deepa Sharma, Neha Reddy, Priya Gupta, and Rohit Sharma. 2022. Enhancing Customer Experience Personalization through AI: Leveraging Collaborative Filtering, Neural Networks, and Natural Language Processing. Journal of AI ML Research, Vol. 
11, 7 (2022).","journal-title":"Journal of AI ML Research"},{"key":"e_1_3_2_1_35_1","first-page":"8634","article-title":"Reflexion: Language agents with verbal reinforcement learning","volume":"36","author":"Shinn Noah","year":"2023","unstructured":"Noah Shinn, Federico Cassano, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. 2023. Reflexion: Language agents with verbal reinforcement learning. Advances in Neural Information Processing Systems, Vol. 36 (2023), 8634-8652.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_36_1","volume-title":"A Survey on Autonomy-Induced Security Risks in Large Model-Based Agents. arXiv preprint arXiv:2506.23844","author":"Su Hang","year":"2025","unstructured":"Hang Su, Jun Luo, Chang Liu, Xiao Yang, Yichi Zhang, Yinpeng Dong, and Jun Zhu. 2025. A Survey on Autonomy-Induced Security Risks in Large Model-Based Agents. arXiv preprint arXiv:2506.23844 (2025)."},{"key":"e_1_3_2_1_37_1","volume-title":"Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 8416-8439","author":"Tan Zhen","year":"2025","unstructured":"Zhen Tan, Jun Yan, I-Hung Hsu, Rujun Han, Zifeng Wang, Long Le, Yiwen Song, Yanfei Chen, Hamid Palangi, George Lee, et al., 2025. In prospect and retrospect: Reflective memory management for long-term personalized dialogue agents. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 8416-8439."},{"key":"e_1_3_2_1_38_1","volume-title":"Voyager: An open-ended embodied agent with large language models. arXiv preprint arXiv:2305.16291","author":"Wang Guanzhi","year":"2023","unstructured":"Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. 2023b. Voyager: An open-ended embodied agent with large language models. 
arXiv preprint arXiv:2305.16291 (2023)."},{"key":"e_1_3_2_1_39_1","first-page":"74530","article-title":"Augmenting language models with long-term memory","volume":"36","author":"Wang Weizhi","year":"2023","unstructured":"Weizhi Wang, Li Dong, Hao Cheng, Xiaodong Liu, Xifeng Yan, Jianfeng Gao, and Furu Wei. 2023a. Augmenting language models with long-term memory. NeurIPS, Vol. 36 (2023), 74530-74543.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_40_1","volume-title":"Minilm: Deep self-attention distillation for task-agnostic compression of pre-trained transformers. Advances in neural information processing systems","author":"Wang Wenhui","year":"2020","unstructured":"Wenhui Wang, Furu Wei, Li Dong, Hangbo Bao, Nan Yang, and Ming Zhou. 2020. Minilm: Deep self-attention distillation for task-agnostic compression of pre-trained transformers. Advances in neural information processing systems, Vol. 33 (2020), 5776-5788."},{"key":"e_1_3_2_1_41_1","volume-title":"Conference on learning theory. PMLR, 25-54","author":"Wang Yining","year":"2013","unstructured":"Yining Wang, Liwei Wang, Yuanzhi Li, Di He, and Tie-Yan Liu. 2013. A theoretical analysis of NDCG type ranking measures. In Conference on learning theory. PMLR, 25-54."},{"key":"e_1_3_2_1_42_1","volume-title":"Ediz Ertekin Jr, and Mar\u00eda P\u00e9rez-Ortiz","author":"Wang Ze","year":"2024","unstructured":"Ze Wang, Zekun Wu, Xin Guan, Michael Thaler, Adriano S Koshiyama, Skylar Lu, Sachin Beepath, Ediz Ertekin Jr, and Mar\u00eda P\u00e9rez-Ortiz. 2024. JobFair: A Framework for Benchmarking Gender Hiring Bias in Large Language Models. In EMNLP (Findings)."},{"key":"e_1_3_2_1_43_1","volume-title":"Denny Zhou, et al.","author":"Wei Jason","year":"2022","unstructured":"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al., 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, Vol. 
35 (2022), 24824-24837."},{"key":"e_1_3_2_1_44_1","first-page":"1578","volume-title":"Proceedings of the AAAI\/ACM Conference on AI, Ethics, and Society","volume":"7","author":"Wilson Kyra","year":"2024","unstructured":"Kyra Wilson and Aylin Caliskan. 2024. Gender, race, and intersectional bias in resume screening via language model retrieval. In Proceedings of the AAAI\/ACM Conference on AI, Ethics, and Society, Vol. 7. 1578-1590."},{"key":"e_1_3_2_1_45_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Yao Shunyu","year":"2023","unstructured":"Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik Narasimhan, and Yuan Cao. 2023. React: Synergizing reasoning and acting in language models. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_46_1","volume-title":"Personalizing dialogue agents: I have a dog, do you have pets too? arXiv preprint arXiv:1801.07243","author":"Zhang Saizheng","year":"2018","unstructured":"Saizheng Zhang, Emily Dinan, Jack Urbanek, Arthur Szlam, Douwe Kiela, and Jason Weston. 2018. Personalizing dialogue agents: I have a dog, do you have pets too? arXiv preprint arXiv:1801.07243 (2018)."},{"key":"e_1_3_2_1_47_1","volume-title":"PersonaAgent: When Large Language Model Agents Meet Personalization at Test Time. arXiv e-prints","author":"Zhang Weizhi","year":"2025","unstructured":"Weizhi Zhang, Xinyang Zhang, Chenwei Zhang, Liangwei Yang, Jingbo Shang, Zhepei Wei, Henry Peng Zou, Zijie Huang, Zhengyang Wang, Yifan Gao, et al., 2025. PersonaAgent: When Large Language Model Agents Meet Personalization at Test Time. 
arXiv e-prints (2025), arXiv-2506."},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29946"}],"event":{"name":"WSDM '26:The Nineteenth ACM International Conference on Web Search and Data Mining","location":"Boise ID USA","sponsor":["SIGKDD ACM Special Interest Group on Knowledge Discovery in Data","SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web","SIGIR ACM Special Interest Group on Information Retrieval","SIGMOD ACM Special Interest Group on Management of Data"]},"container-title":["Proceedings of the Nineteenth ACM International Conference on Web Search and Data Mining"],"original-title":[],"deposited":{"date-parts":[[2026,2,16]],"date-time":"2026-02-16T17:56:20Z","timestamp":1771264580000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3773966.3779376"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,21]]},"references-count":48,"alternative-id":["10.1145\/3773966.3779376","10.1145\/3773966"],"URL":"https:\/\/doi.org\/10.1145\/3773966.3779376","relation":{},"subject":[],"published":{"date-parts":[[2026,2,21]]},"assertion":[{"value":"2026-02-21","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}