{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,28]],"date-time":"2026-04-28T12:15:16Z","timestamp":1777378516058,"version":"3.51.4"},"reference-count":121,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T00:00:00Z","timestamp":1773964800000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Array"],"published-print":{"date-parts":[[2026,7]]},"DOI":"10.1016\/j.array.2026.100773","type":"journal-article","created":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T16:46:18Z","timestamp":1774543578000},"page":"100773","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["A review of fairness challenges in natural language processing"],"prefix":"10.1016","volume":"30","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-4694-4718","authenticated-orcid":false,"given":"Talukder Hasnat","family":"Zadid","sequence":"first","affiliation":[]},{"given":"Shahidul Morsalin","family":"Jahin","sequence":"additional","affiliation":[]},{"given":"Aninda Roy","family":"Dhruba","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0009-5231-3432","authenticated-orcid":false,"given":"Sharia 
Arfin","family":"Tanim","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0009-1096-8596","authenticated-orcid":false,"given":"Abdullah Muhammad","family":"Hamja","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0009-5864-4013","authenticated-orcid":false,"given":"Md. Rakib","family":"Hasan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4587-4636","authenticated-orcid":false,"given":"Md Saef Ullah","family":"Miah","sequence":"additional","affiliation":[]},{"given":"Md. Imamul","family":"Islam","sequence":"additional","affiliation":[]},{"given":"Ahmed Al","family":"Mansur","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.array.2026.100773_b1","series-title":"Proceedings of the 58th annual meeting of the association for computational linguistics","article-title":"Predictive biases in natural language processing models: A conceptual framework and overview","author":"Shah","year":"2020"},{"key":"10.1016\/j.array.2026.100773_b2","article-title":"Man is to computer programmer as woman is to homemaker? 
Debiasing word embeddings","volume":"29","author":"Bolukbasi","year":"2016","journal-title":"Adv Neural Inf Process Syst"},{"issue":"6334","key":"10.1016\/j.array.2026.100773_b3","doi-asserted-by":"crossref","first-page":"183","DOI":"10.1126\/science.aal4230","article-title":"Semantics derived automatically from language corpora contain human-like biases","volume":"356","author":"Caliskan","year":"2017","journal-title":"Science"},{"key":"10.1016\/j.array.2026.100773_b4","series-title":"Proceedings of the 2024 conference on empirical methods in natural language processing","article-title":"Metrics for what, metrics for whom: Assessing actionability of bias evaluation metrics in NLP","author":"Delobelle","year":"2024"},{"issue":"2","key":"10.1016\/j.array.2026.100773_b5","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3597307","article-title":"Biases in large language models: Origins, inventory, and discussion","volume":"15","author":"Navigli","year":"2023","journal-title":"ACM J Data Inf Qual"},{"key":"10.1016\/j.array.2026.100773_b6","series-title":"Language (technology) is power: A critical survey of \u201cbias\u201d in nlp","author":"Blodgett","year":"2020"},{"issue":"2","key":"10.1016\/j.array.2026.100773_b7","doi-asserted-by":"crossref","DOI":"10.1561\/116.00000064","article-title":"Bias and fairness in chatbots: An overview","volume":"13","author":"Xue","year":"2024","journal-title":"APSIPA Trans Signal Inf Process"},{"key":"10.1016\/j.array.2026.100773_b8","series-title":"Predictive biases in natural language processing models: A conceptual framework and overview","author":"Shah","year":"2019"},{"key":"10.1016\/j.array.2026.100773_b9","series-title":"On measures of biases and harms in NLP","author":"Dev","year":"2021"},{"key":"10.1016\/j.array.2026.100773_b10","series-title":"Bridging fairness and environmental sustainability in natural language 
processing","author":"Hessenthaler","year":"2022"},{"issue":"7","key":"10.1016\/j.array.2026.100773_b11","doi-asserted-by":"crossref","first-page":"3184","DOI":"10.3390\/app11073184","article-title":"A survey on bias in deep NLP","volume":"11","author":"Garrido-Mu\u00f1oz","year":"2021","journal-title":"Appl Sci"},{"issue":"6","key":"10.1016\/j.array.2026.100773_b12","article-title":"Reducing racial and gender bias in machine learning and natural language processing tasks using a GAN approach","volume":"3","author":"Mandis","year":"2023","journal-title":"Int J High Sch Res"},{"key":"10.1016\/j.array.2026.100773_b13","series-title":"Multi-perspective explanation of data bias in AI: A case study","author":"Adewumi","year":"2024"},{"issue":"2","key":"10.1016\/j.array.2026.100773_b14","article-title":"A taxonomy of bias in machine learning: Classification, sources, and implications for ethical AI","volume":"1","author":"Bhambri","year":"2025","journal-title":"SGS - Eng Sci"},{"key":"10.1016\/j.array.2026.100773_b15","article-title":"Latent space bias mitigation for predicting at-risk students","volume":"7","author":"Al-Zawqari","year":"2024","journal-title":"Comput Educ: Artif Intell"},{"issue":"3","key":"10.1016\/j.array.2026.100773_b16","doi-asserted-by":"crossref","first-page":"406","DOI":"10.1136\/amiajnl-2013-001837","article-title":"Evaluating the impact of pre-annotation on annotation speed and potential bias: Natural language processing gold standard development for clinical named entity recognition in clinical trial announcements","volume":"21","author":"Lingren","year":"2014","journal-title":"J Am Med Inform. Assoc"},{"key":"10.1016\/j.array.2026.100773_b17","series-title":"Mitigating biases to embrace diversity: A comprehensive annotation benchmark for toxic language","author":"Hou","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b18","first-page":"9","article-title":"Geoparsing: Solved or biased? 
An evaluation of geographic biases in geoparsing","volume":"3","author":"Liu","year":"2022","journal-title":"AGILE: GISci. Ser"},{"key":"10.1016\/j.array.2026.100773_b19","doi-asserted-by":"crossref","first-page":"1408","DOI":"10.1162\/tacl_a_00434","article-title":"Self-diagnosis and self-debiasing: A proposal for reducing corpus-based bias in nlp","volume":"9","author":"Schick","year":"2021","journal-title":"Trans Assoc Comput Linguist"},{"issue":"4","key":"10.1016\/j.array.2026.100773_b20","first-page":"745","article-title":"Semantic web technologies and bias in artificial intelligence: A systematic literature review","volume":"14","author":"Reyero Lobo","year":"2023","journal-title":"Semant Web"},{"key":"10.1016\/j.array.2026.100773_b21","doi-asserted-by":"crossref","DOI":"10.1016\/j.nlp.2023.100047","article-title":"Gender bias in transformers: A comprehensive review of detection and Mitigation Strategies","volume":"6","author":"Nemani","year":"2024","journal-title":"Nat Lang Process J","ISSN":"https:\/\/id.crossref.org\/issn\/2949-7191","issn-type":"print"},{"key":"10.1016\/j.array.2026.100773_b22","series-title":"Gender bias of LLM in economics: An existentialism perspective","author":"Zhong","year":"2024"},{"issue":"4","key":"10.1016\/j.array.2026.100773_b23","doi-asserted-by":"crossref","first-page":"2045","DOI":"10.1007\/s00146-023-01675-4","article-title":"Gender bias perpetuation and mitigation in AI technologies: Challenges and opportunities","volume":"39","author":"O\u2019Connor","year":"2024","journal-title":"AI & SOCIETY"},
{"key":"10.1016\/j.array.2026.100773_b24","series-title":"Proceedings of the 2023 ACM conference on fairness, accountability, and transparency","first-page":"1479","article-title":"Examining risks of racial biases in NLP tools for child protective services","author":"Field","year":"2023"},{"issue":"2","key":"10.1016\/j.array.2026.100773_b25","doi-asserted-by":"crossref","first-page":"224","DOI":"10.1515\/jtc-2023-0019","article-title":"Cultural bias in large language models: A comprehensive analysis and mitigation strategies","volume":"3","author":"Liu","year":"2023","journal-title":"J Transcult Commun"},{"issue":"8028","key":"10.1016\/j.array.2026.100773_b26","doi-asserted-by":"crossref","first-page":"147","DOI":"10.1038\/s41586-024-07856-5","article-title":"AI generates covertly racist decisions about people based on their dialect","volume":"633","author":"Hofmann","year":"2024","journal-title":"Nature"},{"key":"10.1016\/j.array.2026.100773_b27","article-title":"Racial bias in natural language processing","author":"Shearer","year":"2019","journal-title":"Oxf Insights"},{"key":"10.1016\/j.array.2026.100773_b28","doi-asserted-by":"crossref","unstructured":"Belliardo E, Kalimeri K, Mejova Y. Leave no place behind: improved geolocation in humanitarian documents. In: Proceedings of the 2023 ACM conference on information technology for social good. 2023, p. 31\u20139.","DOI":"10.1145\/3582515.3609515"},{"key":"10.1016\/j.array.2026.100773_b29","series-title":"Analyzing regional impacts of climate change using natural language processing techniques","author":"Mallick","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b30","unstructured":"Venkit PN, Srinath M, Wilson S. A Study of Implicit Language Model Bias Against People With Disabilities. In: Proceedings of the 29th international conference on computational linguistics, gyeongju, Republic of Korea. 2025."},{"key":"10.1016\/j.array.2026.100773_b31","doi-asserted-by":"crossref","unstructured":"Gadiraju V, Kane S, Dev S, Taylor A, Wang D, Denton R, Brewer R. \u201cI wouldn\u2019t say offensive but...\u201d: Disability-Centered Perspectives on Large Language Models. In: Proceedings of the 2023 ACM conference on fairness, accountability, and transparency. 
2023, p. 205\u201316.","DOI":"10.1145\/3593013.3593989"},{"key":"10.1016\/j.array.2026.100773_b32","series-title":"Unpacking the interdependent systems of discrimination: Ableist bias in NLP systems through an intersectional lens","author":"Hassan","year":"2021"},{"key":"10.1016\/j.array.2026.100773_b33","series-title":"How toxicity classifiers and large language models respond to ableism","author":"Phutane","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b34","series-title":"Survey on sociodemographic bias in natural language processing","author":"Gupta","year":"2023"},{"key":"10.1016\/j.array.2026.100773_b35","series-title":"Measuring political bias in large language models: What is said and how it is said","author":"Bang","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b36","series-title":"Assessing political bias in large language models","author":"Rettenberger","year":"2024"},{"issue":"1","key":"10.1016\/j.array.2026.100773_b37","doi-asserted-by":"crossref","DOI":"10.1016\/j.chbah.2024.100066","article-title":"Confirmation bias in AI-assisted decision-making: AI triage recommendations congruent with expert judgments increase psychologist trust and recommendation acceptance","volume":"2","author":"Bashkirova","year":"2024","journal-title":"Comput Hum Behav: Artif Humans","ISSN":"https:\/\/id.crossref.org\/issn\/2949-8821","issn-type":"print"},{"key":"10.1016\/j.array.2026.100773_b38","series-title":"Exploring cognitive bias triggers in COVID-19 misinformation tweets: A bot vs. 
Human perspective","author":"Ng","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b39","series-title":"Argumentative experience: Reducing confirmation bias on controversial issues through LLM-generated multi-persona debates","author":"Shi","year":"2025"},{"key":"10.1016\/j.array.2026.100773_b40","article-title":"Understanding latent affective bias in large pre-trained neural language models","volume":"7","author":"Kadan","year":"2024","journal-title":"Nat Lang Process J"},{"key":"10.1016\/j.array.2026.100773_b41","series-title":"Social bias in large language models for bangla: An empirical study on gender and religious bias","author":"Sadhu","year":"2024"},{"issue":"4","key":"10.1016\/j.array.2026.100773_b42","first-page":"569","article-title":"Deceptively simple: An outsider\u2019s perspective on natural language processing","volume":"45","author":"KhudaBukhsh","year":"2024","journal-title":"AI Mag"},{"key":"10.1016\/j.array.2026.100773_b43","series-title":"Findings of the association for computational linguistics: NAACL 2022","first-page":"1247","article-title":"FAtNet: Cost-effective approach towards mitigating the linguistic bias in speaker verification systems","author":"Sharma","year":"2022"},{"key":"10.1016\/j.array.2026.100773_b44","series-title":"Proceedings of the 2023 conference on empirical methods in natural language processing","first-page":"16765","article-title":"Addressing linguistic bias through a contrastive analysis of academic writing in the NLP domain","author":"Ridley","year":"2023"},{"key":"10.1016\/j.array.2026.100773_b45","series-title":"Linguistic bias in ChatGPT: Language models reinforce dialect discrimination","author":"Fleisig","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b46","series-title":"Challenges and strategies in cross-cultural NLP","author":"Hershcovich","year":"2022"},{"key":"10.1016\/j.array.2026.100773_b47","doi-asserted-by":"crossref","unstructured":"Hsieh H-Y, Huang S-C, Tsai RT-H. 
TWBias: A Benchmark for Assessing Social Bias in Traditional Chinese Large Language Models through a Taiwan Cultural Lens. In: Findings of the association for computational linguistics: EMNLP 2024. 2024, p. 8688\u2013704.","DOI":"10.18653\/v1\/2024.findings-emnlp.507"},{"key":"10.1016\/j.array.2026.100773_b48","series-title":"Bias in large language models: Origin, evaluation, and mitigation","author":"Guo","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b49","unstructured":"Khurana R, Pandey C, Gupta P, Nagrath P. Animojity: Detecting hate comments in indic languages and analysing bias against content creators. In: Proceedings of the 19th international conference on natural language processing. ICON, 2022, p. 172\u201382."},{"key":"10.1016\/j.array.2026.100773_b50","doi-asserted-by":"crossref","unstructured":"Zhao J, Shi Z, Li Y, Pei Y, Chen L, Fang M, Pechenizkiy M. More than Minorities and Majorities: Understanding Multilateral Bias in Language Generation. In: Findings of the association for computational linguistics ACL 2024. 2024, p. 
9987\u201310001.","DOI":"10.18653\/v1\/2024.findings-acl.594"},{"key":"10.1016\/j.array.2026.100773_b51","series-title":"A comprehensive survey of bias in llms: Current landscape and future directions","author":"Ranjan","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b52","series-title":"Eliciting in-context learning in vision-language models for videos through curated data distributional properties","author":"Yu","year":"2023"},{"key":"10.1016\/j.array.2026.100773_b53","series-title":"A methodology to characterize bias and harmful stereotypes in natural language processing in latin america","author":"Alemany","year":"2022"},{"key":"10.1016\/j.array.2026.100773_b54","series-title":"Analyzing the impact of data selection and fine-tuning on economic and political biases in llms","author":"Agiza","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b55","doi-asserted-by":"crossref","unstructured":"Ganh\u00f6r C, Penz D, Rekabsaz N, Lesota O, Schedl M. Unlearning protected user attributes in recommendations with adversarial training. In: Proceedings of the 45th international ACM SIGIR conference on research and development in information retrieval. 2022, p. 
2142\u20137.","DOI":"10.1145\/3477495.3531820"},{"key":"10.1016\/j.array.2026.100773_b56","doi-asserted-by":"crossref","DOI":"10.1109\/ACCESS.2024.3423323","article-title":"A systematic review of adversarial machine learning attacks, defensive controls and technologies","author":"Malik","year":"2024","journal-title":"IEEE Access"},{"key":"10.1016\/j.array.2026.100773_b57","article-title":"From feedback to insight: Uncovering gender bias in STEM evaluations through NLP","volume":"12","author":"de Leon Lopez","year":"2025","journal-title":"Soc Sci Humanit Open"},{"key":"10.1016\/j.array.2026.100773_b58","series-title":"Large language models are inconsistent and biased evaluators","author":"Stureborg","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b59","series-title":"Proceedings of the 2022 AAAI\/ACM conference on AI, ethics, and society","first-page":"411","article-title":"Towards better detection of biased language with scarce, noisy, and biased annotations","author":"Li","year":"2022"},{"issue":"4","key":"10.1016\/j.array.2026.100773_b60","article-title":"Impact of data bias on machine learning for crystal compound synthesizability predictions","volume":"5","author":"Davariashtiyani","year":"2024","journal-title":"Mach Learn: Sci Technol"},{"key":"10.1016\/j.array.2026.100773_b61","doi-asserted-by":"crossref","first-page":"26183","DOI":"10.1109\/ACCESS.2024.3360306","article-title":"Shortcut learning explanations for deep natural language processing: A survey on dataset biases","volume":"12","author":"Dogra","year":"2024","journal-title":"IEEE Access"},{"key":"10.1016\/j.array.2026.100773_b62","series-title":"AI-driven healthcare: A review on ensuring fairness and mitigating bias","author":"Chinta","year":"2025"},{"issue":"1","key":"10.1016\/j.array.2026.100773_b63","doi-asserted-by":"crossref","first-page":"383","DOI":"10.3390\/ai5010019","article-title":"A comprehensive review of AI techniques for addressing algorithmic bias in job 
hiring","volume":"5","author":"Albaroudi","year":"2024","journal-title":"AI","ISSN":"https:\/\/id.crossref.org\/issn\/2673-2688","issn-type":"print"},{"key":"10.1016\/j.array.2026.100773_b64","series-title":"Understanding and mitigating annotation bias in facial expression recognition","author":"Chen","year":"2021"},{"issue":"3","key":"10.1016\/j.array.2026.100773_b65","doi-asserted-by":"crossref","first-page":"107","DOI":"10.1145\/3446776","article-title":"Understanding deep learning (still) requires rethinking generalization","volume":"64","author":"Zhang","year":"2021","journal-title":"Commun ACM"},{"key":"10.1016\/j.array.2026.100773_b66","series-title":"Don\u2019t stop pretraining: Adapt language models to domains and tasks","author":"Gururangan","year":"2020"},{"key":"10.1016\/j.array.2026.100773_b67","series-title":"Revisiting few-sample BERT fine-tuning","author":"Zhang","year":"2020"},{"key":"10.1016\/j.array.2026.100773_b68","series-title":"A survey on bias and fairness in natural language processing","author":"Bansal","year":"2022"},{"issue":"3","key":"10.1016\/j.array.2026.100773_b69","doi-asserted-by":"crossref","first-page":"368","DOI":"10.1002\/jrsm.1533","article-title":"Risk of bias assessment in preclinical literature using natural language processing","volume":"13","author":"Wang","year":"2022","journal-title":"Res Synth Methods"},{"key":"10.1016\/j.array.2026.100773_b70","doi-asserted-by":"crossref","unstructured":"Das D, Guha S, Brubaker JR, Semaan B. The \u201cColonial Impulse\u201d of Natural Language Processing: An Audit of Bengali Sentiment Analysis Tools and Their Identity-based Biases. In: Proceedings of the 2024 CHI conference on human factors in computing systems. 2024, p. 
1\u201318.","DOI":"10.1145\/3613904.3642669"},{"key":"10.1016\/j.array.2026.100773_b71","series-title":"2023 6th international conference on signal processing and information security","first-page":"190","article-title":"Current topological and machine learning applications for bias detection in text","author":"Farrelly","year":"2023"},{"key":"10.1016\/j.array.2026.100773_b72","first-page":"1","article-title":"Toponym resolution leveraging lightweight and open-source large language models and geo-knowledge","author":"Hu","year":"2024","journal-title":"Int J Geogr Inf Sci"},{"key":"10.1016\/j.array.2026.100773_b73","series-title":"Automated ableism: An exploration of explicit disability biases in sentiment and toxicity analysis models","author":"Venkit","year":"2023"},{"key":"10.1016\/j.array.2026.100773_b74","series-title":"Findings of the association for computational linguistics: ACL 2024","first-page":"11548","article-title":"Identifying and mitigating annotation bias in natural language understanding using causal mediation analysis","author":"Sae Lim","year":"2024"},{"issue":"6","key":"10.1016\/j.array.2026.100773_b75","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3700438","article-title":"Gender bias in natural language processing and computer vision: A comparative survey","volume":"57","author":"Bartl","year":"2025","journal-title":"ACM Comput Surv"},{"key":"10.1016\/j.array.2026.100773_b76","series-title":"Extrinsic evaluation of cultural competence in large language models","author":"Bhatt","year":"2024"},{"issue":"6","key":"10.1016\/j.array.2026.100773_b77","doi-asserted-by":"crossref","first-page":"1708","DOI":"10.1007\/s11606-021-06682-z","article-title":"Testimonial injustice: Linguistic bias in the medical records of black patients and women","volume":"36","author":"Beach","year":"2021","journal-title":"J Gen Intern 
Med","ISSN":"https:\/\/id.crossref.org\/issn\/1525-1497","issn-type":"print"},{"key":"10.1016\/j.array.2026.100773_b78","article-title":"An intelligent system for classifying patient complaints using machine learning and natural language processing: Development and validation study","volume":"27","author":"Li","year":"2025","journal-title":"J Med Internet Res","ISSN":"https:\/\/id.crossref.org\/issn\/1438-8871","issn-type":"print"},{"issue":"4","key":"10.1016\/j.array.2026.100773_b79","doi-asserted-by":"crossref","first-page":"229","DOI":"10.1007\/s42452-025-06659-1","article-title":"Domain adaptation for bias mitigation in affective computing: use cases for facial emotion recognition and sentiment analysis systems","volume":"7","author":"Singhal","year":"2025","journal-title":"Discov Appl Sci"},{"issue":"4","key":"10.1016\/j.array.2026.100773_b80","doi-asserted-by":"crossref","first-page":"845","DOI":"10.29207\/resti.v7i4.5035","article-title":"Analysis and mitigation of religion bias in Indonesian natural language processing datasets","volume":"7","author":"Fauzan","year":"2023","journal-title":"J RESTI (Rekayasa Sist Dan Teknol Informasi)"},{"key":"10.1016\/j.array.2026.100773_b81","doi-asserted-by":"crossref","first-page":"53703","DOI":"10.1109\/ACCESS.2023.3276757","article-title":"Bias detection for customer interaction data: A survey on datasets, methods, and tools","volume":"11","author":"Donald","year":"2023","journal-title":"IEEE Access"},{"key":"10.1016\/j.array.2026.100773_b82","doi-asserted-by":"crossref","unstructured":"Dobesh SJ, Miller T, Newman P, Liu Y, Elglaly YN. Towards machine learning fairness education in a natural language processing course. In: Proceedings of the 54th ACM technical symposium on computer science education v. 1. 2023, p. 312\u20138.","DOI":"10.1145\/3545945.3569802"},{"key":"10.1016\/j.array.2026.100773_b83","doi-asserted-by":"crossref","unstructured":"Mai G, Yao X, Xie Y, Rao J, Li H, Zhu Q, Li Z, Lao N. 
SRL: Towards a general-purpose framework for spatial representation learning. In: Proceedings of the 32nd ACM international conference on advances in geographic information systems. 2024, p. 465\u20138.","DOI":"10.1145\/3678717.3691246"},{"key":"10.1016\/j.array.2026.100773_b84","series-title":"Diversity and language technology: How techno-linguistic bias can cause epistemic injustice","author":"Helm","year":"2023"},{"key":"10.1016\/j.array.2026.100773_b85","doi-asserted-by":"crossref","DOI":"10.2196\/50428","article-title":"Examining linguistic differences in electronic health records for diverse patients with diabetes: Natural language processing analysis","volume":"12","author":"Bilotta","year":"2024","journal-title":"JMIR Med Inform."},{"key":"10.1016\/j.array.2026.100773_b86","doi-asserted-by":"crossref","DOI":"10.1016\/j.array.2025.100378","article-title":"Research on sentiment analysis of hotel review text based on BERT-TCN-BiLSTM-attention model","volume":"25","author":"Chi","year":"2025","journal-title":"Array"},{"key":"10.1016\/j.array.2026.100773_b87","series-title":"Mitigating bias in BERT models for hiring practices","author":"Monteiro","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b88","series-title":"Men also like shopping: Reducing gender bias amplification using corpus-level constraints","author":"Zhao","year":"2017"},{"key":"10.1016\/j.array.2026.100773_b89","doi-asserted-by":"crossref","DOI":"10.1109\/TETCI.2024.3367819","article-title":"Dendritic neural network: A novel extension of dendritic neuron model","author":"Tang","year":"2024","journal-title":"IEEE Trans Emerg Top Comput Intell"},{"issue":"1","key":"10.1016\/j.array.2026.100773_b90","first-page":"1929","article-title":"Dropout: A simple way to prevent neural networks from overfitting","volume":"15","author":"Srivastava","year":"2014","journal-title":"J Mach Learn Res"},{"key":"10.1016\/j.array.2026.100773_b91","series-title":"Situated ground truths: Enhancing bias-aware AI by 
situating data labels with SituAnnotate","author":"Pandiani","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b92","series-title":"Data bias according to bipol: Men are naturally right and it is the role of women to follow their lead","author":"Pagliai","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b93","article-title":"Unveiling bias in fairness evaluations of large language models: A critical literature review of music and movie recommendation systems","author":"Sah","year":"2024","journal-title":"Zenodo"},{"key":"10.1016\/j.array.2026.100773_b94","series-title":"Artificial intelligence and cognitive science","first-page":"226","article-title":"Identity term sampling for measuring gender bias in training data","author":"Sobhani","year":"2023"},{"key":"10.1016\/j.array.2026.100773_b95","series-title":"Uncovering bias in the PlantVillage dataset","author":"Noyan","year":"2022"},{"key":"10.1016\/j.array.2026.100773_b96","doi-asserted-by":"crossref","first-page":"126832","DOI":"10.1109\/ACCESS.2022.3226517","article-title":"Tackling dataset bias with an automated collection of real-world samples","volume":"10","author":"Sevetlidis","year":"2022","journal-title":"IEEE Access"},{"key":"10.1016\/j.array.2026.100773_b97","series-title":"Automating political bias prediction","author":"Biessmann","year":"2016"},{"key":"10.1016\/j.array.2026.100773_b98","series-title":"Newb: 200,000+ sentences for political bias detection","author":"Wei","year":"2023"},{"key":"10.1016\/j.array.2026.100773_b99","series-title":"Beyond partisan leaning: A comparative analysis of political bias in large language models","author":"Peng","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b100","series-title":"Proceedings of the 33rd ACM international conference on information and knowledge management","first-page":"3922","article-title":"The elusiveness of detecting political bias in language 
models","author":"Lunardi","year":"2024"},{"issue":"1","key":"10.1016\/j.array.2026.100773_b101","first-page":"11","article-title":"Political bias in large language models","volume":"4","author":"Gover","year":"2023","journal-title":"Commons: Puget Sound J Politics"},{"key":"10.1016\/j.array.2026.100773_b102","series-title":"Benchmarking cognitive biases in large language models as evaluators","author":"Koo","year":"2023"},{"key":"10.1016\/j.array.2026.100773_b103","doi-asserted-by":"crossref","DOI":"10.1016\/j.array.2025.100478","article-title":"Comparative evaluation of ChatGPT and DeepSeek across key NLP tasks: Strengths, weaknesses, and domain-specific performance","author":"Etaiwi","year":"2025","journal-title":"Array"},{"key":"10.1016\/j.array.2026.100773_b104","unstructured":"Hongli Z, Hui H, Yunfei L, Bing X, Conghui Z, Hailong C, Muyun Y, Tiejun Z. Mitigating the Bias of Large Language Model Evaluation. In: Proceedings of the 23rd Chinese national conference on computational linguistics (volume 1: main conference). 2024, p. 1310\u20139."},{"key":"10.1016\/j.array.2026.100773_b105","unstructured":"Schmitt A, Walser M, Fahse TB. Conceptual Foundations on Debiasing for Machine Learning-Based Software. In: International conference of information systems. ICIS, 2022."},{"key":"10.1016\/j.array.2026.100773_b106","series-title":"Examining the interplay between privacy and fairness for speech processing: A review and perspective","author":"Leschanowsky","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b107","doi-asserted-by":"crossref","unstructured":"Edenberg E, Wood A. Disambiguating algorithmic bias: From neutrality to justice. In: Proceedings of the 2023 AAAI\/ACM conference on AI, ethics, and society. 2023, p. 
691\u2013704.","DOI":"10.1145\/3600211.3604695"},{"key":"10.1016\/j.array.2026.100773_b108","doi-asserted-by":"crossref","DOI":"10.1162\/coli_a_00524","article-title":"Bias and fairness in large language models: A survey","author":"Gallegos","year":"2024","journal-title":"Comput Linguist"},{"key":"10.1016\/j.array.2026.100773_b109","series-title":"EMNLP","article-title":"Balancing out bias: Achieving fairness through balanced training","author":"Han","year":"2022"},{"key":"10.1016\/j.array.2026.100773_b110","series-title":"Editable fairness: Fine-grained bias mitigation in language models","author":"Chen","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b111","series-title":"Toward understanding bias correlations for mitigation in NLP","author":"Cheng","year":"2022"},{"key":"10.1016\/j.array.2026.100773_b112","series-title":"Analyzing fairness of computer vision and natural language processing models","author":"Rashed","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b113","series-title":"Proceedings of the ACM conference","article-title":"Should fairness be a metric or a model? A model-based framework for assessing bias in machine learning pipelines","author":"Lalor","year":"2022"},{"key":"10.1016\/j.array.2026.100773_b114","series-title":"Hidden technical debts for fair machine learning in financial services","author":"Huang","year":"2021"},{"key":"10.1016\/j.array.2026.100773_b115","article-title":"Toward fair NLP models: Bias detection and mitigation in cloud-based text mining services","author":"Bornare","year":"2024","journal-title":"Int J Multidiscip Res"},{"issue":"CSCW2","key":"10.1016\/j.array.2026.100773_b116","first-page":"1","article-title":"Aligning eyes between humans and deep neural network through interactive attention alignment","volume":"6","author":"Gao","year":"2022","journal-title":"Proc the ACM Human-Comput. 
Interact"},{"issue":"20","key":"10.1016\/j.array.2026.100773_b117","doi-asserted-by":"crossref","first-page":"4272","DOI":"10.3390\/math11204272","article-title":"A comprehensive review and analysis of deep learning-based medical image adversarial attack and defense","volume":"11","author":"Muoka","year":"2023","journal-title":"Mathematics"},{"key":"10.1016\/j.array.2026.100773_b118","series-title":"Tool to overcome technical barriers for bias assessment in human language technologies","author":"Alemany","year":"2024"},{"key":"10.1016\/j.array.2026.100773_b119","unstructured":"Rachleff J, van Paasschen F, Sambuy L. Embedding Bias: Analyzing the semantics of bias in breitbart with word vectors."},{"issue":"3","key":"10.1016\/j.array.2026.100773_b120","doi-asserted-by":"crossref","first-page":"578","DOI":"10.1111\/1748-8583.12499","article-title":"Digitalization and inclusiveness of HRM practices: The example of neurodiversity initiatives","volume":"34","author":"Walkowiak","year":"2024","journal-title":"Hum Resour Manag J"},{"key":"10.1016\/j.array.2026.100773_b121","series-title":"Combating confirmation bias: A unified pseudo-labeling framework for entity 
alignment","author":"Ding","year":"2023"}],"container-title":["Array"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S2590005626000962?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S2590005626000962?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,28]],"date-time":"2026-04-28T09:11:24Z","timestamp":1777367484000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S2590005626000962"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,7]]},"references-count":121,"alternative-id":["S2590005626000962"],"URL":"https:\/\/doi.org\/10.1016\/j.array.2026.100773","relation":{},"ISSN":["2590-0056"],"issn-type":[{"value":"2590-0056","type":"print"}],"subject":[],"published":{"date-parts":[[2026,7]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"A review of fairness challenges in natural language processing","name":"articletitle","label":"Article Title"},{"value":"Array","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.array.2026.100773","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 The Authors. Published by Elsevier Inc.","name":"copyright","label":"Copyright"}],"article-number":"100773"}}