{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T20:02:30Z","timestamp":1770926550464,"version":"3.50.1"},"reference-count":41,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T00:00:00Z","timestamp":1770854400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T00:00:00Z","timestamp":1770854400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Cluster Comput"],"published-print":{"date-parts":[[2026,6]]},"DOI":"10.1007\/s10586-026-05971-8","type":"journal-article","created":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T19:02:55Z","timestamp":1770922975000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Fine-tuning and prompting: a strategy for mitigating societal biases in large language models"],"prefix":"10.1007","volume":"29","author":[{"given":"Pradeep","family":"Kamboj","sequence":"first","affiliation":[]},{"given":"Shailender","family":"Kumar","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,2,12]]},"reference":[{"key":"5971_CR1","unstructured":"Lu,Y., Hu, Y., Foroosh, H., Jin, W., Liu, F.: STRUX: An LLM for decision-making with structured explanations (2024). https:\/\/arxiv.org\/abs\/2410.12583"},{"key":"5971_CR2","unstructured":"Eigner, E., H\u00e4ndler, T.: Determinants of LLM-assisted decision-making (2024). Available: https:\/\/arxiv.org\/abs\/2402.17385"},{"key":"5971_CR3","doi-asserted-by":"publisher","first-page":"265","DOI":"10.1007\/978-3-031-55865-8_10","volume-title":"Natural Language Processing in Biomedicine: A Practical Guide","author":"Y Wu","year":"2024","unstructured":"Wu, Y.: Large Language model and text generation. In: Xu, H., Demner Fushman, D. (eds.) Natural Language Processing in Biomedicine: A Practical Guide, pp. 265\u2013297. Springer International Publishing, Cham (2024). https:\/\/doi.org\/10.1007\/978-3-031-55865-8_10"},{"key":"5971_CR4","doi-asserted-by":"publisher","unstructured":"Wang, L., et al.: Benchmarking and improving long-text translation with large language models. In: Ku, L.-W., Martins, A., Srikumar, V. (eds.) Findings of the Association for Computational Linguistics: ACL 2024, pp. 7175\u20137187. Association for Computational Linguistics, Bangkok, Thailand (2024). https:\/\/doi.org\/10.18653\/v1\/2024.findings-acl.428","DOI":"10.18653\/v1\/2024.findings-acl.428"},{"key":"5971_CR5","doi-asserted-by":"publisher","unstructured":"Ghatora, P.S., Hosseini, S.E., Pervez, S., Iqbal, M.J., Shaukat, N.: Sentiment analysis of product reviews using machine learning and pre-trained LLM. Big Data Cogn. Comput. 8(12) (2024). https:\/\/doi.org\/10.3390\/bdcc8120199","DOI":"10.3390\/bdcc8120199"},{"key":"5971_CR6","doi-asserted-by":"publisher","first-page":"100065","DOI":"10.1016\/j.nlp.2024.100065","volume":"7","author":"MA Arefeen","year":"2024","unstructured":"Arefeen, M.A., Debnath, B., Chakradhar, S.: LeanContext: Cost-efficient domain-specific question answering using LLMs. Nat. Lang. Process. J. 7, 100065 (2024). https:\/\/doi.org\/10.1016\/j.nlp.2024.100065","journal-title":"Nat. Lang. Process. J."},{"key":"5971_CR7","doi-asserted-by":"crossref","unstructured":"Leidinger, A., Rogers, R.: How are LLMs mitigating stereotyping harms? Learning from search engine studies. In: Proceedings of the AAAI\/ACM Conference on AI, Ethics, and Society, pp. 839\u2013854 (2024)","DOI":"10.1609\/aies.v7i1.31684"},{"key":"5971_CR8","doi-asserted-by":"publisher","unstructured":"Kotek, H., Dockum, R., Sun, D.: Gender bias and stereotypes in Large Language Models. In: Proceedings of The ACM Collective Intelligence Conference, in CI \u201923, pp. 12\u201324. Association for Computing Machinery, New York, NY, USA (2023). https:\/\/doi.org\/10.1145\/3582269.3615599","DOI":"10.1145\/3582269.3615599"},{"key":"5971_CR9","doi-asserted-by":"crossref","unstructured":"Shrawgi, H., Rath, P., Singhal, T., Dandapat, S.: Uncovering stereotypes in large language models: A task complexity-based approach. In: Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1841\u20131857 (2024)","DOI":"10.18653\/v1\/2024.eacl-long.111"},{"key":"5971_CR10","doi-asserted-by":"publisher","unstructured":"Kwak, D.-H., Holtkamp, P., Kim, S.S.: Measuring and controlling social desirability bias: Applications in information systems research. J. Assoc. Inf. Syst. no. January. pp. 317\u2013345 (2019). https:\/\/doi.org\/10.17705\/1jais.00537","DOI":"10.17705\/1jais.00537"},{"key":"5971_CR11","doi-asserted-by":"publisher","unstructured":"Gallegos, I.O., et al.: Self-debiasing Large Language Models: Zero-shot recognition and reduction of stereotypes. In: Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pp. 873\u2013888 (2025). https:\/\/doi.org\/10.18653\/v1\/2025.naacl-short.74","DOI":"10.18653\/v1\/2025.naacl-short.74"},{"key":"5971_CR12","doi-asserted-by":"publisher","unstructured":"Touvron, H., et al.: Llama 2: Open foundation and Fine-Tuned chat models. https:\/\/doi.org\/10.48550\/arXiv.2307.09288 (2023)","DOI":"10.48550\/arXiv.2307.09288"},{"key":"5971_CR13","unstructured":"Wei, J., et al.: Chain-of-thought prompting elicits reasoning in Large Language Models (2023). https:\/\/arxiv.org\/abs\/2201.11903"},{"key":"5971_CR14","unstructured":"Conover, M., et al.: Free Dolly: Introducing the world\u2019s first truly open instruction-tuned LLM (2023). https:\/\/www.databricks.com\/blog\/2023\/04\/12\/dolly-first-open-commercially-viable-instruction-tuned-llm. Accessed 30 Jun 2023"},{"key":"5971_CR15","unstructured":"Nadeem, M., Bethke, A., Reddy, S.: StereoSet: Measuring stereotypical bias in pretrained language models. https:\/\/arxiv.org\/abs\/2004.09456 (2020)"},{"key":"5971_CR16","doi-asserted-by":"publisher","unstructured":"Raj, C., Mukherjee, A., Caliskan, A., Anastasopoulos, A., Zhu, Z.: Breaking bias, building bridges: Evaluation and mitigation of social biases in LLMs via contact hypothesis, in AIES 2024, (2024), pp. 1180\u20131189. https:\/\/doi.org\/10.48550\/arXiv.2407.02030","DOI":"10.48550\/arXiv.2407.02030"},{"issue":"1","key":"5971_CR17","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1038\/s41597-025-05156-9","volume":"12","author":"K Abramski","year":"2025","unstructured":"Abramski, K., Improta, R., Rossetti, G., Stella, M.: The ``LLM world of words'' English free association norms generated by Large Language Models. Sci. Data. 12(1), 1\u201316 (2025). https:\/\/doi.org\/10.1038\/s41597-025-05156-9","journal-title":"Sci. Data"},{"key":"5971_CR18","doi-asserted-by":"crossref","unstructured":"Zhang, T., Zeng, Z., Xiao, Y., Zhuang, H.: Genderalign: An alignment dataset for mitigating gender bias in Large Language Models (2024)","DOI":"10.18653\/v1\/2025.acl-long.553"},{"key":"5971_CR19","unstructured":"Lin, L., Wang, L., Guo, J., Wong, K.: Investigating bias in LLM-based bias detection: Disparities between LLMs and human perception. In: 31st International Conference on Computational Linguistics, pp. 10634\u201310649 (2025). http:\/\/arxiv.org\/abs\/2403.14896"},{"key":"5971_CR20","doi-asserted-by":"publisher","unstructured":"Raza, S., Garg, M., John, D., Raza, S., Ding, C.: Nbias: A natural language processing framework for BIAS identification in text. Expert Syst. Appl. 237(PB), 121542 (2024). https:\/\/doi.org\/10.1016\/j.eswa.2023.121542","DOI":"10.1016\/j.eswa.2023.121542"},{"key":"5971_CR21","doi-asserted-by":"publisher","unstructured":"Tang, K., et al.: Gendercare: A comprehensive framework for assessing and reducing gender bias in Large Language Models. In: Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, pp. 1196\u20131210 (2024). https:\/\/doi.org\/10.1145\/3658644.3670284","DOI":"10.1145\/3658644.3670284"},{"key":"5971_CR22","unstructured":"Bel\u00e9m, C.G., Seshadri, P., Razeghi, Y., Singh, S.: Are models biased on text without gender-related language? (2024). https:\/\/arxiv.org\/abs\/2405.00588"},{"key":"5971_CR23","doi-asserted-by":"publisher","unstructured":"Kamboj, P., Kumar, S., Goyal, V.: Measuring and mitigating gender bias in contextualized word embeddings. In: 2023 IEEE International Conference on Blockchain and Distributed Systems Security (ICBDS), pp. 1\u20135 (2023). https:\/\/doi.org\/10.1109\/ICBDS58040.2023.10346586","DOI":"10.1109\/ICBDS58040.2023.10346586"},{"key":"5971_CR24","first-page":"1","volume":"21","author":"C. Raffel","year":"2023","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res. 21, 1\u201367 (2023). https:\/\/arxiv.org\/abs\/1910.10683","journal-title":"J. Mach. Learn. Res."},{"key":"5971_CR25","doi-asserted-by":"publisher","unstructured":"Huang, D., Bu, Q., Zhang, J., Xie, X., Chen, J., Cui, H.: Bias testing and mitigation in LLM-based code generation (2024). https:\/\/doi.org\/10.48550\/arXiv.2309.14345","DOI":"10.48550\/arXiv.2309.14345"},{"key":"5971_CR26","doi-asserted-by":"publisher","unstructured":"Fan, Z., Chen, R., Xu, R., Liu, Z.: BiasAlert: A plug-and-play tool for social bias detection in LLMs. In: Proceedings of the Conference on Empirical Methods in Natural Language Processing, pp. 14778\u201314790 (2024). https:\/\/doi.org\/10.18653\/v1\/2024.emnlp-main.820","DOI":"10.18653\/v1\/2024.emnlp-main.820"},{"key":"5971_CR27","doi-asserted-by":"publisher","unstructured":"Sap, M., Gabriel, S., Qin, L., Jurafsky, D., Smith, N.A., Choi, Y.: Social bias frames: Reasoning about social and power implications of language. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, Association for Computational Linguistics, pp. 5477\u20135490 (2020). https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.486","DOI":"10.18653\/v1\/2020.acl-main.486"},{"key":"5971_CR28","doi-asserted-by":"crossref","unstructured":"Echterhoff, J., Liu, Y., Alessa, A., McAuley, J., He, Z.: Cognitive bias in decision-making with LLMs (2024). https:\/\/arxiv.org\/abs\/2403.00811","DOI":"10.18653\/v1\/2024.findings-emnlp.739"},{"key":"5971_CR29","doi-asserted-by":"crossref","unstructured":"Furniturewala, S., et al.: Thinking fair and slow: On the efficacy of structured prompts for debiasing language models (2024). https:\/\/arxiv.org\/abs\/2405.10431","DOI":"10.18653\/v1\/2024.emnlp-main.13"},{"key":"5971_CR30","unstructured":"Kamruzzaman, M., Kim, G.L.: Prompting techniques for reducing social bias in LLMs through system 1 and system 2 cognitive processes (2024). https:\/\/arxiv.org\/abs\/2404.17218"},{"key":"5971_CR31","unstructured":"Hida, R., Kaneko, M., Okazaki, N.: Social bias evaluation for Large Language Models requires prompt variations (2024). https:\/\/arxiv.org\/abs\/2407.03129"},{"key":"5971_CR32","doi-asserted-by":"publisher","unstructured":"Dong, X., Wang, Y., Yu, P.S., Caverlee, J.: Disclosure and mitigation of gender bias in LLMs (2024). https:\/\/doi.org\/10.48550\/arXiv.2402.11190","DOI":"10.48550\/arXiv.2402.11190"},{"key":"5971_CR33","doi-asserted-by":"publisher","unstructured":"Bartl, M., Leavy, S.: From showgirls to performers: Fine-tuning with gender-inclusive language for bias reduction in LLMs. In: Fale\u0144ska, A., Basta, C., Costa-juss\u00e0, M., Goldfarb-Tarrant, S., Nozza, D. (eds.) Proceedings of the 5th Workshop on gender bias in Natural Language Processing (GeBNLP), pp. 280\u2013294. Association for Computational Linguistics, Bangkok, Thailand (2024). https:\/\/doi.org\/10.18653\/v1\/2024.gebnlp-1.18","DOI":"10.18653\/v1\/2024.gebnlp-1.18"},{"key":"5971_CR34","doi-asserted-by":"crossref","unstructured":"Oba, D., Kaneko, M., Bollegala, D.: In-contextual gender bias suppression for Large Language Models. In, EACL 2024\u201318th Conference of the European Chapter of the Association for Computational Linguistics, Findings of EACL 2024, Association for Computational Linguistics (ACL), pp. 1722\u20131742. https:\/\/aclanthology.org\/2024.findings-eacl.121\/","DOI":"10.18653\/v1\/2024.findings-eacl.121"},{"key":"5971_CR35","doi-asserted-by":"publisher","first-page":"130193","DOI":"10.1016\/j.neucom.2025.130193","volume":"639","author":"Q. Wang","year":"2025","unstructured":"Wang, Q., Fu, Y., Cao, Y., Wang, S., Tian, Z., Ding, L.: Recursively summarizing enables long-term dialogue memory in large Language models. Neurocomputing. 639, 130193 (2025). https:\/\/doi.org\/10.1016\/J.NEUCOM.2025.130193","journal-title":"Neurocomputing"},{"key":"5971_CR36","doi-asserted-by":"crossref","unstructured":"Tjuatja, L., Chen, V., Wu, S.T., Talwalkar, A., Neubig, G.: Do LLMs exhibit human-like response biases? A case study in survey design (2024). https:\/\/arxiv.org\/abs\/2311.04076","DOI":"10.1162\/tacl_a_00685"},{"key":"5971_CR37","doi-asserted-by":"publisher","unstructured":"Lu, Q., Qiu, B., Ding, L., Zhang, K., Kocmi, T., Tao, D.: Error analysis prompting enables human-like translation evaluation in Large Language Models. In: Proceedings of the annual meeting of the Association for Computational Linguistics, Association for Computational Linguistics (ACL), pp. 8801\u20138816 (2024). https:\/\/doi.org\/10.18653\/v1\/2024.findings-acl.520","DOI":"10.18653\/v1\/2024.findings-acl.520"},{"key":"5971_CR38","unstructured":"Xu, Z., Peng, K., Ding, L., Tao, D., Lu, X.: Take care of your prompt bias! investigating and mitigating prompt bias in factual knowledge extraction. In, 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation, LREC-COLING 2024 - Main Conference Proceedings, pp. 15552\u201315565 (2024). https:\/\/aclanthology.org\/lrec-main.1352\/. Accessed 05 Sept 2025"},{"key":"5971_CR39","doi-asserted-by":"publisher","unstructured":"Wang, X., Pan, J., Ding, L., Biemann, C.: Mitigating hallucinations in large vision-language models with instruction contrastive decoding. In: Proceedings of the Annual Meeting of the Association for Computational Linguistics, pp. 15840\u201315853 (2024). https:\/\/doi.org\/10.18653\/v1\/2024.findings-acl.937","DOI":"10.18653\/v1\/2024.findings-acl.937"},{"key":"5971_CR40","unstructured":"Houlsby, N., et al.: Parameter-Efficient Transfer Learning for NLP (2019). https:\/\/arxiv.org\/abs\/1902.00751"},{"key":"5971_CR41","unstructured":"Dettmers, T., Pagnoni, A., Holtzman, A., Zettlemoyer, L.: QLoRA: Efficient finetuning of quantized LLMs (2023). https:\/\/arxiv.org\/abs\/2305.14314"}],"container-title":["Cluster Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10586-026-05971-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10586-026-05971-8","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10586-026-05971-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T19:03:09Z","timestamp":1770922989000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10586-026-05971-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,12]]},"references-count":41,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2026,6]]}},"alternative-id":["5971"],"URL":"https:\/\/doi.org\/10.1007\/s10586-026-05971-8","relation":{},"ISSN":["1386-7857","1573-7543"],"issn-type":[{"value":"1386-7857","type":"print"},{"value":"1573-7543","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2,12]]},"assertion":[{"value":"1 April 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 October 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 January 2026","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 February 2026","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no relevant financial or non-financial interests to disclose.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"149"}}