{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,10]],"date-time":"2026-05-10T06:42:23Z","timestamp":1778395343591,"version":"3.51.4"},"reference-count":54,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2025,4,24]],"date-time":"2025-04-24T00:00:00Z","timestamp":1745452800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,4,24]],"date-time":"2025-04-24T00:00:00Z","timestamp":1745452800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Mach Learn"],"published-print":{"date-parts":[[2025,6]]},"DOI":"10.1007\/s10994-025-06767-4","type":"journal-article","created":{"date-parts":[[2025,4,24]],"date-time":"2025-04-24T17:23:58Z","timestamp":1745515438000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":8,"title":["Developing safe and responsible large language model: can we balance bias reduction and language understanding?"],"prefix":"10.1007","volume":"114","author":[{"given":"Shaina","family":"Raza","sequence":"first","affiliation":[]},{"given":"Oluwanifemi","family":"Bamgbose","sequence":"additional","affiliation":[]},{"given":"Shardul","family":"Ghuge","sequence":"additional","affiliation":[]},{"given":"Fatemeh","family":"Tavakoli","sequence":"additional","affiliation":[]},{"given":"Deepak John","family":"Reji","sequence":"additional","affiliation":[]},{"given":"Syed Raza","family":"Bashir","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,4,24]]},"reference":[{"key":"6767_CR1","unstructured":"AI, C.: Confident AI documentation. [Online; accessed 10-May-2024] (2024). https:\/\/docs.confident-ai.com\/docs\/getting-started"},{"key":"6767_CR2","unstructured":"API, P.: Perspective API (2024). https:\/\/www.perspectiveapi.com\/"},{"key":"6767_CR3","unstructured":"Almazrouei, E., Alobeidli, H., Alshamsi, A., Cappelli, A., Cojocaru, R., Debbah, M., Goffinet, E., Heslow, D., Launay, J., Malartic, Q., Noune, B., Pannier, B., Penedo, G.: Falcon-40B: an open large language model with state-of-the-art performance (2023)"},{"key":"6767_CR4","unstructured":"Bai, Y., Jones, A., Ndousse, K., Askell, A., Chen, A., DasSarma, N., Drain, D., Fort, S., Ganguli, D., Henighan, T., et al.: Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862 (2022)"},{"key":"6767_CR5","doi-asserted-by":"crossref","unstructured":"Bender, E.M., Gebru, T., McMillan-Major, A., Shmitchell, S.: On the dangers of stochastic parrots: Can language models be too big? In Proceedings of the 2021 ACM conference on fairness, accountability, and transparency, pp. 610\u2013623 (2021)","DOI":"10.1145\/3442188.3445922"},{"key":"6767_CR6","unstructured":"Bianchi, F., Suzgun, M., Attanasio, G., R\u00f6ttger, P., Jurafsky, D., Hashimoto, T., Zou, J.: Safety-tuned llamas: Lessons from improving the safety of large language models that follow instructions. arXiv preprint arXiv:2309.07875 (2023)"},{"key":"6767_CR7","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. 
D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. (2020). Language models are few-shot learners. Advances in neural information processing systems, 33, 1877\u20131901.","journal-title":"Advances in neural information processing systems"},{"issue":"70","key":"6767_CR8","first-page":"1","volume":"25","author":"HW Chung","year":"2024","unstructured":"Chung, H. W., Hou, L., Longpre, S., Zoph, B., Tay, Y., Fedus, W., Li, Y., Wang, X., Dehghani, M., Brahma, S., et al. (2024). Scaling instruction-finetuned language models. Journal of Machine Learning Research, 25(70), 1\u201353.","journal-title":"Journal of Machine Learning Research"},{"key":"6767_CR9","doi-asserted-by":"publisher","unstructured":"Dettmers, T., Pagnoni, A., Holtzman, A., Zettlemoyer, L.: QLoRA: Efficient Finetuning of Quantized LLMs. arXiv. arXiv:2305.14314 [cs] (2023). https:\/\/doi.org\/10.48550\/arXiv.2305.14314 . http:\/\/arxiv.org\/abs\/2305.14314 Accessed 2024-02-04","DOI":"10.48550\/arXiv.2305.14314"},{"key":"6767_CR10","doi-asserted-by":"publisher","unstructured":"Dhamala, J., Sun, T., Kumar, V., Krishna, S., Pruksachatkun, Y., Chang, K.-W., Gupta, R.: BOLD: Dataset and metrics for measuring biases in open-ended language generation. In: Proceedings of the 2021 ACM conference on fairness, accountability, and transparency, pp. 862\u2013872 (2021). https:\/\/doi.org\/10.1145\/3442188.3445924 . arXiv:2101.11718 [cs]. http:\/\/arxiv.org\/abs\/2101.11718 Accessed 2023-11-04","DOI":"10.1145\/3442188.3445924"},{"key":"6767_CR11","doi-asserted-by":"crossref","unstructured":"Ding, B., Qin, C., Liu, L., Chia, Y.K., Joty, S., Li, B., Bing, L.: Is gpt-3 a good data annotator? arXiv preprint arXiv:2212.10450 (2022)","DOI":"10.18653\/v1\/2023.acl-long.626"},{"key":"6767_CR12","doi-asserted-by":"crossref","unstructured":"Dodge, J., Prewitt, T., Combes, R., Odmark, E., Schwartz, R., Strubell, E., Luccioni, A.S., Smith, N.A., DeCario, N., Buchanan, W.: Measuring the carbon intensity of AI in cloud instances. In Proceedings of the 2022 ACM conference on fairness, accountability, and transparency, pp. 1877\u20131894 (2022)","DOI":"10.1145\/3531146.3533234"},{"key":"6767_CR13","doi-asserted-by":"crossref","unstructured":"Fleiss, J.L.: Measuring nominal scale agreement among many raters. Psychological bulletin 76(5), 378 (1971). Publisher: American Psychological Association","DOI":"10.1037\/h0031619"},{"issue":"3","key":"6767_CR14","doi-asserted-by":"publisher","first-page":"1097","DOI":"10.1162\/coli_a_00524","volume":"50","author":"IO Gallegos","year":"2024","unstructured":"Gallegos, I. O., Rossi, R. A., Barrow, J., Tanjim, M. M., Kim, S., Dernoncourt, F., Yu, T., Zhang, R., & Ahmed, N. K. (2024). Bias and fairness in large language models: A survey. Computational Linguistics, 50(3), 1097\u20131179.","journal-title":"Computational Linguistics"},{"key":"6767_CR15","doi-asserted-by":"crossref","unstructured":"Gallegos, I.O., Rossi, R.A., Barrow, J., Tanjim, M.M., Kim, S., Dernoncourt, F., Yu, T., Zhang, R., Ahmed, N.K.: Bias and fairness in large language models: a survey. 
arXiv preprint arXiv:2309.00770 (2023)","DOI":"10.1162\/coli_a_00524"},{"key":"6767_CR16","unstructured":"Ganguli, D., Lovitt, L., Kernion, J., Askell, A., Bai, Y., Kadavath, S., Mann, B., Perez, E., Schiefer, N., Ndousse, K., Jones, A., Bowman, S., Chen, A., Conerly, T., DasSarma, N., Drain, D., Elhage, N., El-Showk, S., Fort, S., Hatfield-Dodds, Z., Henighan, T., Hernandez, D., Hume, T., Jacobson, J., Johnston, S., Kravec, S., Olsson, C., Ringer, S., Tran-Johnson, E., Amodei, D., Brown, T., Joseph, N., McCandlish, S., Olah, C., Kaplan, J., Clark, J.: Red teaming language models to reduce harms: methods, scaling behaviors, and lessons learned. arXiv. arXiv:2209.07858 [cs] (2022). http:\/\/arxiv.org\/abs\/2209.07858 Accessed 2024-01-19"},{"key":"6767_CR17","unstructured":"Guardrails: Guardrails AI | Your Enterprise AI needs Guardrails - guardrailsai.com (2024). https:\/\/www.guardrailsai.com\/docs\/ Accessed 2024-02-01"},{"key":"6767_CR18","doi-asserted-by":"publisher","unstructured":"Hartvigsen, T., Gabriel, S., Palangi, H., Sap, M., Ray, D., Kamar, E.: ToxiGen: A large-scale machine-generated dataset for adversarial and implicit hate speech detection. In: Proceedings of the 60th annual meeting of the association for computational linguistics (Volume 1: Long Papers), pp. 3309\u20133326. Association for Computational Linguistics, Dublin, Ireland (2022). https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.234 . https:\/\/aclanthology.org\/2022.acl-long.234 Accessed 2023-11-17","DOI":"10.18653\/v1\/2022.acl-long.234"},{"key":"6767_CR19","unstructured":"Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., Steinhardt, J.: Measuring massive multitask language understanding. arXiv preprint arXiv:2009.03300 (2020)"},{"key":"6767_CR20","doi-asserted-by":"crossref","unstructured":"Hosseini, S., Palangi, H., Awadallah, A.H.: An empirical study of metrics to measure representational harms in pre-trained language models. arXiv. arXiv:2301.09211 [cs] (2023). http:\/\/arxiv.org\/abs\/2301.09211 Accessed 2023-11-04","DOI":"10.18653\/v1\/2023.trustnlp-1.11"},{"key":"6767_CR21","unstructured":"Inan, H., Upasani, K., Chi, J., Rungta, R., Iyer, K., Mao, Y., Tontchev, M., Hu, Q., Fuller, B., Testuggine, D., et al.: Llama guard: Llm-based input-output safeguard for human-ai conversations. arXiv preprint arXiv:2312.06674 (2023)"},{"key":"6767_CR22","unstructured":"Jiang, A.Q., Sablayrolles, A., Mensch, A., Bamford, C., Chaplot, D.S., Casas, D.d.l., Bressand, F., Lengyel, G., Lample, G., Saulnier, L., Lavaud, L.R., Lachaux, M.-A., Stock, P., Scao, T.L., Lavril, T., Wang, T., Lacroix, T., Sayed, W.E.: Mistral 7B. arXiv. arXiv:2310.06825 [cs] (2023). http:\/\/arxiv.org\/abs\/2310.06825 Accessed 2024-02-05"},{"issue":"6","key":"6767_CR23","doi-asserted-by":"publisher","first-page":"540","DOI":"10.4097\/kjae.2015.68.6.540","volume":"68","author":"TK Kim","year":"2015","unstructured":"Kim, T. K. (2015). T test as a parametric statistic. Korean Journal of Anesthesiology, 68(6), 540\u2013546.","journal-title":"Korean Journal of Anesthesiology"},{"key":"6767_CR24","doi-asserted-by":"crossref","unstructured":"Lewis, M., Liu, Y., Goyal, N., Ghazvininejad, M., Mohamed, A., Levy, O., Stoyanov, V., Zettlemoyer, L.: Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. 
arXiv preprint arXiv:1910.13461 (2019)","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"6767_CR25","unstructured":"Li, X.L., Liang, P.: Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190 (2021)"},{"key":"6767_CR26","unstructured":"Liang, P., Bommasani, R., Lee, T., Tsipras, D., Soylu, D., Yasunaga, M., Zhang, Y., Narayanan, D., Wu, Y., Kumar, A., Newman, B., Yuan, B., Yan, B., Zhang, C., Cosgrove, C., Manning, C.D., R\u00e9, C., Acosta-Navas, D., Hudson, D.A., Zelikman, E., Durmus, E., Ladhak, F., Rong, F., Ren, H., Yao, H., Wang, J., Santhanam, K., Orr, L., Zheng, L., Yuksekgonul, M., Suzgun, M., Kim, N., Guha, N., Chatterji, N., Khattab, O., Henderson, P., Huang, Q., Chi, R., Xie, S.M., Santurkar, S., Ganguli, S., Hashimoto, T., Icard, T., Zhang, T., Chaudhary, V., Wang, W., Li, X., Mai, Y., Zhang, Y., Koreeda, Y.: Holistic evaluation of language models. arXiv. arXiv:2211.09110 [cs] (2023). http:\/\/arxiv.org\/abs\/2211.09110 Accessed 2024-01-15"},{"key":"6767_CR27","doi-asserted-by":"crossref","unstructured":"Lin, S., Hilton, J., Evans, O.: Truthfulqa: Measuring how models mimic human falsehoods. arXiv preprint arXiv:2109.07958 (2021)","DOI":"10.18653\/v1\/2022.acl-long.229"},{"key":"6767_CR28","doi-asserted-by":"crossref","unstructured":"Miller, A.H., Feng, W., Fisch, A., Lu, J., Batra, D., Bordes, A., Parikh, D., Weston, J.: ParlAI: A dialog research software platform. arXiv preprint arXiv:1705.06476 (2017)","DOI":"10.18653\/v1\/D17-2014"},{"key":"6767_CR29","doi-asserted-by":"publisher","unstructured":"Nadeem, M., Bethke, A., Reddy, S.: StereoSet: Measuring stereotypical bias in pretrained language models. In: Zong, C., Xia, F., Li, W., Navigli, R. (eds.) Proceedings of the 59th annual meeting of the association for computational linguistics and the 11th international joint conference on natural language processing (Volume 1: Long Papers), pp. 5356\u20135371. Association for Computational Linguistics, Online (2021). https:\/\/doi.org\/10.18653\/v1\/2021.acl-long.416 . https:\/\/aclanthology.org\/2021.acl-long.416 Accessed 2024-02-06","DOI":"10.18653\/v1\/2021.acl-long.416"},{"key":"6767_CR30","unstructured":"OpenAI: Moderation - OpenAI API (2024). https:\/\/platform.openai.com\/docs\/guides\/moderation"},{"key":"6767_CR31","first-page":"27730","volume":"35","author":"L Ouyang","year":"2022","unstructured":"Ouyang, L., Wu, J., Jiang, X., Almeida, D., Wainwright, C., Mishkin, P., Zhang, C., Agarwal, S., Slama, K., Ray, A., et al. (2022). Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35, 27730\u201327744.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6767_CR32","unstructured":"Qi, X., Zeng, Y., Xie, T., Chen, P.-Y., Jia, R., Mittal, P., Henderson, P.: Fine-tuning aligned language models compromises safety, Even when users do not intend to! arXiv. arXiv:2310.03693 [cs] (2023). http:\/\/arxiv.org\/abs\/2310.03693 Accessed 2024-02-01"},{"key":"6767_CR33","unstructured":"Qi, X., Zeng, Y., Xie, T., Chen, P.-Y., Jia, R., Mittal, P., Henderson, P.: Fine-tuning aligned language models compromises safety, even when users do not intend to! arXiv preprint arXiv:2310.03693 (2023)"},{"issue":"8","key":"6767_CR34","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I., et al. (2019). Language models are unsupervised multitask learners. 
OpenAI blog, 1(8), 9.","journal-title":"OpenAI blog"},{"key":"6767_CR35","unstructured":"Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., Zhou, Y., Li, W., Liu, P.J.: Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683 (2019)"},{"key":"6767_CR36","doi-asserted-by":"crossref","unstructured":"Raza, S., Raval, A., Chatrath, V.: Mbias: Mitigating bias in large language models while retaining context. arXiv preprint arXiv:2405.11290 (2024)","DOI":"10.18653\/v1\/2024.wassa-1.9"},{"key":"6767_CR37","unstructured":"Raza, S., Bamgbose, O., Ghuge, S., Pandya, D.: Safe and sound: Evaluating language models for bias mitigation and understanding. In: NeurIPS Safe Generative AI Workshop 2024 (2024)"},{"key":"6767_CR38","unstructured":"Raza, S., Saleh, C., Hasan, E., Ogidi, F., Powers, M., Chatrath, V., Lotif, M., Javadi, R., Zahid, A., Khazaie, V.R.: Vilbias: A framework for bias detection using linguistic and visual cues. arXiv preprint arXiv:2412.17052 (2024)"},{"key":"6767_CR39","doi-asserted-by":"crossref","unstructured":"Ross, A., Willson, V.L.: One-sample T-test. In: Basic and Advanced Statistical Tests, pp. 9\u201312. Brill (2017)","DOI":"10.1007\/978-94-6351-086-8_2"},{"key":"6767_CR40","unstructured":"Schlicht, I.B., Altiok, D., Taouk, M., Flek, L.: Pitfalls of conversational llms on news debiasing. arXiv preprint arXiv:2404.06488 (2024)"},{"key":"6767_CR41","unstructured":"Si, C., Gan, Z., Yang, Z., Wang, S., Wang, J., Boyd-Graber, J., Wang, L.: Prompting GPT-3 to be reliable. arXiv. arXiv:2210.09150 [cs] (2023). http:\/\/arxiv.org\/abs\/2210.09150 Accessed 2024-02-02"},{"key":"6767_CR42","doi-asserted-by":"publisher","unstructured":"Smith, E.M., Hall, M., Kambadur, M., Presani, E., Williams, A.: \u201cI\u2019m sorry to hear that\u201d: Finding new biases in language models with a holistic descriptor dataset. In: Proceedings of the 2022 conference on empirical methods in natural language processing, pp. 9180\u20139211. Association for Computational Linguistics, Abu Dhabi, United Arab Emirates (2022). https:\/\/doi.org\/10.18653\/v1\/2022.emnlp-main.625 . https:\/\/aclanthology.org\/2022.emnlp-main.625 Accessed 2023-11-17","DOI":"10.18653\/v1\/2022.emnlp-main.625"},{"key":"6767_CR43","unstructured":"Smith, E.M., Gonzalez-Rico, D., Dinan, E., Boureau, Y.-L.: Controlling style in generated dialogue. arXiv preprint arXiv:2009.10855 (2020)"},{"key":"6767_CR44","unstructured":"Taori, R., Gulrajani, I., Zhang, T., Dubois, Y., Li, X., Guestrin, C., Liang, P., Hashimoto, T.B.: Alpaca: A strong, replicable instruction-following model. Stanford Center for Research on Foundation Models. https:\/\/crfm.stanford.edu\/2023\/03\/13\/alpaca.html (2023)"},{"key":"6767_CR45","unstructured":"Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.-A., Lacroix, T., Rozi\u00e8re, B., Goyal, N., Hambro, E., Azhar, F., et al.: Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"6767_CR46","unstructured":"Wan, Z., Wang, X., Liu, C., Alam, S., Zheng, Y., Qu, Z., Yan, S., Zhu, Y., Zhang, Q., Chowdhury, M., et al.: Efficient large language models: A survey. arXiv preprint arXiv:2312.03863 (2023)"},{"key":"6767_CR47","unstructured":"Wang, B., Chen, W., Pei, H., Xie, C., Kang, M., Zhang, C., Xu, C., Xiong, Z., Dutta, R., Schaeffer, R., et al. (2024) Decodingtrust: A comprehensive assessment of trustworthiness in gpt models. 
Advances in Neural Information Processing Systems 36"},{"key":"6767_CR48","unstructured":"Wang, Y., Zhong, W., Li, L., Mi, F., Zeng, X., Huang, W., Shang, L., Jiang, X., Liu, Q.: Aligning large language models with human: A survey. arXiv preprint arXiv:2307.12966 (2023)"},{"key":"6767_CR49","unstructured":"Weidinger, L., Mellor, J., Rauh, M., Griffin, C., Uesato, J., Huang, P.-S., Cheng, M., Glaese, M., Balle, B., Kasirzadeh, A., et al.: Ethical and social risks of harm from language models. arXiv preprint arXiv:2112.04359 (2021)"},{"key":"6767_CR50","doi-asserted-by":"publisher","unstructured":"Wolf, T., Debut, L., Sanh, V., Chaumond, J., Delangue, C., Moi, A., Cistac, P., Rault, T., Louf, R., Funtowicz, M., Davison, J., Shleifer, S., Platen, P., Ma, C., Jernite, Y., Plu, J., Xu, C., Le\u00a0Scao, T., Gugger, S., Drame, M., Lhoest, Q., Rush, A.: Transformers: State-of-the-art natural language processing. In: Liu, Q., Schlangen, D. (eds.) Proceedings of the 2020 conference on empirical methods in natural language processing: system demonstrations, pp. 38\u201345. Association for Computational Linguistics, Online (2020). https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-demos.6 . https:\/\/aclanthology.org\/2020.emnlp-demos.6","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"6767_CR51","unstructured":"Zhang, Y., Zhou, F.: Bias mitigation in fine-tuning pre-trained models for enhanced fairness and efficiency. arXiv preprint arXiv:2403.00625 (2024)"},{"key":"6767_CR52","unstructured":"Zhang, S., Dong, L., Li, X., Zhang, S., Sun, X., Wang, S., Li, J., Hu, R., Zhang, T., Wu, F., et al.: Instruction tuning for large language models: A survey. arXiv preprint arXiv:2308.10792 (2023)"},{"key":"6767_CR53","unstructured":"Zhao, W.X., Zhou, K., Li, J., Tang, T., Wang, X., Hou, Y., Min, Y., Zhang, B., Zhang, J., Dong, Z., et al.: A survey of large language models. arXiv preprint arXiv:2303.18223 (2023)"},{"key":"6767_CR54","unstructured":"Zou, A., Wang, Z., Carlini, N., Nasr, M., Kolter, J.Z., Fredrikson, M.: Universal and transferable adversarial attacks on aligned language models. arXiv. arXiv:2307.15043 [cs] (2023). 
http:\/\/arxiv.org\/abs\/2307.15043 Accessed 2024-02-02"}],"container-title":["Machine Learning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06767-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10994-025-06767-4","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06767-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,24]],"date-time":"2026-04-24T00:02:42Z","timestamp":1776988962000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10994-025-06767-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,24]]},"references-count":54,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2025,6]]}},"alternative-id":["6767"],"URL":"https:\/\/doi.org\/10.1007\/s10994-025-06767-4","relation":{},"ISSN":["0885-6125","1573-0565"],"issn-type":[{"value":"0885-6125","type":"print"},{"value":"1573-0565","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4,24]]},"assertion":[{"value":"14 September 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 January 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 March 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 April 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no conflicts of interest to declare that are relevant to the content of this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval and Consent to Participate"}},{"value":"Not applicable.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}],"article-number":"140"}}