{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T10:56:55Z","timestamp":1774090615436,"version":"3.50.1"},"reference-count":352,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T00:00:00Z","timestamp":1774051200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T00:00:00Z","timestamp":1774051200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["AI Ethics"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1007\/s43681-025-00914-2","type":"journal-article","created":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T09:11:52Z","timestamp":1774084312000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Evaluation of AI ethics tools in language models: a developers\u2019 perspective case study"],"prefix":"10.1007","volume":"6","author":[{"given":"Jhessica","family":"Silva","sequence":"first","affiliation":[]},{"given":"Diego","family":"A. B. Moreira","sequence":"additional","affiliation":[]},{"given":"Gabriel","family":"O. 
dos Santos","sequence":"additional","affiliation":[]},{"given":"Alef","family":"Ferreira","sequence":"additional","affiliation":[]},{"given":"Helena","family":"Maia","sequence":"additional","affiliation":[]},{"given":"Sandra","family":"Avila","sequence":"additional","affiliation":[]},{"given":"Helio","family":"Pedrini","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,3,21]]},"reference":[{"key":"914_CR1","unstructured":"OpenAI.: Introducing ChatGPT. https:\/\/openai.com\/blog\/chatgpt. Available from: https:\/\/openai.com\/blog\/chatgpt (2022)"},{"key":"914_CR2","doi-asserted-by":"crossref","unstructured":"Bender, E.M., Koller, A.: Climbing towards NLU: on meaning, form, and understanding in the age of data. In: Jurafsky, D., Chai, J., Schluter, N., Tetreault, J., (eds.) Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics. Online: Association for Computational Linguistics. pp. 5185\u20135198. Available from: https:\/\/aclanthology.org\/2020.acl-main.463\/ (2020)","DOI":"10.18653\/v1\/2020.acl-main.463"},{"key":"914_CR3","doi-asserted-by":"crossref","unstructured":"Hovy, D., Spruit, S.L.: The social impact of natural language processing. In: Annual Meeting of the Association for Computational Linguistics (vol. 2: short papers), pp. 591\u2013598 (2016)","DOI":"10.18653\/v1\/P16-2096"},{"key":"914_CR4","doi-asserted-by":"crossref","unstructured":"Bender, E.M., Gebru, T., McMillan-Major, A., Shmitchell, S.: On the dangers of stochastic parrots: can language models be too big? In: ACM Conference on Fairness, Accountability, and Transparency, pp. 610\u2013623 (2021)","DOI":"10.1145\/3442188.3445922"},{"key":"914_CR5","unstructured":"Weidinger, L., Mellor, J., Rauh, M., Griffin, C., Uesato, J., Huang, P.S., et\u00a0al.: Ethical and social risks of harm from language models. arXiv:2112.04359. 
(2021)"},{"key":"914_CR6","unstructured":"Nicholas, G., Bhatia, A.: Lost in translation: large language models in non-english content analysis. arXiv:2306.07377. [cs.CL] (2023)"},{"key":"914_CR7","doi-asserted-by":"crossref","unstructured":"Santos, G.O., Moreira, D.A.B., Ferreira, A.I., Silva, J., Pereira, L., Bueno, P., et\u00a0al.: CAPIVARA: cost-efficient approach for improving multilingual CLIP performance on low-resource languages. In: Workshop on Multi-lingual Representation Learning (MRL), Conference on Empirical Methods in Natural Language Processing (EMNLP); pp. 184\u2013207 (2023)","DOI":"10.18653\/v1\/2023.mrl-1.15"},{"key":"914_CR8","unstructured":"Johnson, R.L., Pistilli, G., Men\u00e9ndez-Gonz\u00e1lez, N., Duran, L.D.D., Panai, E., Kalpokiene, J., et\u00a0al.: The Ghost in the Machine Has an American Accent: Value Conflict in GPT-3. arXiv:2203.07785. (2022)"},{"issue":"8","key":"914_CR9","doi-asserted-by":"publisher","DOI":"10.1111\/lnc3.12432","volume":"15","author":"D Hovy","year":"2021","unstructured":"Hovy, D., Prabhumoye, S.: Five sources of bias in natural language processing. Lang. Linguist. Compass. 15(8), e12432 (2021)","journal-title":"Lang. Linguist. Compass."},{"issue":"1","key":"914_CR10","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3634685","volume":"24","author":"N Brown","year":"2024","unstructured":"Brown, N., Xie, B., Sarder, E., Fiesler, C., Wiese, E.S.: Teaching ethics in computing: a systematic literature review of ACM computer science education publications. ACM Trans. Comput. Educ. 24(1), 1\u201336 (2024)","journal-title":"ACM Trans. Comput. Educ."},{"key":"914_CR11","doi-asserted-by":"crossref","unstructured":"Goetze, T.S.: Integrating ethics into computer science education: multi-, inter-, and transdisciplinary approaches. In: 54th ACM Technical Symposium on Computer Science Education; pp. 
645\u2013651 (2023)","DOI":"10.1145\/3545945.3569792"},{"key":"914_CR12","doi-asserted-by":"crossref","unstructured":"Mitchell, M., Wu, S., Zaldivar, A., Barnes, P., Vasserman, L., Hutchinson, B., et\u00a0al.: Model cards for model reporting. In: Conference on Fairness, Accountability, and Transparency, pp. 220\u2013229 (2019)","DOI":"10.1145\/3287560.3287596"},{"key":"914_CR13","unstructured":"High-Level Expert Group on Artificial Intelligence. The Assessment List for Trustworthy Artificial Intelligence (ALTAI). Brussels: European Commission, (2020). Available from https:\/\/digital-strategy.ec.europa.eu\/pt\/node\/806"},{"key":"914_CR14","unstructured":"Microsoft.: Harms Modeling - Azure Application Architecture Guide. (2022). Available from: https:\/\/learn.microsoft.com\/en-us\/azure\/architecture\/guide\/responsible-innovation\/harms-modeling\/"},{"issue":"4\/5","key":"914_CR15","doi-asserted-by":"publisher","first-page":"6:1","DOI":"10.1147\/JRD.2019.2942288","volume":"63","author":"M Arnold","year":"2019","unstructured":"Arnold, M., Bellamy, R.K.E., Hind, M., Houde, S., Mehta, S., Mojsilovi\u0107, A., et al.: FactSheets: increasing trust in AI services through supplier\u2019s declarations of conformity. IBM J. Res. Dev. 63(4\/5), 6:1-6:13 (2019). https:\/\/doi.org\/10.1147\/JRD.2019.2942288","journal-title":"IBM J. Res. Dev."},{"issue":"3","key":"914_CR16","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/s43681-021-00084-x","volume":"2","author":"J Ayling","year":"2022","unstructured":"Ayling, J., Chapman, A.: Putting AI ethics to work: are the tools fit for purpose? AI Ethics 2(3), 405\u2013429 (2022)","journal-title":"AI Ethics"},{"issue":"CSCW1","key":"914_CR17","first-page":"1","volume":"7","author":"RY Wong","year":"2023","unstructured":"Wong, R.Y., Madaio, M.A., Merrill, N.: Seeing like a toolkit: how toolkits envision the work of AI ethics. ACM Hum. Comput. Interact. 7(CSCW1), 1\u201327 (2023)","journal-title":"ACM Hum. Comput. 
Interact."},{"key":"914_CR18","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s43681-023-00258-9","volume":"3","author":"E Prem","year":"2023","unstructured":"Prem, E.: From ethical AI frameworks to tools: a review of approaches. AI Ethics 3, 1\u201318 (2023)","journal-title":"AI Ethics"},{"key":"914_CR19","first-page":"1","volume":"6","author":"V Qiang","year":"2023","unstructured":"Qiang, V., Rhim, J., Aj, Moon: No such thing as one-size-fits-all in AI ethics frameworks: a comparative case study. AI Soc. 6, 1\u201320 (2023)","journal-title":"AI Soc."},{"issue":"4","key":"914_CR20","doi-asserted-by":"publisher","first-page":"2141","DOI":"10.1007\/s11948-019-00165-5","volume":"26","author":"J Morley","year":"2020","unstructured":"Morley, J., Floridi, L., Kinsey, L., Elhalal, A.: From what to how: an initial review of publicly available AI ethics tools, methods and research to translate principles into practices. Sci. Eng. Ethics 26(4), 2141\u20132168 (2020)","journal-title":"Sci. Eng. Ethics"},{"issue":"5","key":"914_CR21","doi-asserted-by":"publisher","DOI":"10.1016\/j.telpol.2022.102479","volume":"47","author":"N Palladino","year":"2022","unstructured":"Palladino, N.: A \u2018Biased\u2019 Emerging Governance Regime for Artificial Intelligence? How AI Ethics Get Skewed Moving from Principles to Practices. Telecommunications Policy. 47(5), 102479 (2022)","journal-title":"Telecommunications Policy."},{"issue":"5","key":"914_CR22","doi-asserted-by":"publisher","first-page":"110","DOI":"10.1007\/s10462-024-10740-3","volume":"57","author":"R Ortega-Bola\u00f1os","year":"2024","unstructured":"Ortega-Bola\u00f1os, R., Bernal-Salcedo, J., Germ\u00e1n Ortiz, M., Galeano Sarmiento, J., Ruz, G.A., Tabares-Soto, R.: Applying the ethics of AI: a systematic review of tools for developing and assessing AI-based systems. Artif. Intell. Rev. 57(5), 110 (2024). https:\/\/doi.org\/10.1007\/s10462-024-10740-3","journal-title":"Artif. Intell. 
Rev."},{"key":"914_CR23","unstructured":"IDEO.: IDEO\u2019s AI Ethics Cards. (2019). Available from: https:\/\/www.ideo.com\/journal\/ai-needs-an-ethical-compass-this-tool-can-help"},{"key":"914_CR24","doi-asserted-by":"publisher","first-page":"875","DOI":"10.1016\/j.jbusres.2019.10.006","volume":"122","author":"L Lobschat","year":"2021","unstructured":"Lobschat, L., Mueller, B., Eggers, F., Brandimarte, L., Diefenbach, S., Kroschke, M., et al.: Corporate digital responsibility. J. Bus. Rese. 122, 875\u201388 (2021). https:\/\/doi.org\/10.1016\/j.jbusres.2019.10.006","journal-title":"J. Bus. Rese."},{"key":"914_CR25","unstructured":"for Designers E.: Ethics for Designers \u2014 The Toolkit. (2017). Available from: https:\/\/www.ethicsfordesigners.com\/tools"},{"issue":"12","key":"914_CR26","doi-asserted-by":"publisher","first-page":"86","DOI":"10.1145\/3458723","volume":"64","author":"T Gebru","year":"2021","unstructured":"Gebru, T., Morgenstern, J., Vecchione, B., Vaughan, J.W., Wallach, H.D., III., et al.: Datasheets for datasets. Communications of the ACM. 64(12), 86\u201392 (2021). https:\/\/doi.org\/10.1145\/3458723. (HD)","journal-title":"Communications of the ACM."},{"key":"914_CR27","doi-asserted-by":"publisher","first-page":"587","DOI":"10.1162\/tacl_a_00041","volume":"6","author":"EM Bender","year":"2018","unstructured":"Bender, E.M., Friedman, B.: Data statements for natural language processing: Toward mitigating system bias and enabling better science. Transactions of the Association for Computational Linguistics. 6, 587\u2013604 (2018)","journal-title":"Transactions of the Association for Computational Linguistics."},{"key":"914_CR28","unstructured":"Holland, S., Hosny, A., Newman, S., Joseph, J., Chmielinski, K.: The Dataset Nutrition Label: A Framework To Drive Higher Data Quality Standards. arXiv:1805.03677. 
(2018 May);[cs]"},{"key":"914_CR29","unstructured":"Saleiro, P., Kuester, B., Hinkson, L., London, J., Stevens, A., Anisfeld, A., et\u00a0al.: Aequitas: A Bias and Fairness Audit Toolkit. arXiv:1811.05577. (2019);[cs.LG]"},{"key":"914_CR30","doi-asserted-by":"crossref","unstructured":"Arya, V., Bellamy, R.K.E., Chen, P.Y., Dhurandhar, A., Hind, M., Hoffman, S.C., et\u00a0al.: AI Explainability 360 Toolkit. In: 3rd ACM India Joint International Conference on Data Science & Management of Data (8th ACM IKDD CODS & 26th COMAD). CODS-COMAD \u201921. New York, NY, USA: Association for Computing Machinery; p. 376\u2013379 (2021)","DOI":"10.1145\/3430984.3430987"},{"key":"914_CR31","unstructured":"Research PA.: What-if Tool. 2018. Available from: https:\/\/pair-code.github.io\/what-if-tool\/"},{"key":"914_CR32","unstructured":"for Ethical AI & Machine\u00a0Learning TI.: AI-RFX Procurement Framework. (2019). Available from: https:\/\/ethical.institute\/rfx.html"},{"key":"914_CR33","doi-asserted-by":"crossref","unstructured":"Cabrera, A.A., Fu, E., Bertucci, D., Holstein, K., Talwalkar, A., Hong, J.I., et\u00a0al.: Zeno: An Interactive Framework for Behavioral Evaluation of Machine Learning. In: CHI Conference on Human Factors in Computing Systems. CHI \u201923. New York, NY, USA: Association for Computing Machinery; (2023)","DOI":"10.1145\/3544548.3581268"},{"key":"914_CR34","doi-asserted-by":"crossref","unstructured":"Ribeiro, M.T., Singh, S., Guestrin, C.: \u201cWhy Should I Trust You?\u201d: Explaining the Predictions of Any Classifier. In: ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. KDD \u201916. New York, NY, USA: Association for Computing Machinery; 2016. p. 1135\u20131144","DOI":"10.1145\/2939672.2939778"},{"key":"914_CR35","unstructured":"Lundberg, S.M., Lee, S.I.: A Unified Approach to Interpreting Model Predictions. In: Advances in Neural Information Processing Systems. 
vol.\u00a030; (2017)"},{"key":"914_CR36","doi-asserted-by":"crossref","unstructured":"Dhamala, J., Sun, T., Kumar, V., Krishna, S., Pruksachatkun, Y., Chang, K.W., et\u00a0al.: BOLD: Dataset and Metrics for Measuring Biases in Open-Ended Language Generation. In: ACM Conference on Fairness, Accountability, and Transparency. FAccT \u201921. New York, NY, USA: Association for Computing Machinery; p. 862\u2013872 (2021)","DOI":"10.1145\/3442188.3445924"},{"key":"914_CR37","unstructured":"for Ethical AI & Machine\u00a0Learning TI.: XAI - An eXplainability toolbox for machine learning. 2021. Available from: https:\/\/github.com\/EthicalML\/xai"},{"issue":"4","key":"914_CR38","doi-asserted-by":"publisher","first-page":"1085","DOI":"10.1007\/s43681-023-00289-2","volume":"4","author":"J M\u00f6kander","year":"2024","unstructured":"M\u00f6kander, J., Schuett, J., Kirk, H.R., Floridi, L.: Auditing large language models: A three-layered approach. AI and Ethics. 4(4), 1085\u20131115 (2024). https:\/\/doi.org\/10.1007\/s43681-023-00289-2","journal-title":"AI and Ethics."},{"key":"914_CR39","doi-asserted-by":"crossref","unstructured":"Raji, I.D., Smart, A., White, R.N., Mitchell, M., Gebru, T., Hutchinson, B., et\u00a0al.: Closing the AI accountability gap: defining an end-to-end framework for internal algorithmic auditing. In: Conference on Fairness, Accountability, and Transparency. FAT* \u201920. New York, NY, USA: Association for Computing Machinery; (2020). p. 33\u201344","DOI":"10.1145\/3351095.3372873"},{"issue":"2","key":"914_CR40","doi-asserted-by":"publisher","first-page":"299","DOI":"10.1007\/s43681-023-00266-9","volume":"4","author":"A Agarwal","year":"2023","unstructured":"Agarwal, A., Agarwal, H.: A seven-layer model with checklists for standardising fairness assessment throughout the AI lifecycle. AI and Ethics. 4(2), 299\u2013314 (2023). 
https:\/\/doi.org\/10.1007\/s43681-023-00266-9","journal-title":"AI and Ethics."},{"issue":"1","key":"914_CR41","doi-asserted-by":"publisher","first-page":"51","DOI":"10.7326\/M18-1376","volume":"170","author":"RF Wolff","year":"2019","unstructured":"Wolff, R.F., Moons, K.G., Riley, R.D., Whiting, P.F., Westwood, M., Collins, G.S., et al.: PROBAST: a tool to assess the risk of bias and applicability of prediction model studies. Ann. Intern. Med. 170(1), 51\u201358 (2019)","journal-title":"Ann. Intern. Med."},{"key":"914_CR42","doi-asserted-by":"crossref","unstructured":"Ballard, S., Chappell, K.M., Kennedy, K.: Judgment Call the Game: Using Value Sensitive Design and Design Fiction to Surface Ethical Concerns Related to Technology. In: Designing Interactive Systems Conference; (2019). p. 421\u2013433","DOI":"10.1145\/3322276.3323697"},{"key":"914_CR43","unstructured":"Microsoft.: Community Jury - Azure Application Architecture Guide. 2022. Available from: https:\/\/learn.microsoft.com\/en-us\/azure\/architecture\/guide\/responsible-innovation\/community-jury\/"},{"key":"914_CR44","unstructured":"Privacy, T.: TensorFlow Privacy. (2019). Available from: https:\/\/github.com\/tensorflow\/privacy"},{"key":"914_CR45","unstructured":"Nicolae, M.I., Sinn, M., Tran, M.N., Buesser, B., Rawat, A., Wistuba, M., et\u00a0al.: Adversarial Robustness Toolbox v1.0.0. arXiv:1807.01069. (2019);[cs.LG]"},{"key":"914_CR46","unstructured":"Doteveryone.: Consequence Scanning: An Agile event for Responsible Innovators. (2019). Available from: https:\/\/doteveryone.org.uk\/wp-content\/uploads\/2021\/02\/Consequence-Scanning-Agile-Event-Manual-TechTransformed-Doteveryone-2.pdf"},{"key":"914_CR47","first-page":"43","volume":"2018","author":"S Kiritchenko","year":"2018","unstructured":"Kiritchenko, S., Mohammad, S.M.: Examining Gender and Race Bias in Two Hundred Sentiment Analysis Systems. 
NAACL HLT 2018, 43 (2018)","journal-title":"NAACL HLT"},{"key":"914_CR48","doi-asserted-by":"crossref","unstructured":"Benatti, R.M., Villarroel, C.M.L., Avila, S., Colombini, E.L., Severi, F.: Should I disclose my dataset? Caveats between reproducibility and individual data rights. In: Natural Legal Language Processing Workshop. Association for Computational Linguistics; p. 228\u2013237 (2022)","DOI":"10.18653\/v1\/2022.nllp-1.20"},{"key":"914_CR49","doi-asserted-by":"crossref","unstructured":"Epstein, Z., Payne, B.H., Shen, J.H., Hong, C.J., Felbo, B., Dubey, A., et\u00a0al.: TuringBox: An Experimental Platform for the Evaluation of AI Systems. In: Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-18. International Joint Conferences on Artificial Intelligence Organization; p. 5826\u20135828 (2018)","DOI":"10.24963\/ijcai.2018\/851"},{"issue":"13","key":"914_CR50","doi-asserted-by":"publisher","first-page":"15981","DOI":"10.1609\/aaai.v37i13.26897","volume":"37","author":"S Ali","year":"2024","unstructured":"Ali, S., Kumar, V., Breazeal, C.: AI Audit: A Card Game to Reflect on Everyday AI Systems. AAAI Conference on Artificial Intelligence. 37(13), 15981\u201315989 (2024). https:\/\/doi.org\/10.1609\/aaai.v37i13.26897","journal-title":"AAAI Conference on Artificial Intelligence."},{"key":"914_CR51","doi-asserted-by":"crossref","unstructured":"Zhou, J., Chen, F., Berry, A., Reed, M., Zhang, S., Savage, S.: A Survey on Ethical Principles of AI and Implementations. In: IEEE Symposium Series on Computational Intelligence. Canberra, Australia: IEEE; p. 3010\u20133017 (2020)","DOI":"10.1109\/SSCI47803.2020.9308437"},{"key":"914_CR52","doi-asserted-by":"crossref","unstructured":"Schiff, D., Biddle, J., Borenstein, J., Laas, K.: What\u2019s next for AI ethics, policy, and governance? a global overview. In: AAAI\/ACM Conference on AI, Ethics, and Society. New York City, NY, USA: ACM; p. 
153\u2013158 (2020)","DOI":"10.1145\/3375627.3375804"},{"issue":"2","key":"914_CR53","doi-asserted-by":"publisher","first-page":"185","DOI":"10.1007\/s13347-019-00354-x","volume":"32","author":"L Floridi","year":"2019","unstructured":"Floridi, L.: Translating principles into practices of digital ethics: Five risks of being unethical. Philosophy & Technology. 32(2), 185\u2013193 (2019)","journal-title":"Philosophy & Technology."},{"key":"914_CR54","doi-asserted-by":"crossref","unstructured":"Khan, A.A., Badshah, S., Liang, P., Waseem, M., Khan, B., Ahmad, A., et\u00a0al.: Ethics of AI: A systematic literature review of principles and challenges. In: 26th International Conference on Evaluation and Assessment in Software Engineering. Gothenburg, Sweden: ACM; p. 383\u2013392 (2022)","DOI":"10.1145\/3530019.3531329"},{"key":"914_CR55","doi-asserted-by":"crossref","unstructured":"Corr\u00eaa, N.K., Galv\u00e3o, C., Santos, J.W., Del\u00a0Pino, C., Pinto, E.P., Barbosa, C., et\u00a0al.: Worldwide AI ethics: A review of 200 guidelines and recommendations for AI governance. Patterns. 4(10) (2023)","DOI":"10.1016\/j.patter.2023.100857"},{"issue":"9","key":"914_CR56","doi-asserted-by":"publisher","first-page":"389","DOI":"10.1038\/s42256-019-0088-2","volume":"1","author":"A Jobin","year":"2019","unstructured":"Jobin, A., Ienca, M., Vayena, E.: Artificial Intelligence: The Global Landscape of Ethics Guidelines. Nature Machine Intelligence. 1(9), 389\u2013399 (2019)","journal-title":"Nature Machine Intelligence."},{"issue":"1","key":"914_CR57","doi-asserted-by":"publisher","first-page":"61","DOI":"10.1108\/JICES-12-2019-0138","volume":"19","author":"M Ryan","year":"2020","unstructured":"Ryan, M., Stahl, B.C.: Artificial Intelligence Ethics Guidelines for Developers and Users: Clarifying Their Content and Normative Implications. J. Inf. Commun. Ethics Soc. 19(1), 61\u201386 (2020)","journal-title":"J. Inf. Commun. 
Ethics Soc."},{"issue":"1","key":"914_CR58","doi-asserted-by":"publisher","first-page":"34","DOI":"10.1109\/TTS.2020.2974991","volume":"1","author":"D Peters","year":"2020","unstructured":"Peters, D., Vold, K., Robinson, D., Calvo, R.A.: Responsible AI: Two Frameworks for Ethical Design Practice. IEEE Transactions on Technology and Society. 1(1), 34\u201347 (2020)","journal-title":"IEEE Transactions on Technology and Society."},{"key":"914_CR59","unstructured":"High-Level Expert Group on Artificial Intelligence.: Ethics Guidelines for Trustworthy AI. (2019). Available from: https:\/\/digital-strategy.ec.europa.eu\/en\/library\/ethics-guidelines-trustworthy-ai"},{"key":"914_CR60","unstructured":"Anderson, D., Bonaguro, J., McKinney, M., Nicklin, A., Wiseman, J.: Ethics & Algorithms Toolkit (beta). (2018). Available from: https:\/\/ethicstoolkit.ai\/"},{"key":"914_CR61","unstructured":"Treasury Board of Canada.: Algorithmic Impact Assessment Tool. (2021). Available from: https:\/\/www.canada.ca\/en\/government\/system\/digital-government\/digital-government-innovations\/responsible-use-ai\/algorithmic-impact-assessment.html"},{"key":"914_CR62","volume-title":"The Turing Way: A Handbook for Reproducible","author":"TW Community","year":"2021","unstructured":"Community, T.W.: The Turing Way: A Handbook for Reproducible, Ethical and Collaborative Research, Zenodo (2021)"},{"issue":"1","key":"914_CR63","first-page":"1","volume":"1","author":"D Reisman","year":"2018","unstructured":"Reisman, D., Schultz, J., Crawford, K., Whittaker, M.: Algorithmic Impact Assessments: A Practical Framework for Public Agency Accountability. AI Now Institute. 1(1), 1\u201322 (2018)","journal-title":"AI Now Institute."},{"key":"914_CR64","unstructured":"Dhurandhar, A., Chen, P.Y., Luss, R., Tu, C.C., Ting, P., Shanmugam, K., et\u00a0al.: Explanations based on the missing: Towards contrastive explanations with pertinent negatives. Advances in Neural Information Processing Systems. 
31 (2018)"},{"key":"914_CR65","unstructured":"Goodman, D., Xin, H., Yang, W., Yuesheng, W., Junfeng, X., Huan, Z.: Advbox: A toolbox to generate adversarial examples that fool neural networks. arXiv:2001.05574. (2020);[cs.LG]"},{"issue":"1","key":"914_CR66","doi-asserted-by":"publisher","first-page":"32","DOI":"10.26599\/IJCS.2022.9100033","volume":"7","author":"J Zhang","year":"2023","unstructured":"Zhang, J., Shu, Y., Yu, H.: Fairness in Design: A Framework for Facilitating Ethical Artificial Intelligence Designs. International Journal of Crowd Science. 7(1), 32\u201339 (2023). https:\/\/doi.org\/10.26599\/IJCS.2022.9100033","journal-title":"International Journal of Crowd Science."},{"key":"914_CR67","doi-asserted-by":"publisher","DOI":"10.1016\/j.softx.2023.101352","volume":"22","author":"A Goldsteen","year":"2023","unstructured":"Goldsteen, A., Saadi, O., Shmelkin, R., Shachor, S., Razinkov, N.: AI Privacy Toolkit. SoftwareX. 22, 101352 (2023). https:\/\/doi.org\/10.1016\/j.softx.2023.101352","journal-title":"AI Privacy Toolkit. SoftwareX."},{"key":"914_CR68","unstructured":"ICO.: Guide to the UK General Data Protection Regulation (UK GDPR). (2020)"},{"key":"914_CR69","unstructured":"OECD Digital Economy Papers.: OECD Framework for the Classification of AI Systems. Paris: OECD Publishing. 2022"},{"key":"914_CR70","unstructured":"Forum WE.: AI Procurement in a Box. (2020). Available from: https:\/\/www3.weforum.org\/docs\/WEF_AI_Procurement_in_a_Box_Project_Overview_2020.pdf"},{"key":"914_CR71","unstructured":"For Ethical AI & Machine\u00a0Learning TI.: Machine Learning Maturity Model, AI & Machine Learning Solutions. (2019). Available from: https:\/\/ethical.institute\/mlmm"},{"key":"914_CR72","unstructured":"Guillou, P.: GPorTuguese-2 (Portuguese GPT-2 small): a Language Model for Portuguese text generation (and more NLP tasks...). (2020). 
Available from: https:\/\/huggingface.co\/pierreguillou\/gpt2-small-portuguese"},{"key":"914_CR73","unstructured":"Carmo, D., Piau, M., Campiotti, I., Nogueira, R., Lotufo, R.: PTT5: Pretraining and validating the T5 model on Brazilian Portuguese data. arXiv:2008.09144. (2020)"},{"key":"914_CR74","doi-asserted-by":"publisher","first-page":"403","DOI":"10.1007\/978-3-030-61377-8_28","volume-title":"Intelligent systems","author":"F Souza","year":"2020","unstructured":"Souza, F., Nogueira, R., Lotufo, R.: BERTimbau: Pretrained BERT Models for Brazilian Portuguese. In: Cerri, R., Prati, R.C. (eds.) Intelligent systems, pp. 403\u2013417. Springer, Cham (2020)"},{"key":"914_CR75","doi-asserted-by":"crossref","unstructured":"Schneider, E.T.R., de\u00a0Souza, J.V.A., Knafou, J., Oliveira, L.E.S.e., Copara, J., Gumiel, Y.B., et\u00a0al.: BioBERTpt - a Portuguese neural language model for clinical named entity recognition. In: 3rd Clinical Natural Language Processing Workshop; pp. 65\u201372 (2020)","DOI":"10.18653\/v1\/2020.clinicalnlp-1.7"},{"key":"914_CR76","unstructured":"Finardi, P., Viegas, J.D., Ferreira, G.T., Mansano, A.F., Carid\u00e1, V.F.: BERTa\u00fa: Ita\u00fa BERT for Digital Customer Service. arXiv:2101.12015. (2021)"},{"key":"914_CR77","doi-asserted-by":"crossref","unstructured":"Schneider, E.T.R., de\u00a0Souza, J.V.A.: Gumiel YB, Moro C, Paraiso EC. A GPT-2 language model for biomedical texts in Portuguese. In: IEEE 34th International Symposium on Computer-Based Medical Systems; pp. 474\u2013479 (2021)","DOI":"10.1109\/CBMS52027.2021.00056"},{"key":"914_CR78","doi-asserted-by":"crossref","unstructured":"Polo, F.M., Mendon\u00e7a, G.C.F., Parreira, K.C.J., Gianvechio, L., Cordeiro, P., Ferreira, J.B., et\u00a0al.: LegalNLP \u2013 Natural Language Processing methods for the Brazilian Legal Language. arXiv:2110.15709. 
(2021)","DOI":"10.5753\/eniac.2021.18301"},{"key":"914_CR79","doi-asserted-by":"crossref","unstructured":"Rodrigues, R.B.M., Privatto, P.I.M., de\u00a0Sousa, G.J., Murari, R.P., Afonso, L.C.S., Papa, J.P., et\u00a0al.: PetroBERT: a domain adaptation language model for oil and gas applications in Portuguese. In: Computational Processing of the Portuguese Language; pp. 101\u2013109 (2022)","DOI":"10.1007\/978-3-030-98305-5_10"},{"key":"914_CR80","doi-asserted-by":"crossref","unstructured":"Pires, R., Abonizio, H., Almeida, T.S., Nogueira, R.: Sabi\u00e1: Portuguese large language models. In: Intelligent Systems; pp. 226\u2013240 (2023)","DOI":"10.1007\/978-3-031-45392-2_15"},{"key":"914_CR81","doi-asserted-by":"crossref","unstructured":"Rodrigues, J., Gomes, L., Silva, J., Branco, A., Santos, R., Cardoso, H.L., et\u00a0al.: Advancing neural encoding of Portuguese with transformer albertina PT-*. In: Progress in Artificial Intelligence, pp. 441\u2013453 (2023)","DOI":"10.1007\/978-3-031-49008-8_35"},{"key":"914_CR82","doi-asserted-by":"crossref","unstructured":"Schneider, E.T.R., Gumiel, Y.B., de\u00a0Souza, J.V.A., Mie\u00a0Mukai, L., Emanuel Silva\u00a0e Oliveira, L., de\u00a0Sa\u00a0Rebelo, M., et\u00a0al. CardioBERTpt: transformer-based models for cardiology language representation in Portuguese. In: IEEE 36th International Symposium on Computer-Based Medical Systems, pp. 378\u2013381 (2023)","DOI":"10.1109\/CBMS58004.2023.00247"},{"key":"914_CR83","first-page":"349","volume-title":"Computational science and its applications - ICCSA 2023","author":"CFO Viegas","year":"2023","unstructured":"Viegas, C.F.O., Costa, B.C., Ishii, R.P.: JurisBERT: A New Approach that Converts a Classification Corpus into an STS One. In: Gervasi, O., Murgante, B., Taniar, D., Apduhan, B.O., Braga, A.C., Garau, C., et al. (eds.) Computational science and its applications - ICCSA 2023, pp. 349\u2013365. 
Springer, Cham (2023)"},{"key":"914_CR84","unstructured":"Larcher, C., Piau, M., Finardi, P., Gengo, P., Esposito, P., Carid\u00e1, V.: Cabrita: Closing the Gap for Foreign Languages. arXiv:2308.11878. (2023)"},{"key":"914_CR85","doi-asserted-by":"crossref","unstructured":"Costa, P.B., Pavan, M.C., Santos, W.R., Silva, S.C., Paraboni, I.: BERTabaporu: assessing a genre-specific language model for Portuguese NLP. In: 14th International Conference on Recent Advances in Natural Language Processing, pp. 217\u2013223 (2023)","DOI":"10.26615\/978-954-452-092-2_024"},{"key":"914_CR86","unstructured":"Campiotti, I., Rodrigues, M., Albuquerque, Y., Azevedo, R., Andrade, A.: DeBERTinha: A Multistep Approach to Adapt DebertaV3 XSmall for Brazilian Portuguese Natural Language Processing Task. 230916844. (2023)"},{"key":"914_CR87","doi-asserted-by":"crossref","unstructured":"Silveira, R., Ponte, C., Almeida, V., Pinheiro, V., Furtado, V.: LegalBert-pt: A pretrained language model for the Brazilian Portuguese Legal Domain. In: Intelligent Systems; p. 268\u2013282 (2023)","DOI":"10.1007\/978-3-031-45392-2_18"},{"key":"914_CR88","unstructured":"Garcia, G.L., Paiola, P.H., Morelli, L.H., Candido, G., J\u00fanior, A.C., Jodas, D.S., et\u00a0al.: Introducing Bode: A Fine-Tuned Large Language Model for Portuguese Prompt-Based Task. CoRR. (2024)"},{"key":"914_CR89","volume":"16","author":"NK Corr\u00eaa","year":"2024","unstructured":"Corr\u00eaa, N.K., Falk, S., Fatimah, S., Sen, A., De Oliveira, N.: TeenyTinyLlama: open-source tiny language models trained in Brazilian Portuguese. Mach. Learn. Appl. 16, 100558 (2024)","journal-title":"Mach. Learn. Appl."},{"key":"914_CR90","unstructured":"de\u00a0Mello, G.L., Finger, M., Serras, F., de\u00a0Mello\u00a0Carpi, M., Jose, M.M., et\u00a0al.: PeLLE: Encoder-based language models for Brazilian Portuguese based on open data. arXiv:2402.19204. 
(2024)"},{"key":"914_CR91","first-page":"441","volume":"2024","author":"R Lopes","year":"2024","unstructured":"Lopes, R., Magalh\u00e3es, J., Semedo, D.: Gl\u00f3rIA: a generative and open large language model for Portuguese. PROPOR 2024, 441 (2024)","journal-title":"PROPOR"},{"key":"914_CR92","doi-asserted-by":"crossref","unstructured":"Santos, R., Silva, J., Gomes, L., Rodrigues, J., Branco, A.: Advancing generative AI for Portuguese with open decoder Gerv\u00e1sio PT. In: 3rd Annual Meeting of the Special Interest Group on Under-resourced Languages@ LREC-COLING 2024; pp. 16\u201326 (2024)","DOI":"10.63317\/2a4ddkfa3t7f"},{"key":"914_CR93","doi-asserted-by":"crossref","unstructured":"Garcia, E.A.S., Silva, N.F.F., Siqueira, F., Albuquerque, H.O., Gomes, J.R.S., Souza, E., et\u00a0al.: RoBERTaLexPT: a legal RoBERTa Model pretrained with deduplication for Portuguese. In: 16th International Conference on Computational Processing of Portuguese; pp. 374\u2013383 (2024)","DOI":"10.21814\/lm.16.2.457"},{"key":"914_CR94","unstructured":"Almeida, T.S., Abonizio, H., Nogueira, R., Pires, R.: Sabi\u00e1-2: A New Generation of Portuguese Large Language Models. arXiv:2403.09887. (2024)"},{"key":"914_CR95","unstructured":"Junior, R.M., Pires, R., Romero, R., Nogueira, R.: Juru: Legal Brazilian Large Language Model from Reputable Sources. arXiv:2403.18140. (2024)"},{"key":"914_CR96","doi-asserted-by":"publisher","first-page":"324","DOI":"10.1007\/978-3-031-79032-4_23","volume-title":"Intelligent systems","author":"M Piau","year":"2025","unstructured":"Piau, M., Lotufo, R., Nogueira, R.: ptt5-v2: A Closer Look at Continued Pretraining of T5 Models for the Portuguese Language. In: Paes, A., Verri, F.A.N. (eds.) Intelligent systems, pp. 324\u2013338. 
Springer, Cham (2025)"},{"key":"914_CR97","doi-asserted-by":"publisher","first-page":"267","DOI":"10.1007\/978-3-031-73503-5_22","volume-title":"Progress in artificial intelligence","author":"L Gomes","year":"2025","unstructured":"Gomes, L., Branco, A., Silva, J., Rodrigues, J., Santos, R.: Open Sentence Embeddings for Portuguese with the Serafim PT* Encoders Family. In: Santos, M.F., Machado, J., Novais, P., Cortez, P., Moreira, P.M. (eds.) Progress in artificial intelligence, pp. 267\u2013279. Springer, Cham (2025)"},{"key":"914_CR98","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2024.109233","volume":"182","author":"M Nunes","year":"2024","unstructured":"Nunes, M., Bon\u00e9, J., Ferreira, J.C., Chaves, P., Elvas, L.B.: MediAlbertina: an European Portuguese medical language model. Comput. Biol. Med. 182, 109233 (2024). https:\/\/doi.org\/10.1016\/j.compbiomed.2024.109233","journal-title":"Comput. Biol. Med."},{"key":"914_CR99","doi-asserted-by":"publisher","unstructured":"Abonizio, H., Almeida, T.S., Laitz, T., Junior, R.M., Bon\u00e1s, G.K., Nogueira, R., et\u00a0al. Sabi\u00e1-3 Technical Report. arXiv:2410.12049. https:\/\/doi.org\/10.48550\/arXiv.2410.12049 (2025 Feb)","DOI":"10.48550\/arXiv.2410.12049"},{"key":"914_CR100","doi-asserted-by":"publisher","unstructured":"Corr\u00eaa, N.K., Sen, A., Falk, S., Fatimah, S.: Tucano: Advancing Neural Text Generation for Portuguese. arXiv:2411.07854. https:\/\/doi.org\/10.48550\/arXiv.2411.07854 (2024 Nov)","DOI":"10.48550\/arXiv.2411.07854"},{"key":"914_CR101","doi-asserted-by":"publisher","first-page":"292","DOI":"10.1007\/978-3-031-73503-5_24","volume-title":"Progress in artificial intelligence","author":"P Gamallo","year":"2025","unstructured":"Gamallo, P., Rodr\u00edguez, P., Santos, D., Sotelo, S., Miquelina, N., Paniagua, S., et al.: A Galician-Portuguese Generative Model. In: Santos, M.F., Machado, J., Novais, P., Cortez, P., Moreira, P.M. (eds.) Progress in artificial intelligence, pp. 
292\u2013304. Springer, Cham (2025)"},{"key":"914_CR102","doi-asserted-by":"publisher","first-page":"317","DOI":"10.18653\/v1\/2024.customnlp4u-1.24","volume-title":"1st Workshop on Customizable NLP: Progress and Challenges in Customizing NLP for a Domain, Application, Group, or Individual (CustomNLP4U)","author":"A Simpl\u00edcio","year":"2024","unstructured":"Simpl\u00edcio, A., Semedo, D., Magalhaes, J.: V-Gl\u00f3rIA - Customizing Large Vision and Language Models to European Portuguese. In: Kumar, S., Balachandran, V., Park, C.Y., Shi, W., Hayati, S.A., Tsvetkov, Y., et al. (eds.) 1st Workshop on Customizable NLP: Progress and Challenges in Customizing NLP for a Domain, Application, Group, or Individual (CustomNLP4U), pp. 317\u2013326. Association for Computational Linguistics, Miami, Florida, USA (2024)"},{"key":"914_CR103","doi-asserted-by":"publisher","first-page":"228","DOI":"10.1007\/978-3-031-76607-7_17","volume-title":"Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications","author":"GL Garcia","year":"2025","unstructured":"Garcia, G.L., Paiola, P.H., Garcia, E., Ribeiro Manesco, J.R., Papa, J.P.: GemBode and PhiBode: Adapting Small Language Models to Brazilian Portuguese. In: Hern\u00e1ndez-Garc\u00eda, R., Barrientos, R.J., Velastin, S.A. (eds.) Progress in Pattern Recognition, Image Analysis, Computer Vision, and Applications, pp. 228\u2013243. Springer Nature Switzerland, Cham (2025)"},{"issue":"6","key":"914_CR104","doi-asserted-by":"publisher","first-page":"4363","DOI":"10.1007\/s00521-024-10711-3","volume":"37","author":"F Carneiro","year":"2025","unstructured":"Carneiro, F., Vianna, D., Carvalho, J., Plastino, A., Paes, A.: BERTweet. BR: a pre-trained language model for tweets in Portuguese. Neural Comput. Appl. 37(6), 4363\u20134385 (2025). https:\/\/doi.org\/10.1007\/s00521-024-10711-3","journal-title":"Neural Comput. 
Appl."},{"key":"914_CR105","doi-asserted-by":"publisher","first-page":"19","DOI":"10.1007\/978-3-031-79032-4_2","volume-title":"Intelligent Systems","author":"MO Silva","year":"2025","unstructured":"Silva, M.O., Oliveira, G.P., Costa, L.G.L., Pappa, G.L.: GovBERT-BR: A BERT-Based Language Model for Brazilian Portuguese Governmental Data. In: Paes, A., Verri, F.A.N. (eds.) Intelligent Systems, pp. 19\u201332. Springer, Cham (2025)"},{"key":"914_CR106","doi-asserted-by":"publisher","unstructured":"Pires, H., Paucar, L., Carvalho, J.P.: DeB3RTa: a transformer-based model for the Portuguese Financial Domain. Big Data Cognit. Comput. 9(3):51. https:\/\/doi.org\/10.3390\/bdcc9030051 (2025)","DOI":"10.3390\/bdcc9030051"},{"key":"914_CR107","doi-asserted-by":"crossref","unstructured":"Fish, B., Kun, J., \u00c1d\u00e1m, D.:\u00a0Lelkes.: A Confidence-Based Approach for Balancing Fairness and Accuracy. In: SIAM International Conference on Data Mining, pp. 144\u2013152 (2016)","DOI":"10.1137\/1.9781611974348.17"},{"key":"914_CR108","unstructured":"Ryffel, T., Trask, A., Dahl, M., Wagner, B., Mancuso, J., Rueckert, D., et\u00a0al.: A generic framework for privacy preserving deep learning. CoRR. (2018)"},{"key":"914_CR109","doi-asserted-by":"crossref","unstructured":"Narayanan, M., Schoeberl, C.: A Matrix for Selecting Responsible AI Frameworks. Center for Security and Emerging Technology https:\/\/www.csetgeorgetownedu\/wp-content\/uploads\/CSET-A-Matrix-for-Selecting-Responsible-AI-Frameworks.pdf (2023)","DOI":"10.51593\/20220029"},{"key":"914_CR110","unstructured":"IFTF.: A Playbook for Ethical Technology Governance. Available from: https:\/\/www.iftf.org\/projects\/a-playbook-for-ethical-tech-governance\/? 
(2021)"},{"issue":"1","key":"914_CR111","doi-asserted-by":"publisher","first-page":"175","DOI":"10.1007\/s43681-022-00154-8","volume":"3","author":"E Vyhmeister","year":"2023","unstructured":"Vyhmeister, E., Castane, G., \u00d6stberg, P.O., Thevenin, S.: A responsible AI framework: pipeline contextualisation. AI Ethics 3(1), 175\u2013197 (2023). https:\/\/doi.org\/10.1007\/s43681-022-00154-8","journal-title":"AI Ethics"},{"key":"914_CR112","doi-asserted-by":"crossref","unstructured":"Wachter, S., Mittelstadt, B.: A right to reasonable inferences: re-thinking data protection law in the age of big data and AI. Colum Bus L Rev. pp. 494 (2019)","DOI":"10.31228\/osf.io\/mu2kf"},{"key":"914_CR113","doi-asserted-by":"publisher","unstructured":"Guidotti, R., Monreale, A., Ruggieri, S., Turini, F., Giannotti, F., Pedreschi, D.: A survey of methods for explaining black box models. ACM Comput. Surv. 51(5) (2018) https:\/\/doi.org\/10.1145\/3236009","DOI":"10.1145\/3236009"},{"issue":"2","key":"914_CR114","doi-asserted-by":"publisher","first-page":"63","DOI":"10.1561\/1100000015","volume":"11","author":"B Friedman","year":"2017","unstructured":"Friedman, B., Hendry, D.G., Borning, A.: A survey of value sensitive design methods. Found. Trends\u00ae Human Comput. Interact. 11(2), 63\u2013125 (2017). https:\/\/doi.org\/10.1561\/1100000015","journal-title":"Found. Trends\u00ae Human Comput. Interact."},{"issue":"2","key":"914_CR115","doi-asserted-by":"publisher","first-page":"126","DOI":"10.1057\/ejis.2013.18","volume":"23","author":"MC Oetzel","year":"2014","unstructured":"Oetzel, M.C., Spiekermann, S.: A systematic methodology for privacy impact assessments: a design science approach. Eur. J. Inf. Syst. 23(2), 126\u2013150 (2014). https:\/\/doi.org\/10.1057\/ejis.2013.18","journal-title":"Eur. J. Inf. 
Syst."},{"key":"914_CR116","doi-asserted-by":"crossref","unstructured":"Dom\u00ednguez\u00a0Hern\u00e1ndez, A., Galanos, V.: A toolkit of dilemmas: beyond debiasing and fairness formulas for responsible AI\/ML. In: IEEE International Symposium on Technology and Society (ISTAS). vol.\u00a01; pp. 1\u20134 (2022)","DOI":"10.1109\/ISTAS55053.2022.10227133"},{"key":"914_CR117","first-page":"536","volume-title":"HCI International 2021 - Late Breaking Papers: Multimodality, eXtended Reality, and Artificial Intelligence","author":"S Schmager","year":"2021","unstructured":"Schmager, S., Sousa, S.: A Toolkit to Enable the Design of Trustworthy AI. In: Stephanidis, C., Kurosu, M., Chen, J.Y.C., Fragomeni, G., Streitz, N., Konomi, S., et al. (eds.) HCI International 2021 - Late Breaking Papers: Multimodality, eXtended Reality, and Artificial Intelligence, pp. 536\u2013555. Springer International Publishing, Cham (2021)"},{"key":"914_CR118","unstructured":"AI A.: A3i the Trust in AI Framework. Available from: http:\/\/a3i.ai\/trust-in-ai (2025)"},{"key":"914_CR119","unstructured":"Kolter, Z., Madry, A.: Adversarial Robustness - Theory and Practice. Available from: https:\/\/adversarial-ml-tutorial.org\/ (2018)"},{"key":"914_CR120","unstructured":"Accenture.: Accenture Fairness Evaluation Tool. Available from: https:\/\/www.nist.gov\/system\/files\/documents\/2019\/06\/11\/nist-ai-rfi-accenture-001.pdf (2019)"},{"key":"914_CR121","first-page":"633","volume":"633","author":"JA Kroll","year":"2017","unstructured":"Kroll, J.A., Huey, J., Barocas, S., Felten, E.W., Reidenberg, J.R., Robinson, D.G., et al.: Accountable algorithms, 165 U. Pa L Rev. 633, 633 (2017)","journal-title":"Pa L Rev."},{"key":"914_CR122","unstructured":"Butnaru, C.: Agile Ethics for AI (HAI). 
Available from: https:\/\/trello.com\/b\/SarLFYOd\/agile-ethics-for-ai-hai (2018)"},{"issue":"4","key":"914_CR123","doi-asserted-by":"publisher","first-page":"754","DOI":"10.1016\/j.clsr.2018.05.017","volume":"34","author":"A Mantelero","year":"2018","unstructured":"Mantelero, A.: AI and Big Data: a blueprint for a human rights, social and ethical impact assessment. Comput. Law Secur. Rev. 34(4), 754\u2013772 (2018). https:\/\/doi.org\/10.1016\/j.clsr.2018.05.017","journal-title":"Comput. Law Secur. Rev."},{"key":"914_CR124","unstructured":"Calderon, A., Taber, D., Qu, H., Wen, J.: AI Blindspots: a discovery process for spotting unconscious biases and structural inequalities in AI systems. Available from: https:\/\/aiblindspot.media.mit.edu\/ (2019)"},{"key":"914_CR125","unstructured":"Commons, A.: AI Commons. Available from: https:\/\/ai-commons.org\/ (2016)"},{"key":"914_CR126","doi-asserted-by":"crossref","unstructured":"Ortega, E., Tran, M., Bandeen, G.: AI digital tool product lifecycle governance framework through ethics and compliance by design. In: IEEE Conference on Artificial Intelligence (CAI), pp. 353\u2013356 (2023)","DOI":"10.1109\/CAI54212.2023.00155"},{"issue":"4\/5","key":"914_CR127","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1147\/JRD.2019.2942287","volume":"63","author":"RK Bellamy","year":"2019","unstructured":"Bellamy, R.K., Dey, K., Hind, M., Hoffman, S.C., Houde, S., Kannan, K., et al.: AI fairness 360: an extensible toolkit for detecting and mitigating algorithmic bias. IBM J. Res. Dev. 63(4\/5), 1\u201315 (2019)","journal-title":"IBM J. Res. Dev."},{"key":"914_CR128","unstructured":"Database AI.: Welcome to the Artificial Intelligence Incident Database. Available from: https:\/\/incidentdatabase.ai\/ (2025)"},{"key":"914_CR129","doi-asserted-by":"crossref","unstructured":"Wahle, J.P., Ruas, T., Mohammad, S.M., Meuschke, N., Gipp, B.: AI usage cards: responsibly reporting AI-generated content. 
In: ACM\/IEEE Joint Conference on Digital Libraries (JCDL), pp. 282\u2013284 (2023)","DOI":"10.1109\/JCDL57899.2023.00060"},{"key":"914_CR130","unstructured":"at\u00a0Northwestern\u00a0University CJL.: Algorithm Tips - Resources and leads for investigating algorithms in society. (2019). Available from: http:\/\/algorithmtips.org\/"},{"issue":"4","key":"914_CR131","doi-asserted-by":"publisher","first-page":"543","DOI":"10.1007\/s13347-017-0263-5","volume":"31","author":"R Binns","year":"2018","unstructured":"Binns, R.: Algorithmic accountability and public reason. Philos. Technol. 31(4), 543\u2013556 (2018). https:\/\/doi.org\/10.1007\/s13347-017-0263-5","journal-title":"Philos. Technol."},{"key":"914_CR132","unstructured":"Institute AN.: Algorithmic Accountability Policy Toolkit. (2018). Available from: https:\/\/ainowinstitute.org\/publication\/algorithmic-accountability-policy-toolkit"},{"key":"914_CR133","doi-asserted-by":"publisher","unstructured":"Diakopoulos, N.:. Algorithmic Accountability. Digit. Journal. 3(3):398\u2013415. (2015) https:\/\/doi.org\/10.1080\/21670811.2014.976411","DOI":"10.1080\/21670811.2014.976411"},{"key":"914_CR134","unstructured":"Of\u00a0Canada G.: Algorithmic Impact Assessment tool. (2020). Available from: https:\/\/www.canada.ca\/en\/government\/system\/digital-government\/digital-government-innovations\/responsible-use-ai\/algorithmic-impact-assessment.html"},{"key":"914_CR135","doi-asserted-by":"crossref","unstructured":"Datta, A., Sen, S., Zick, Y.: Algorithmic transparency via quantitative input influence: theory and experiments with learning systems. In: IEEE Symposium on Security and Privacy (SP); pp. 598\u2013617 (2016)","DOI":"10.1109\/SP.2016.42"},{"issue":"181","key":"914_CR136","first-page":"1","volume":"22","author":"J Klaise","year":"2021","unstructured":"Klaise, J., Looveren, A.V., Vacanti, G., Coca, A.: Alibi Explain: Algorithms for Explaining Machine Learning Models. J. Mach. Learn. Res. 
22(181), 1\u20137 (2021)","journal-title":"J. Mach. Learn. Res."},{"key":"914_CR137","doi-asserted-by":"crossref","unstructured":"Hardt, M., Chen, X., Cheng, X., Donini, M., Gelman, J., Gollaprolu, S., et\u00a0al.: Amazon SageMaker clarify: machine learning bias detection and explainability in the cloud. In: 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining. KDD \u201921. New York, NY, USA: Association for Computing Machinery, pp. 2974\u20132983 (2021)","DOI":"10.1145\/3447548.3467177"},{"issue":"3","key":"914_CR138","doi-asserted-by":"publisher","first-page":"667","DOI":"10.1007\/s11948-015-9724-3","volume":"22","author":"I Van de Poel","year":"2016","unstructured":"Van de Poel, I.: An ethical framework for evaluating experimental technology. Sci. Eng. Ethics 22(3), 667\u2013686 (2016). https:\/\/doi.org\/10.1007\/s11948-015-9724-3","journal-title":"Sci. Eng. Ethics"},{"key":"914_CR139","unstructured":"Vallor, S., Green, B., Raicu, I.: An Ethical Toolkit for Engineering\/Design Practice \u2013 The Markkula Center for Applied Ethics at Santa Clara University. Available from: https:\/\/www.scu.edu\/ethics-in-technology-practice\/ethical-toolkit\/ (2018)"},{"key":"914_CR140","doi-asserted-by":"crossref","unstructured":"Bluemke, D.A., Moy, L., Bredella, M.A., Ertl-Wagner, B.B., Fowler, K.J., Goh, V.J., et\u00a0al.: Assessing radiology research on artificial intelligence: A brief guide for authors, reviewers, and readers \u2013 from the radiology editorial board. Radiol. Soc. N. Am. (2020)","DOI":"10.1148\/radiol.2019192515"},{"key":"914_CR141","unstructured":"team\u00a0at pymetrics DS.: Audit-AI: Open Sourced Bias Testing for Generalized Machine Learning Applications. Available from: https:\/\/github.com\/pymetrics\/audit-ai (2019)"},{"key":"914_CR142","unstructured":"Group AAR, of\u00a0Computer Sciences\u00a0at Northeastern\u00a0University KC.: Auditing Algorithms @ Northeastern. 
Available from: https:\/\/personalization.ccs.neu.edu\/ (2025)"},{"key":"914_CR143","unstructured":"Sandvig, C., Hamilton, K., Karahalios, K., Langbort, C.: Auditing algorithms: research methods for detecting discrimination on internet platforms. In: Data and discrimination: converting critical concerns into productive inquiry, pp. 4349\u20134357 (2014)"},{"key":"914_CR144","doi-asserted-by":"crossref","unstructured":"Papakyriakopoulos, O., Choi, A.S.G., Thong, W., Zhao, D., Andrews, J., Bourke, R., et\u00a0al.: Augmented datasheets for speech datasets and ethical decision-making. In: ACM Conference on Fairness, Accountability, and Transparency. FAccT \u201923. New York, NY, USA: Association for Computing Machinery, pp. 881\u2013904 (2023)","DOI":"10.1145\/3593013.3594049"},{"key":"914_CR145","doi-asserted-by":"publisher","first-page":"4902","DOI":"10.18653\/v1\/2020.acl-main.442","volume-title":"58th Annual Meeting of the Association for Computational Linguistics","author":"MT Ribeiro","year":"2020","unstructured":"Ribeiro, M.T., Wu, T., Guestrin, C., Singh, S.: Beyond Accuracy: Behavioral Testing of NLP Models with CheckList. In: Jurafsky, D., Chai, J., Schluter, N., Tetreault, J. (eds.) 58th Annual Meeting of the Association for Computational Linguistics, pp. 4902\u20134912. Association for Computational Linguistics, Online (2020)"},{"key":"914_CR146","unstructured":"Sandler, R., Basl, J., Tiell, S.: Building Data and Artificial Intelligence Ethics Committees \u2013 Accenture. Accenture and Ethics Institute at Northeastern University. Available from: https:\/\/cssh.northeastern.edu\/ethics\/wp-content\/uploads\/sites\/31\/2021\/08\/Building_Data_and_AI_Ethics_Committees.pdf (2019)"},{"key":"914_CR147","unstructured":"Captum.: Captum - Model Interpretability for PyTorch. Available from: https:\/\/github.com\/pytorch\/captum (2019)"},{"key":"914_CR148","unstructured":"FROG.: Cards for Humanity. 
Available from: https:\/\/cardsforhumanity.frog.co\/ (2020)"},{"key":"914_CR149","doi-asserted-by":"crossref","unstructured":"Feldman, M., Friedler, S.A., Moeller, J., Scheidegger, C., Venkatasubramanian, S.: Certifying and removing disparate impact. In: 21th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. KDD \u201915. New York, NY, USA: Association for Computing Machinery, pp. 259\u2013268 (2015)","DOI":"10.1145\/2783258.2783311"},{"key":"914_CR150","doi-asserted-by":"publisher","unstructured":"Ehsan, U., Saha, K., De\u00a0Choudhury, M., Riedl, M.O.: Charting the Sociotechnical Gap in Explainable AI: A Framework to Address the Gap in XAI. ACM Transactions on Computer-Human Interaction. (2023) Apr;7(CSCW1). (2023) https:\/\/doi.org\/10.1145\/3579467","DOI":"10.1145\/3579467"},{"key":"914_CR151","unstructured":"Papernot, N., Faghri, F., Carlini, N., Goodfellow, I., Feinman, R., Kurakin, A., et\u00a0al.: Technical Report on the CleverHans v2.1.0 Adversarial Examples Library. arXiv:1610.00768. (2018)"},{"key":"914_CR152","doi-asserted-by":"crossref","unstructured":"Madaio, M.A., Stark, L., Wortman\u00a0Vaughan, J., Wallach, H.: Co-designing checklists to understand organizational challenges and opportunities around fairness in AI. In: CHI Conference on Human Factors in Computing Systems. CHI \u201920. New York, NY, USA: Association for Computing Machinery, pp. 1\u201314 (2020)","DOI":"10.1145\/3313831.3376445"},{"key":"914_CR153","doi-asserted-by":"publisher","unstructured":"Wachter, S., Mittelstadt, B., Russell, C.: Counterfactual Explanations without Opening the Black Box: Automated Decisions and the GDPR. arXiv:1711.00399. https:\/\/doi.org\/10.48550\/arXiv.1711.00399 (2018)","DOI":"10.48550\/arXiv.1711.00399"},{"key":"914_CR154","unstructured":"Kusner, M.J., Loftus, J., Russell, C., Silva, R.: Counterfactual fairness. In: Guyon I, Luxburg UV, Bengio S, Wallach H, Fergus R, Vishwanathan S, et\u00a0al., (eds). 
Advances in Neural Information Processing Systems. vol.\u00a030. Curran Associates, Inc.; (2017)"},{"key":"914_CR155","unstructured":"CISA.: Cyber Resilience Review. Available from: https:\/\/www.cisa.gov\/resources-tools\/services\/cyber-resilience-review-crr (2025)"},{"key":"914_CR156","doi-asserted-by":"crossref","unstructured":"Pushkarna, M., Zaldivar, A., Kjartansson, O.: Data Cards: purposeful and transparent dataset documentation for responsible AI. In: ACM Conference on Fairness, Accountability, and Transparency. FAccT \u201922. New York, NY, USA: Association for Computing Machinery, pp. 1776\u20131826 (2022)","DOI":"10.1145\/3531146.3533231"},{"key":"914_CR157","unstructured":"Institute OD.: Data Ethics Canvas user guide V2. Available from: https:\/\/docs.google.com\/document\/d\/1MkvoAP86CwimbBD0dxySVCO0zeVOput_bu1A6kHV73M\/edit?tab=t.0#heading=h.n0a7uoc7jz (2018)"},{"key":"914_CR158","unstructured":"UK G.: Data Ethics Framework: Legislation and Codes of Practice for Use of Data. Available from: https:\/\/www.gov.uk\/government\/publications\/data-ethics-framework\/data-ethics-framework-legislation-and-codes-of-practice-for-use-of-data (2020)"},{"key":"914_CR159","unstructured":"Academy, B.: the Royal\u00a0Society.: Data Management and Use: Governance in the 21st Century. The British Academy and the Royal Society London. Available from: https:\/\/royalsociety.org\/~\/media\/policy\/projects\/data-governance\/data-management-governance.pdf (2017)"},{"key":"914_CR160","doi-asserted-by":"crossref","unstructured":"Antignac, T., Sands, D., Schneider, G.: Data minimisation: a language-based approach. In: Martinelli, F., De Capitani di Vimercati, S. (eds.) ICT Systems Security and Privacy Protection, pp. 442\u2013456. Springer, Cham (2017)","DOI":"10.1007\/978-3-319-58469-0_30"},{"key":"914_CR161","doi-asserted-by":"publisher","unstructured":"McMillan-Major, A., Bender, E.M., Friedman, B.: Data statements: from technical concept to community practice. 
ACM J Responsib Comput. 1(1). (2024) https:\/\/doi.org\/10.1145\/3594737","DOI":"10.1145\/3594737"},{"key":"914_CR162","unstructured":"Data\u00a0School UU.: Data Ethics Framework: legislation and codes of practice for use of data. Available from: https:\/\/deda.dataschool.nl\/ (2017)"},{"key":"914_CR163","doi-asserted-by":"publisher","unstructured":"Simonyan, K., Vedaldi, A., Zisserman, A.: Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps. arXiv:1312.6034. https:\/\/doi.org\/10.48550\/arXiv.1312.6034 (2014)","DOI":"10.48550\/arXiv.1312.6034"},{"key":"914_CR164","doi-asserted-by":"crossref","unstructured":"Li, O., Liu, H., Chen, C., Rudin, C.: Deep learning for case-based reasoning through prototypes: A neural network that explains its predictions. In: AAAI Conference on Artificial Intelligence. vol.\u00a032; (2018)","DOI":"10.1609\/aaai.v32i1.11771"},{"key":"914_CR165","unstructured":"DeepExplain.: DeepExplain. Available from: https:\/\/edwinwenink.github.io\/ai-ethics-tool-landscape\/tools\/deepexplain\/ (2017)"},{"key":"914_CR166","unstructured":"DeepLIFT.: DeepLIFT. Available from: https:\/\/edwinwenink.github.io\/ai-ethics-tool-landscape\/tools\/deeplift\/ (2017)"},{"key":"914_CR167","unstructured":"Deon.: An ethics checklist for data scientists. Available from: https:\/\/deon.drivendata.org\/ (2025)"},{"key":"914_CR168","unstructured":"Ethically, D.: Design Ethically Toolkit. Available from: https:\/\/www.designethically.com\/ (2023)"},{"key":"914_CR169","doi-asserted-by":"publisher","unstructured":"Peters, D., Calvo, R.A., Ryan, R.M.: Designing for motivation, engagement and wellbeing in digital experience. Front. Psychol. 9-201 (2018) https:\/\/doi.org\/10.3389\/fpsyg.2018.00797","DOI":"10.3389\/fpsyg.2018.00797"},{"key":"914_CR170","doi-asserted-by":"crossref","unstructured":"Suphakul, T., Senivongse, T.: Development of privacy design patterns based on privacy principles and UML. 
In: 18th IEEE\/ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel\/Distributed Computing (SNPD), pp. 369\u2013375 (2017)","DOI":"10.1109\/SNPD.2017.8022748"},{"key":"914_CR171","doi-asserted-by":"crossref","unstructured":"Shang, X., Peng, Z., Yuan, Q., Khan, S., Xie, L., Fang, Y., et\u00a0al.: DIANES: A DEI audit toolkit for news sources. In: 45th International ACM SIGIR Conference on Research and Development in Information Retrieval. SIGIR \u201922. New York, NY, USA: Association for Computing Machinery, pp. 3312\u20133317 (2022)","DOI":"10.1145\/3477495.3531660"},{"key":"914_CR172","doi-asserted-by":"crossref","unstructured":"Mothilal, R.K., Sharma, A., Tan, C.: Explaining machine learning classifiers through diverse counterfactual explanations. In: Conference on Fairness, Accountability, and Transparency, pp. 607\u2013617. Available from: https:\/\/github.com\/interpretml\/DiCE (2020)","DOI":"10.1145\/3351095.3372850"},{"key":"914_CR173","unstructured":"Lab DCS.: Digital Impact Toolkit. Available from: https:\/\/digitalimpact.io\/toolkit\/ (2025)"},{"key":"914_CR174","unstructured":"ELI5.: ELI5. Available from: https:\/\/edwinwenink.github.io\/ai-ethics-tool-landscape\/tools\/eli5\/ (2016)"},{"key":"914_CR175","unstructured":"SAS, E.: Ellpha. Available from: https:\/\/ellpha.ai\/(2025)"},{"key":"914_CR176","unstructured":"Forum, W.E.: Empowering AI Leadership. Available from: https:\/\/new.express.adobe.com\/page\/RsXNkZANwMLEf\/ (2025)"},{"key":"914_CR177","unstructured":"Forum, W.E.: Empowering AI Leadership: AI C-Suite Toolkit. Available from: https:\/\/www.weforum.org\/publications\/empowering-ai-leadership-ai-c-suite-toolkit\/(2022)"},{"key":"914_CR178","doi-asserted-by":"crossref","unstructured":"Shrobe, H., Shrier, D.L., Pentland, A.: In: Enigma: Decentralized Computation Platform with Guaranteed Privacy. MIT Press, pp. 
425\u2013454 (2018)","DOI":"10.7551\/mitpress\/11636.003.0018"},{"key":"914_CR179","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijinfomgt.2021.102433","volume":"62","author":"M Ashok","year":"2022","unstructured":"Ashok, M., Madan, R., Joha, A., Sivarajah, U.: Ethical framework for Artificial Intelligence and Digital technologies. Int. J. Inf. Manage. 62, 102433 (2022). https:\/\/doi.org\/10.1016\/j.ijinfomgt.2021.102433","journal-title":"Int. J. Inf. Manage."},{"key":"914_CR180","doi-asserted-by":"crossref","unstructured":"Agbese, M., Mohanani, R., Khan, A.A., Abrahamsson, P.: Ethical Requirements Stack: A framework for implementing ethical requirements of AI in software engineering practices. In: 27th International Conference on Evaluation and Assessment in Software Engineering. EASE \u201923. New York, NY, USA: Association for Computing Machinery, pp. 326\u2013328 (2023)","DOI":"10.1145\/3593434.3593489"},{"key":"914_CR181","doi-asserted-by":"crossref","unstructured":"Shahriari, K., Shahriari, M.: IEEE standard review\u2014Ethically aligned design: a vision for prioritizing human wellbeing with artificial intelligence and autonomous systems. In: IEEE Canada International Humanitarian Technology Conference (IHTC). IEEE; p. 197\u2013201. Available from: https:\/\/standards.ieee.org\/wp-content\/uploads\/import\/documents\/other\/ead_v2.pdf (2017)","DOI":"10.1109\/IHTC.2017.8058187"},{"key":"914_CR182","unstructured":"Anderson, D., Bonaguro, J., McKinney, M., Nicklin, A., Wiseman, J.: Ethics & Algorithms Toolkit. Available from: https:\/\/ethicstoolkit.ai\/ (2025)"},{"issue":"3","key":"914_CR183","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1109\/MSP.2018.2701164","volume":"16","author":"BC Stahl","year":"2018","unstructured":"Stahl, B.C., Wright, D.: Ethics and privacy in AI and big data: implementing responsible research and innovation. IEEE Secur. Priv. 16(3), 26\u201333 (2018). 
https:\/\/doi.org\/10.1109\/MSP.2018.2701164","journal-title":"IEEE Secur. Priv."},{"key":"914_CR184","unstructured":"Hesketh, P.: Ethics Cards. Available from: http:\/\/ethicskit.org\/ethics-cards.html (2025)"},{"key":"914_CR185","unstructured":"Ethics Kit.: Ethics Kit Tools. Available from: http:\/\/ethicskit.org\/tools.html (2025)"},{"key":"914_CR186","unstructured":"EthicsNet.: EthicsNet. Available from: https:\/\/www.ethicsnet.org\/about (2025)"},{"key":"914_CR187","doi-asserted-by":"crossref","unstructured":"Sokol, K., Flach, P.: Explainability fact sheets: a framework for systematic assessment of explainable approaches. In: Conference on Fairness, Accountability, and Transparency. FAT* \u201920. New York, NY, USA: Association for Computing Machinery, pp. 56\u201367 (2020)","DOI":"10.1145\/3351095.3372870"},{"key":"914_CR188","unstructured":"Oxborough, C., Cameron, E., Rao, A., Birchall, A., Townsend, A., Westermann, C.: Explainable AI: Driving business value through greater understanding. Retrieved from PWC website: https:\/\/www.pwc.co.uk\/audit-assurance\/assets\/explainable-ai.pdf (2018)"},{"issue":"4","key":"914_CR189","doi-asserted-by":"publisher","first-page":"611","DOI":"10.1007\/s13347-017-0279-x","volume":"31","author":"B Lepri","year":"2018","unstructured":"Lepri, B., Oliver, N., Letouz\u00e9, E., Pentland, A., Vinck, P.: Fair, transparent, and accountable algorithmic decision-making processes: the premise, the proposed solutions, and the open challenges. Philos. Technol. 31(4), 611\u2013627 (2018). https:\/\/doi.org\/10.1007\/s13347-017-0279-x","journal-title":"Philos. Technol."},{"key":"914_CR190","unstructured":"Bird, S., Dud\u00edk, M., Edgar, R., Horn, B., Lutz, R., Milan, V., et\u00a0al.: Fairlearn: A toolkit for assessing and improving fairness in AI. Microsoft, Tech Rep MSR-TR-2020-32. 
(2020)"},{"key":"914_CR191","unstructured":"Zafar, M.B., Valera, I., Rogriguez, M.G., Gummadi, K.P.: Fairness Constraints: Mechanisms for Fair Classification. In: Singh A, Zhu J, editors. 20th International Conference on Artificial Intelligence and Statistics. vol.\u00a054 of Proceedings of Machine Learning Research. PMLR, pp. 962\u2013970 (2017)"},{"key":"914_CR192","unstructured":"Zafar, M.B., Valera, I., Rodriguez, M.G., Gummadi, K., Weller, A.: Fairness in Classification. Available from: https:\/\/github.com\/mbilalzafar\/fair-classification (2016)"},{"issue":"1","key":"914_CR193","doi-asserted-by":"publisher","first-page":"267","DOI":"10.1007\/s43681-022-00147-7","volume":"3","author":"A Agarwal","year":"2023","unstructured":"Agarwal, A., Agarwal, H., Agarwal, N.: Fairness Score and process standardization: framework for fairness certification in artificial intelligence systems. AI Ethics 3(1), 267\u2013279 (2023). https:\/\/doi.org\/10.1007\/s43681-022-00147-7","journal-title":"AI Ethics"},{"key":"914_CR194","doi-asserted-by":"publisher","unstructured":"Rauber, J., Brendel, W., Bethge, M.: Foolbox: A Python toolbox to benchmark the robustness of machine learning models. arXiv:1707.04131. 20 https:\/\/doi.org\/10.48550\/arXiv.1707.04131","DOI":"10.48550\/arXiv.1707.04131"},{"key":"914_CR195","unstructured":"Institute OR.: Foresight into AI Ethics (FAIE): A toolkit for creating an ethics roadmap for your AI project. Available from: https:\/\/openroboethics.org\/wp-content\/uploads\/2021\/07\/ORI-Foresight-into-Artificial-Intelligence-Ethics-Launch-V1.pdf (2019)"},{"key":"914_CR196","doi-asserted-by":"crossref","unstructured":"Sharma, V., Mishra, N., Kukreja, V., Alkhayyat, A., Elngar, A.A.: Framework for evaluating ethics in AI. In: International Conference on Innovative Data Communication Technologies and Application (ICIDCA), pp. 307\u2013312 (2023)","DOI":"10.1109\/ICIDCA56705.2023.10099747"},{"key":"914_CR197","unstructured":"Glenn, J.: Futures Wheel. 
Available from: http:\/\/ethicskit.org\/futures-wheel.html (2025)"},{"key":"914_CR198","unstructured":"Joshi, C., Kaloskampis, I., Nolan, L.: Generative adversarial networks (GANs) for synthetic dataset generation with binary classes. Data Science Campus, 21 (2019)"},{"key":"914_CR199","doi-asserted-by":"crossref","unstructured":"Sokol, K., Flach, P.: Glass-Box: explaining AI decisions with counterfactual statements through conversation with a voice-enabled virtual assistant. In: 27th International Joint Conference on Artificial Intelligence, IJCAI-18. International Joint Conferences on Artificial Intelligence Organization, pp. 5868\u20135870 (2018)","DOI":"10.24963\/ijcai.2018\/865"},{"key":"914_CR200","unstructured":"Officer, I.: Guide to the UK general data protection regulation (UK GDPR). Available from: https:\/\/ico.org.uk\/for-organisations\/uk-gdpr-guidance-and-resources\/ (2025)"},{"key":"914_CR201","unstructured":"H2O ai.: H2O.ai Machine Learning Interpretability Resources. Available from: https:\/\/github.com\/h2oai\/mli-resources (2017)"},{"key":"914_CR202","unstructured":"Microsoft.: HAX Workbook. Available from: https:\/\/www.microsoft.com\/en-us\/haxtoolkit\/workbook\/ (2025)"},{"key":"914_CR203","unstructured":"Hazy.: Hazy. Available from: https:\/\/hazy.com\/ (2025)"},{"key":"914_CR204","unstructured":"Adams, L., Burall, S.: How to stimulate effective public engagement on the ethics of artificial intelligence. Involve: London. Available from: https:\/\/www.involve.org.uk\/sites\/default\/files\/uploads\/docuemnt\/How%20to%20stimulate%20effective%20public%20debate%20on%20the%20ethics%20of%20artificial%20intelligence%20.pdf (2019)"},{"issue":"1","key":"914_CR205","doi-asserted-by":"publisher","first-page":"237","DOI":"10.1093\/qje\/qjx032","volume":"133","author":"J Kleinberg","year":"2017","unstructured":"Kleinberg, J., Lakkaraju, H., Leskovec, J., Ludwig, J., Mullainathan, S.: Human decisions and machine predictions. Q. J. Econ. 
133(1), 237\u2013293 (2017). https:\/\/doi.org\/10.1093\/qje\/qjx032","journal-title":"Q. J. Econ."},{"key":"914_CR206","unstructured":"Studio IW.: IBM Watson OpenScale. Available from: https:\/\/www.ibm.com\/uk-en\/cloud\/watson-openscale (2025)"},{"key":"914_CR207","unstructured":"Graham, C.: Anonymisation: Managing Data Protection Risk Code of Practice. Information Commissioner\u2019s Office. (2012)"},{"key":"914_CR208","unstructured":"Office IC.: Guide to the General Data Protection Regulation (GDPR). Available from: https:\/\/ico.org.uk\/for-organisations\/guide-to-data-protection\/guide-to-the-general-data-protection-regulation-gdpr\/ (2025)"},{"key":"914_CR209","unstructured":"Olszewska, J.I., Systems, Committee SES, et\u00a0al.: IEEE standard model process for addressing ethical concerns during system design: IEEE Standard 7000-2021. IEEE. (2021)"},{"key":"914_CR210","unstructured":"IEEE.: IEEE Recommended Practice for Assessing the Impact of Autonomous and Intelligent Systems on Human Well-Being. Available from: https:\/\/standards.ieee.org\/industry-connections\/ec\/autonomous-systems.html (2022)"},{"key":"914_CR211","unstructured":"IEEE.: IEEE Ethics Certification Program for Autonomous and Intelligent Systems (ECPAIS). Available from: https:\/\/standards.ieee.org\/industry-connections\/ecpais.html (2020)"},{"issue":"1","key":"914_CR212","doi-asserted-by":"publisher","DOI":"10.1088\/1757-899X\/428\/1\/012049","volume":"428","author":"WW Zhao","year":"2018","unstructured":"Zhao, W.W.: Improving social responsibility of artificial intelligence by using ISO 26000. IOP Conf. Ser.: Mater. Sci. Eng. 428(1), 012049 (2018). https:\/\/doi.org\/10.1088\/1757-899X\/428\/1\/012049","journal-title":"IOP Conf. Ser.: Mater. Sci. Eng."},{"key":"914_CR213","doi-asserted-by":"crossref","unstructured":"Crisan, A., Drouhard, M., Vig, J., Rajani, N.: Interactive Model Cards: A Human-Centered Approach to Model Documentation. 
In: ACM Conference on Fairness, Accountability, and Transparency. FAccT \u201922. New York, NY, USA: Association for Computing Machinery, pp. 427\u2013439 (2022)","DOI":"10.1145\/3531146.3533108"},{"key":"914_CR214","unstructured":"Nori, H., Jenkins, S., Koch, P., Caruana, R.: InterpretML: A Unified Framework for Machine Learning Interpretability. arXiv preprint arXiv:1909.09223. (2019)"},{"key":"914_CR215","unstructured":"Madras, D., Creager, E., Pitassi, T., Zemel, R.: Learning adversarially fair and transferable representations. In: Dy, J., Krause, A. (eds.) 35th International Conference on Machine Learning. vol.\u00a080 of Proceedings of Machine Learning Research. PMLR, pp. 3384\u20133393. Available from: https:\/\/proceedings.mlr.press\/v80\/madras18a.html (2018)"},{"key":"914_CR216","unstructured":"Shrikumar, A., Greenside, P., Kundaje, A.: Learning Important Features Through Propagating Activation Differences. In: Precup, D., Teh, Y.W. (eds.) 34th International Conference on Machine Learning, vol.\u00a070 of Proceedings of Machine Learning Research. PMLR, pp. 3145\u20133153 (2017)"},{"key":"914_CR217","unstructured":"Johansson, F., Shalit, U., Sontag, D.: Learning Representations for Counterfactual Inference. In: Balcan, M.F., Weinberger, K.Q. (eds.) 33rd International Conference on Machine Learning. vol.\u00a048 of Proceedings of Machine Learning Research. New York, New York, USA: PMLR, pp. 3020\u20133029. Available from: https:\/\/proceedings.mlr.press\/v48\/johansson16.html (2016)"},{"key":"914_CR218","unstructured":"Bolukbasi, T., Chang, K.W., Zou, J.Y., Saligrama, V., Kalai, A.T.: Man is to Computer Programmer as Woman is to Homemaker? Debiasing Word Embeddings. Adv. Neural Inf. Process. Syst. 29 (2016)"},{"key":"914_CR219","doi-asserted-by":"publisher","unstructured":"Hoffman, R.R., Mueller, S.T., Klein, G., Litman, J.: Metrics for Explainable AI: Challenges and Prospects. arXiv:1812.04608. 
(2019) https:\/\/doi.org\/10.48550\/arXiv.1812.04608","DOI":"10.48550\/arXiv.1812.04608"},{"key":"914_CR220","unstructured":"Microsoft.: Microsoft\u2019s framework for building AI systems responsibly. Available from: https:\/\/blogs.microsoft.com\/on-the-issues\/2022\/06\/21\/microsofts-framework-for-building-ai-systems-responsibly\/?ref=blog.salesforceairesearch.com (2022)"},{"key":"914_CR221","unstructured":"Foundation IA.: Model Ethical Data Impact Assessment. Available from: https:\/\/www.informationaccountability.org\/publications (2019)"},{"key":"914_CR222","doi-asserted-by":"publisher","unstructured":"Bassily, R., Thakkar, O., Thakurta, A.: Model-Agnostic Private Learning via Stability. arXiv:1803.05101. (2018) https:\/\/doi.org\/10.48550\/arXiv.1803.05101","DOI":"10.48550\/arXiv.1803.05101"},{"key":"914_CR223","unstructured":"Machine, M.: Moral Machine. Available from: https:\/\/www.moralmachine.net\/ (2025)"},{"issue":"6","key":"914_CR224","doi-asserted-by":"publisher","first-page":"2589","DOI":"10.1109\/tai.2023.3345805","volume":"5","author":"N Seedat","year":"2024","unstructured":"Seedat, N., Imrie, F., Mvd, Schaar: Navigating data-centric artificial intelligence with DC-Check: advances, challenges, and opportunities. IEEE Trans. Artif. Intell. 5(6), 2589\u20132603 (2024). https:\/\/doi.org\/10.1109\/tai.2023.3345805","journal-title":"IEEE Trans. Artif. Intell."},{"key":"914_CR225","unstructured":"Group, K.: Neural network exchange format. Available from: https:\/\/www.khronos.org\/nnef#:%7E:text=Neural%20Network%20Exchange%20Format%20%20(NNEF,range%20of%20devices%20and%20platforms (2025)"},{"key":"914_CR226","unstructured":"Kit, E.: New Economy Impact Model. 
Available from: http:\/\/ethicskit.org\/downloads\/economy-impact-model.pdf (2025)"},{"issue":"7","key":"914_CR227","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1371\/journal.pone.0130140","volume":"10","author":"S Bach","year":"2015","unstructured":"Bach, S., Binder, A., Montavon, G., Klauschen, F., M\u00fcller, K.R., Samek, W.: On pixel-wise explanations for non-linear classifier decisions by layer-wise relevance propagation. PLoS ONE 10(7), 1\u201346 (2015). https:\/\/doi.org\/10.1371\/journal.pone.0130140","journal-title":"PLoS ONE"},{"key":"914_CR228","unstructured":"ONNX.: Open Neural Network Exchange. Available from: https:\/\/github.com\/onnx (2017)"},{"key":"914_CR229","unstructured":"Foundation, O.: OpenMined. Available from: https:\/\/openmined.org\/ (2025)"},{"key":"914_CR230","unstructured":"The Open Machine Learning Foundation.: OpenML. Available from: https:\/\/www.openml.org\/about (2025)"},{"key":"914_CR231","doi-asserted-by":"publisher","unstructured":"Goldstein, A., Kapelner, A., Bleich, J.: EP. Peeking inside the black box: visualizing statistical learning with plots of individual conditional expectation. J. Comput. Gr. Stat. 24(1):44\u201365. (2015) https:\/\/doi.org\/10.1080\/10618600.2014.907095","DOI":"10.1080\/10618600.2014.907095"},{"key":"914_CR232","unstructured":"PAIR G.: People + AI Research Guidebook. Available from: https:\/\/pair.withgoogle.com\/guidebook\/ (2019)"},{"key":"914_CR233","unstructured":"Orcutt, M.: Personal AI privacy watchdog could help you regain control of your data. MIT Technology Review. Available from: https:\/\/www.technologyreview.com\/s\/607830\/personal-ai-privacy-watchdog-could-help-you-regain-control-of-your-data\/ (2017)"},{"key":"914_CR234","doi-asserted-by":"crossref","unstructured":"Guazzelli, A., Zeller, M., Lin, W.C., Williams, G.: PMML: An open standard for sharing models. R Journal. 
(2009)","DOI":"10.32614\/RJ-2009-010"},{"key":"914_CR235","doi-asserted-by":"publisher","first-page":"305","DOI":"10.1007\/s10515-014-0168-9","volume":"23","author":"LA Dennis","year":"2016","unstructured":"Dennis, L.A., Fisher, M., Lincoln, N.K., Lisitsa, A., Veres, S.M.: Practical verification of decision-making in agent-based autonomous systems. Autom. Softw. Eng. 23, 305\u2013359 (2016). https:\/\/doi.org\/10.1007\/s10515-014-0168-9","journal-title":"Autom. Softw. Eng."},{"key":"914_CR236","unstructured":"Diakopoulos, N., Friedler, S., Arenas, M., Barocas, S., Hay, M., Howe, B. et\u00a0al.: Principles for Accountable Algorithms and a Social Impact Statement for Algorithms. Available from: https:\/\/www.fatml.org\/resources\/principles-for-accountable-algorithms (2025)"},{"key":"914_CR237","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/s12394-010-0053-z","volume":"3","author":"A Cavoukian","year":"2010","unstructured":"Cavoukian, A., Taylor, S., Abrams, M.E.: Privacy by Design: essential for organizational accountability and strong business practices. Identity Inf. Soc. 3, 405\u2013413 (2010). https:\/\/doi.org\/10.1007\/s12394-010-0053-z","journal-title":"Identity Inf. Soc."},{"key":"914_CR238","doi-asserted-by":"publisher","first-page":"219","DOI":"10.1007\/978-3-319-22906-5_17","volume-title":"Trust, Privacy and Security in Digital Business","author":"EL Makri","year":"2015","unstructured":"Makri, E.L., Lambrinoudakis, C.: Privacy principles: towards a common privacy audit methodology. In: Fischer-H\u00fcbner, S., Lambrinoudakis, C., L\u00f3pez, J. (eds.) Trust, Privacy and Security in Digital Business, pp. 219\u2013234. Springer, Cham (2015)"},{"key":"914_CR239","doi-asserted-by":"crossref","unstructured":"Souza, R., Azevedo, L., Louren\u00e7o, V., Soares, E., Thiago, R., Brand\u00e3o, R., et\u00a0al.: Provenance data in the machine learning lifecycle in computational science and engineering. 
In: IEEE\/ACM Workflows in Support of Large-Scale Science (WORKS), pp. 1\u201310 (2019)","DOI":"10.1109\/WORKS49585.2019.00006"},{"key":"914_CR240","unstructured":"Overdorf, R., Kulynych, B., Balsa, E., Troncoso, C., G\u00fcrses, S.: Questioning the assumptions behind fairness solutions. arXiv:1811.11293 (2018)"},{"key":"914_CR241","unstructured":"RAIL.: Responsible AI Licenses (RAIL). Available from: https:\/\/www.licenses.ai\/ai-licenses (2025)"},{"key":"914_CR242","unstructured":"AI MR.: Responsible AI Toolbox. Available from: https:\/\/responsibleaitoolbox.ai\/ (2022)"},{"key":"914_CR243","unstructured":"TensorFlow.: Responsible AI Toolkit. Available from: https:\/\/www.tensorflow.org\/responsible_ai (2025)"},{"key":"914_CR244","doi-asserted-by":"publisher","unstructured":"Sandoval, J.C.B., de\u00a0Santana, V.F., Berger, S., Quigley, L.T., Hobson, S.: Responsible and Inclusive Technology Framework: A Formative Framework to Promote Societal Considerations in Information Technology Contexts. arXiv:2302.11565. (2023) https:\/\/doi.org\/10.48550\/arXiv.2302.11565","DOI":"10.48550\/arXiv.2302.11565"},{"key":"914_CR245","doi-asserted-by":"publisher","DOI":"10.1016\/j.techfore.2022.122306","volume":"188","author":"RK Behera","year":"2023","unstructured":"Behera, R.K., Bala, P.K., Rana, N.P., Irani, Z.: Responsible natural language processing: a principlist framework for social benefits. Technol. Forecast. Soc. Chang. 188, 122306 (2023). https:\/\/doi.org\/10.1016\/j.techfore.2022.122306","journal-title":"Technol. Forecast. Soc. Chang."},{"key":"914_CR246","unstructured":"Pulse UG.: Risks, Harms and Benefits Assessment Tool. Available from: https:\/\/www.unglobalpulse.org\/document\/risks-harms-and-benefits-assessment-tool\/ (2018)"},{"key":"914_CR247","doi-asserted-by":"crossref","unstructured":"Dinan, E., Abercrombie, G., Bergman, S.A., Spruit, S., Hovy, D., Boureau, Y.L., et\u00a0al.: SafetyKit: first aid for measuring safety in open-domain conversational systems. 
In: 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics; Available from: https:\/\/iris.unibocconi.it\/bitstream\/11565\/4053224\/1\/2022.acl-long.284.pdf (2022)","DOI":"10.18653\/v1\/2022.acl-long.284"},{"key":"914_CR248","unstructured":"Papernot, N., Song, S., Mironov, I., Raghunathan, A., Talwar, K., Erlingsson, U.: Scalable Private Learning with PATE. In: International Conference on Learning Representations (2018)"},{"key":"914_CR249","unstructured":"Green, N., Procope, C., Cheema, A., Adediji, A.: System Cards, a new resource for understanding how AI systems work. Meta AI. (2021)"},{"key":"914_CR250","doi-asserted-by":"publisher","unstructured":"Zook, M., Barocas, S., boyd, d., Crawford, K., Keller, E., Gangadharan, S.P., et\u00a0al.: Ten simple rules for responsible big data research. PLOS Comput. Biol. Mar. 13(3):1\u201310. https:\/\/doi.org\/10.1371\/journal.pcbi.1005399","DOI":"10.1371\/journal.pcbi.1005399"},{"key":"914_CR251","unstructured":"Indicators, T.F.: Tensorflow\u2019s Fairness Evaluation and Visualization Toolkit. Available from: https:\/\/github.com\/tensorflow\/fairness-indicators (2020)"},{"key":"914_CR252","first-page":"72","volume-title":"2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing: System Demonstrations","author":"D Simig","year":"2022","unstructured":"Simig, D., Wang, T., Dankers, V., Henderson, P., Batsuren, K., Hupkes, D., et al.: Text Characterization Toolkit (TCT). In: Buntine, W., Liakata, M. (eds.) 2nd Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 12th International Joint Conference on Natural Language Processing: System Demonstrations, pp. 72\u201387. 
Association for Computational Linguistics, Taipei, Taiwan (2022)"},{"key":"914_CR253","doi-asserted-by":"publisher","first-page":"59","DOI":"10.1007\/s10676-018-9447-7","volume":"20","author":"T Arnold","year":"2018","unstructured":"Arnold, T., Scheutz, M.: The big red button is too late: an alternative model for the ethical evaluation of AI systems. Ethics Inf. Technol. 20, 59\u201369 (2018). https:\/\/doi.org\/10.1007\/s10676-018-9447-7","journal-title":"Ethics Inf. Technol."},{"key":"914_CR254","unstructured":"Group CPS, the Tech Fairness\u00a0Coalition, the ACLU\u00a0of Washington.: The Algorithmic Equity Toolkit (AEKit). Available from: https:\/\/www.aclu-wa.org\/AEKit (2020)"},{"key":"914_CR255","doi-asserted-by":"publisher","unstructured":"Chmielinski, K.S., Newman, S., Taylor, M., Joseph, J., Thomas, K., Yurkofsky, J., et\u00a0al.: The Dataset Nutrition Label (2nd Gen): Leveraging Context to Mitigate Harms in Artificial Intelligence. arXiv:2201.03954. (2022) https:\/\/doi.org\/10.48550\/arXiv.2201.03954","DOI":"10.48550\/arXiv.2201.03954"},{"key":"914_CR256","unstructured":"org I.: The Field Guide to Human-Centered Design - Design Kit. Available from: https:\/\/www.designkit.org\/resources\/1.html (2015)"},{"issue":"2","key":"914_CR257","doi-asserted-by":"publisher","first-page":"257","DOI":"10.1016\/j.clsr.2018.01.004","volume":"34","author":"M Butterworth","year":"2018","unstructured":"Butterworth, M.: The ICO and artificial intelligence: the role of fairness in the GDPR framework. Comput. Law Secur. Rev. 34(2), 257\u2013268 (2018). https:\/\/doi.org\/10.1016\/j.clsr.2018.01.004","journal-title":"Comput. Law Secur. Rev."},{"key":"914_CR258","doi-asserted-by":"crossref","unstructured":"Vasudevan, S., Kenthapadi, K.: LiFT: A Scalable Framework for Measuring Fairness in ML Applications. 
In: 29th ACM International Conference on Information and Knowledge Management (CIKM); Available from: https:\/\/github.com\/linkedin\/lift (2020)","DOI":"10.1145\/3340531.3412705"},{"key":"914_CR259","unstructured":"Pineau, J.: The Machine Learning Reproducibility Checklist. Available from: https:\/\/www.cs.mcgill.ca\/~jpineau\/ReproducibilityChecklist.pdf (2020)"},{"key":"914_CR260","doi-asserted-by":"crossref","unstructured":"Shen, H., Wang, L., Deng, W.H., Brusse, C., Velgersdijk, R., Zhu, H.: The Model Card Authoring Toolkit: Toward Community-centered, Deliberation-driven AI Design. In: ACM Conference on Fairness, Accountability, and Transparency. FAccT \u201922. New York, NY, USA: Association for Computing Machinery, pp. 440\u2013451 (2022)","DOI":"10.1145\/3531146.3533110"},{"key":"914_CR261","unstructured":"Bates, A.G., \u0160pakulov\u00e1, I., Dove, I., Mealor, A.: for National\u00a0Statistics O.: ONS Methodology Working paper series number 16 - Synthetic Data Pilot. Available from: https:\/\/www.ons.gov.uk\/methodology\/methodologicalpublications\/generalmethodology\/onsworkingpaperseries\/onsmethodologyworkingpaperseriesnumber16syntheticdatapilot#toc (2025)"},{"key":"914_CR262","first-page":"1","volume":"89","author":"DK Citron","year":"2014","unstructured":"Citron, D.K., Pasquale, F.: The scored society: due process for automated predictions. Wash. Law Rev. 89, 1 (2014)","journal-title":"Wash. Law Rev."},{"key":"914_CR263","doi-asserted-by":"crossref","unstructured":"Lakkaraju, H., Kleinberg, J., Leskovec, J., Ludwig, J., Mullainathan, S.: The Selective Labels Problem: Evaluating Algorithmic Predictions in the Presence of Unobservables. In: 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. New York, NY, USA: Association for Computing Machinery, pp. 275\u2013284 (2017)","DOI":"10.1145\/3097983.3098066"},{"key":"914_CR264","unstructured":"Tale, T.W.: Whole Tale Project. 
Available from: https:\/\/wholetale.org\/ (2025)"},{"issue":"2","key":"914_CR265","doi-asserted-by":"publisher","first-page":"555","DOI":"10.1007\/s43681-023-00292-7","volume":"4","author":"K Simbeck","year":"2024","unstructured":"Simbeck, K.: They shall be fair, transparent, and robust: auditing learning analytics systems. AI Ethics 4(2), 555\u201357 (2024). https:\/\/doi.org\/10.1007\/s43681-023-00292-7","journal-title":"AI Ethics"},{"key":"914_CR266","doi-asserted-by":"publisher","first-page":"277","DOI":"10.1007\/s10618-010-0190-x","volume":"21","author":"T Calders","year":"2010","unstructured":"Calders, T., Verwer, S.: Three naive bayes approaches for discrimination-free classification. Data Min. Knowl. Disc. 21, 277\u201329 (2010). https:\/\/doi.org\/10.1007\/s10618-010-0190-x","journal-title":"Data Min. Knowl. Disc."},{"key":"914_CR267","doi-asserted-by":"crossref","unstructured":"Katell, M., Young, M., Dailey, D., Herman, B., Guetler, V., Tam, A., et\u00a0al.: Toward situated interventions for algorithmic equity: Lessons from the field. In: Conference on Fairness, Accountability, and Transparency. FAT* \u201920. New York, NY, USA: Association for Computing Machinery, pp. 45\u201355 (2020)","DOI":"10.1145\/3351095.3372874"},{"key":"914_CR268","doi-asserted-by":"publisher","first-page":"161","DOI":"10.1007\/978-3-319-67280-9_9","volume-title":"Privacy Technologies and Policy","author":"M Alshammari","year":"2017","unstructured":"Alshammari, M., Simpson, A.: Towards a principled approach for engineering privacy by design. In: Schweighofer, E., Leitold, H., Mitrakas, A., Rannenberg, K. (eds.) Privacy Technologies and Policy, pp. 161\u2013177. Springer, Cham (2017)"},{"key":"914_CR269","doi-asserted-by":"crossref","unstructured":"Ancona, M., Ceolini, E., \u00d6ztireli, C., Gross, M.: Towards better understanding of gradient-based attribution methods for Deep Neural Networks. 
In: International Conference on Learning Representations (2018)","DOI":"10.1007\/978-3-030-28954-6_9"},{"key":"914_CR270","unstructured":"Security, U.P.: Uber Differential Privacy. Available from: https:\/\/medium.com\/uber-security-privacy\/differential-privacy-open-source-7892c82c42b6 (2017)"},{"key":"914_CR271","unstructured":"Lane, G., Angus, A., Murdoch, A.: UnBias Fairness Toolkit. Zenodo. (2018)"},{"key":"914_CR272","doi-asserted-by":"crossref","unstructured":"Leslie, D.: Understanding Artificial Intelligence Ethics and Safety: A Guide for the Responsible Design and Implementation of AI Systems in the Public Sector. Zenodo, (2019)","DOI":"10.2139\/ssrn.3403301"},{"key":"914_CR273","doi-asserted-by":"publisher","unstructured":"Spiekermann, S., Winkler, T.: Value-based Engineering for Ethics by Design. arXiv:2004.13676. (2020) https:\/\/doi.org\/10.48550\/arXiv.2004.13676","DOI":"10.48550\/arXiv.2004.13676"},{"key":"914_CR274","unstructured":"Biases, W.: The AI developer platform to build AI agents, applications, and models with confidence. Available from: https:\/\/wandb.ai\/site (2025)"},{"key":"914_CR275","unstructured":"Data, W.: A new method for ethical data science. Available from: https:\/\/medium.com\/wellcome-data\/a-new-method-for-ethical-data-science-edb59e400ae9 (2025)"},{"key":"914_CR276","unstructured":"Russell, C., Kusner, M.J., Loftus, J., Silva, R.: When Worlds Collide: Integrating Different Counterfactual Assumptions in Fairness. In: Guyon I, Luxburg UV, Bengio S, Wallach H, Fergus R, Vishwanathan S, et\u00a0al., editors. Advances in Neural Information Processing Systems. vol.\u00a030. Curran Associates, Inc.; (2017)"},{"key":"914_CR277","unstructured":"Hasselbalch, G., Olsen, B.K., Tranberg, P.: White Paper on Data Ethics in Public Procurement of AI based Services and Solutions. DataEthics, Dinamarca. 
Available from: https:\/\/dataethics.eu\/wp-content\/uploads\/dataethics-whitepaper-april-2020.pdf (2020)"},{"key":"914_CR278","volume-title":"A Capability Approach to Ethical Development and Internal Auditing of Ai Technology [SSRN Scholarly Paper]","author":"M Graves","year":"2024","unstructured":"Graves, M., Ratti, E.: A Capability Approach to Ethical Development and Internal Auditing of Ai Technology [SSRN Scholarly Paper]. Social Science Research Network, Rochester (2024)"},{"key":"914_CR279","doi-asserted-by":"publisher","unstructured":"Zlateva, P., Steshina, L., Petukhov, I., Velev, D.: A Conceptual Framework for Solving Ethical Issues in Generative Artificial Intelligence. Frontiers in Artificial Intelligence and Applications. 381:110\u2013119. https:\/\/doi.org\/10.3233\/FAIA231182 (2024)","DOI":"10.3233\/FAIA231182"},{"key":"914_CR280","volume-title":"A Human Rights-Based Approach to Artificial Intelligence in Healthcare: A Proposal for a Patients\u2019 Rights Impact Assessment Tool [SSRN Scholarly Paper]","author":"H van Kolfschooten","year":"2024","unstructured":"van Kolfschooten, H.: A Human Rights-Based Approach to Artificial Intelligence in Healthcare: A Proposal for a Patients\u2019 Rights Impact Assessment Tool [SSRN Scholarly Paper]. Social Science Research Network, Rochester (2024)"},{"key":"914_CR281","doi-asserted-by":"publisher","unstructured":"El-Haber, N., Burnett, D., Halford, A., Stamp, K., De\u00a0Silva, D., Manic, M., et\u00a0al.: A Lifecycle Approach for Artificial Intelligence Ethics in Energy Systems. Energies. 17(14). (2024) https:\/\/doi.org\/10.3390\/en17143572","DOI":"10.3390\/en17143572"},{"issue":"3","key":"914_CR282","doi-asserted-by":"publisher","first-page":"428","DOI":"10.1080\/13678868.2024.2346492","volume":"27","author":"J Wang","year":"2024","unstructured":"Wang, J., Pashmforoosh, R.: A new framework for ethical artificial intelligence: keeping HRD in the loop. Human Resour. Dev. Int. 27(3), 428\u2013451 (2024). 
https:\/\/doi.org\/10.1080\/13678868.2024.2346492","journal-title":"Human Resour. Dev. Int."},{"key":"914_CR283","doi-asserted-by":"crossref","unstructured":"Yurt, E., Kasarci, I.: A Questionnaire of artificial intelligence use motives: a contribution to investigating the connection between AI and motivation. Int. J. Technol. Educ. 7(2) (2024)","DOI":"10.46328\/ijte.725"},{"key":"914_CR284","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-024-00480-z","author":"MA Cappelli","year":"2024","unstructured":"Cappelli, M.A., Di Marzo, S.G.: A semi-automated software model to support AI ethics compliance assessment of an AI system guided by ethical principles of AI. AI Ethics (2024). https:\/\/doi.org\/10.1007\/s43681-024-00480-z","journal-title":"AI Ethics"},{"key":"914_CR285","doi-asserted-by":"crossref","unstructured":"Hosobe, H., Satoh, K.: A soft constraint-based framework for ethical reasoning. In: International Conference on Agents and Artificial Intelligence, pp. 1354\u20131361 (2024)","DOI":"10.5220\/0012474100003636"},{"key":"914_CR286","unstructured":"Eindhoven University of Technology, the Netherlands, Faber I. A tangible toolkit to uncover clinician\u2019s ethical values about AI clinical decision support systems. In: DRS Conference Proceedings (2024)"},{"key":"914_CR287","doi-asserted-by":"publisher","DOI":"10.1007\/s13162-024-00275-9","author":"OC Ferrell","year":"2024","unstructured":"Ferrell, O.C., Harrison, D.E., Ferrell, L.K., Ajjan, H., Hochstein, B.W.: A theoretical framework to guide AI ethical decision making. AMS Rev. (2024). https:\/\/doi.org\/10.1007\/s13162-024-00275-9","journal-title":"AMS Rev."},{"key":"914_CR288","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-024-00478-7","author":"Z McFadden","year":"2024","unstructured":"McFadden, Z.: ACESOR: a critical engagement in systems of oppression AI assessment tool. AI Ethics (2024). 
https:\/\/doi.org\/10.1007\/s43681-024-00478-7","journal-title":"AI Ethics"},{"issue":"5","key":"914_CR289","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1109\/MIC.2024.3451351","volume":"28","author":"E Bogucka","year":"2024","unstructured":"Bogucka, E., Constantinides, M., \u0160\u0107epanovi\u0107, S., Quercia, D.: AI design: a responsible artificial intelligence framework for prefilling impact assessment reports. IEEE Internet Comput. 28(5), 37\u201345 (2024). https:\/\/doi.org\/10.1109\/MIC.2024.3451351","journal-title":"IEEE Internet Comput."},{"key":"914_CR290","doi-asserted-by":"crossref","unstructured":"Kon\u00e9, L.A., Leonteva, A.O., Diallo, M.T., Haouba, A., COLLET, P.: AI ethical framework: a government-centric tool using generative AI. Int J. Adv. Comput. Sci. Appl. 15(11) (2024)","DOI":"10.14569\/IJACSA.2024.0151108"},{"issue":"1","key":"914_CR291","doi-asserted-by":"publisher","first-page":"2463722","DOI":"10.1080\/08839514.2025.2463722","volume":"39","author":"P Radanliev","year":"2025","unstructured":"Radanliev, P.: AI ethics: integrating transparency, fairness, and privacy in AI development. Appl. Artif. Intell. 39(1), 2463722 (2025). https:\/\/doi.org\/10.1080\/08839514.2025.2463722","journal-title":"Appl. Artif. Intell."},{"key":"914_CR292","doi-asserted-by":"crossref","unstructured":"Han, T.A., Pandit, D., Joneidy, S., Hasan, M.M., Hossain, J., Hoque\u00a0Tania, M., et\u00a0al.: An Explainable AI Tool for Operational Risks Evaluation of AI Systems for SMEs. In: 15th International Conference on Software, Knowledge, Information Management and Applications (SKIMA), pp. 69\u201374. (2023)","DOI":"10.1109\/SKIMA59232.2023.10387301"},{"key":"914_CR293","doi-asserted-by":"crossref","unstructured":"Iturbe, E., Rios, E., Rego, A., Toledo, N.: Artificial intelligence for next generation cybersecurity: the AI4CYBER framework. In: 18th International Conference on Availability, Reliability and Security. ARES \u201923. 
New York, NY, USA: Association for Computing Machinery, (2023)"},{"key":"914_CR294","doi-asserted-by":"publisher","DOI":"10.1007\/s00146-025-02312-y","author":"A Ghosh","year":"2025","unstructured":"Ghosh, A., Saini, A., Barad, H.: Artificial intelligence in governance: recent trends, risks, challenges, innovative frameworks and future directions. AI Soc. (2025). https:\/\/doi.org\/10.1007\/s00146-025-02312-y","journal-title":"AI Soc."},{"key":"914_CR295","doi-asserted-by":"publisher","unstructured":"Sallam, M., Snygg, J., Sallam, M.: Assessment of Artificial Intelligence Credibility in Evidence-Based Healthcare Management with AERUS Innovative Tool. J. Artif. Intell. Mach. Learn. Data Sci. 2(1):9\u201318. (2024) https:\/\/doi.org\/10.51219\/JAIMLD\/mohammed-sallam\/20","DOI":"10.51219\/JAIMLD\/mohammed-sallam\/20"},{"key":"914_CR296","unstructured":"G\u00f6llner, S., Tropmann-Frick, M.: Bridging the Gap between Theory and Practice: Towards Responsible AI Evaluation. In: CHAI@ KI; p. 68\u201376 (2023)"},{"key":"914_CR297","doi-asserted-by":"crossref","unstructured":"Rossi\u00a0de Borba, J.G., Dias\u00a0Canedo, E., Filho, G.P.R.: Bridging Theory and Practice: A Tool for Translating Ethical Ai Requirements into Ethical User Stories [SSRN Scholarly Paper]. Rochester, NY: Social Science Research Network. (2024)","DOI":"10.2139\/ssrn.5067796"},{"key":"914_CR298","doi-asserted-by":"crossref","unstructured":"Wang, Z., Hao, S., Carpendale, S.: Card-Based Approach to Engage Exploring Ethics in AI for Data Visualization. In: Extended Abstracts of the CHI Conference on Human Factors in Computing Systems. Honolulu HI USA: ACM, pp. 1\u20137 (2024)","DOI":"10.1145\/3613905.3650972"},{"key":"914_CR299","doi-asserted-by":"publisher","unstructured":"Tejani, A.S., Klontzas, M.E., Gatti, A.A., Mongan, J.T., Moy, L., Park, S.H., et\u00a0al.: Checklist for Artificial Intelligence in Medical Imaging (CLAIM): 2024 Update. Radiol. Artif. Intell. 
6(4):e240300. (2024) https:\/\/doi.org\/10.1148\/ryai.240300","DOI":"10.1148\/ryai.240300"},{"key":"914_CR300","doi-asserted-by":"crossref","unstructured":"Bhat, M.: Creative explainable AI tools to understand algorithmic decision-making. In: 16th Conference on Creativity & Cognition. C&C \u201924. New York, NY, USA: Association for Computing Machinery, pp. 10\u201316 (2024)","DOI":"10.1145\/3635636.3664622"},{"key":"914_CR301","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-024-00469-8","author":"NK Corr\u00eaa","year":"2024","unstructured":"Corr\u00eaa, N.K., Santos, J.W., Galv\u00e3o, C., Pasetti, M., Schiavon, D., Naqvi, F., et al.: Crossing the principle\u2013practice gap in AI ethics with ethical problem-solving. AI Ethics (2024). https:\/\/doi.org\/10.1007\/s43681-024-00469-8","journal-title":"AI Ethics"},{"key":"914_CR302","doi-asserted-by":"crossref","unstructured":"Hanschke, V.A., Rees, D., Alanyali, M., Hopkinson, D., Marshall, P.: Data ethics emergency drill: a toolbox for discussing responsible AI for industry teams. In: CHI Conference on Human Factors in Computing Systems. CHI \u201924. New York, NY, USA: Association for Computing Machinery (2024)","DOI":"10.1145\/3613904.3642402"},{"key":"914_CR303","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-023-00376-4","author":"W Badawy","year":"2023","unstructured":"Badawy, W.: Data-driven framework for evaluating digitization and artificial intelligence risk: a comprehensive analysis. AI Ethics (2023). https:\/\/doi.org\/10.1007\/s43681-023-00376-4","journal-title":"AI Ethics"},{"key":"914_CR304","unstructured":"Stewart, M., Warden, P., Omri, Y., Prakash, S., Santos, J., Hymel, S., et\u00a0al.: Datasheets for Machine Learning Sensors. arXiv:2306.08848 [cs]. (2023)"},{"key":"914_CR305","doi-asserted-by":"publisher","unstructured":"Waters, G., Mapp, W., Honenberger, P.: Decisional value scores: A new family of metrics for ethical AI-ML. AI Ethics (2024). 
https:\/\/doi.org\/10.1007\/s43681-024-00504-8","DOI":"10.1007\/s43681-024-00504-8"},{"key":"914_CR306","doi-asserted-by":"publisher","unstructured":"Zhou, J., Chen, F.: E-LENS: User Requirements-Oriented AI Ethics Assurance. arXiv:2503.04747 [cs]. https:\/\/doi.org\/10.48550\/arXiv.2503.04747 (2025)","DOI":"10.48550\/arXiv.2503.04747"},{"issue":"1","key":"914_CR307","doi-asserted-by":"publisher","first-page":"131","DOI":"10.1007\/s43681-023-00393-3","volume":"4","author":"C Sieberichs","year":"2024","unstructured":"Sieberichs, C., Geerkens, S., Braun, A., Waschulzik, T.: ECS: an interactive tool for data quality assurance. AI Ethics 4(1), 131\u2013139 (2024). https:\/\/doi.org\/10.1007\/s43681-023-00393-3","journal-title":"AI Ethics"},{"key":"914_CR308","doi-asserted-by":"publisher","unstructured":"Allen, L.K., Kendeou, P.: ED-AI Lit: An Interdisciplinary Framework for AI Literacy in Education. Policy Insights from the Behavioral and Brain Sciences. p. 23727322231220339. Publisher: SAGE Publications. https:\/\/doi.org\/10.1177\/23727322231220339 (2023)","DOI":"10.1177\/23727322231220339"},{"issue":"4","key":"914_CR309","doi-asserted-by":"publisher","first-page":"1117","DOI":"10.1007\/s43681-023-00309-1","volume":"4","author":"S Afroogh","year":"2024","unstructured":"Afroogh, S., Mostafavi, A., Akbari, A., Pouresmaeil, Y., Goudarzi, S., Hajhosseini, F., et al.: Embedded ethics for responsible artificial intelligence systems (EE-RAIS) in disaster management: a conceptual model and its deployment. AI Ethics 4(4), 1117\u20131141 (2024). https:\/\/doi.org\/10.1007\/s43681-023-00309-1","journal-title":"AI Ethics"},{"key":"914_CR310","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-024-00560-0","author":"D Casaburo","year":"2024","unstructured":"Casaburo, D., Marsh, I.: Ensuring fundamental rights compliance and trustworthiness of law enforcement AI systems: the ALIGNER fundamental rights impact assessment. AI Ethics (2024). 
https:\/\/doi.org\/10.1007\/s43681-024-00560-0","journal-title":"AI Ethics"},{"key":"914_CR311","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-025-00670-3","author":"A Tripathi","year":"2025","unstructured":"Tripathi, A., Kumar, V.: Ethical practices of artificial intelligence: a management framework for responsible AI deployment in businesses. AI Ethics (2025). https:\/\/doi.org\/10.1007\/s43681-025-00670-3","journal-title":"AI Ethics"},{"key":"914_CR312","doi-asserted-by":"publisher","unstructured":"van Hilten, M., Ryan, M., Blok, V., de\u00a0Roo, N.: Ethical, Legal and Social Aspects (ELSA) for AI: an assessment tool for Agri-food. Smart Agric. Technol. pp. 100710. (2024) https:\/\/doi.org\/10.1016\/j.atech.2024.100710","DOI":"10.1016\/j.atech.2024.100710"},{"key":"914_CR313","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-023-00330-4","author":"P Brey","year":"2023","unstructured":"Brey, P., Dainow, B.: Ethics by design for artifcial intelligence. AI Ethics (2023). https:\/\/doi.org\/10.1007\/s43681-023-00330-4","journal-title":"AI Ethics"},{"issue":"9","key":"914_CR314","doi-asserted-by":"publisher","first-page":"AIcs2300269","DOI":"10.1056\/AIcs2300269","volume":"1","author":"N Dagan","year":"2024","unstructured":"Dagan, N., Devons-Sberro, S., Paz, Z., Zoller, L., Sommer, A., Shaham, G.: Evaluation of AI solutions in health care organizations - the OPTICA tool. NEJM AI 1(9), AIcs2300269 (2024). https:\/\/doi.org\/10.1056\/AIcs2300269","journal-title":"NEJM AI"},{"key":"914_CR315","unstructured":"Dotan, R., Blili-Hamelin, B., Madhavan, R., Matthews, J., Scarpino, J.: Evolving AI Risk Management: A Maturity Model based on the NIST AI Risk Management Framework. arXiv:2401.15229 [cs]. (2024)"},{"key":"914_CR316","first-page":"79","volume":"127","author":"C O\u2019Neil","year":"2024","unstructured":"O\u2019Neil, C., Sargeant, H., Appel, J.: Explainable fairness in regulatory algorithmic auditing. W. Va. Law Rev. 127, 79 (2024)","journal-title":"W. Va. 
Law Rev."},{"issue":"2","key":"914_CR317","doi-asserted-by":"publisher","first-page":"559","DOI":"10.1162\/dint_a_00255","volume":"6","author":"S Raza","year":"2024","unstructured":"Raza, S., Ghuge, S., Ding, C., Dolatabadi, E., Pandya, D.: FAIR enough: develop and assess a FAIR-compliant dataset for large language model training? Data Intell. 6(2), 559\u2013585 (2024). https:\/\/doi.org\/10.1162\/dint_a_00255","journal-title":"Data Intell."},{"key":"914_CR318","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-024-00568-6","author":"D Sallami","year":"2024","unstructured":"Sallami, D., A\u00efmeur, E.: Fairframe: a fairness framework for bias detection and mitigation in news. AI Ethics (2024). https:\/\/doi.org\/10.1007\/s43681-024-00568-6","journal-title":"AI Ethics"},{"key":"914_CR319","unstructured":"Moreira, D.A.B., Ferreira, A.I., Silva, J., dos Santos, G.O., Pereira, L., Gondim, J.M., et\u00a0al.: FairPIVARA: Reducing and Assessing Biases in CLIP-Based Multimodal Models. In: British Machine Vision Conference; (2024)"},{"key":"914_CR320","doi-asserted-by":"crossref","unstructured":"Hoche, M., Mineeva, O., Burger, M., Blasimme, A., R\u00e4tsch, G.: FAMEWS: a Fairness Auditing tool for Medical Early-Warning Systems. medRxiv. Pages: 2024.02.08.24302458 (2024)","DOI":"10.1101\/2024.02.08.24302458"},{"key":"914_CR321","doi-asserted-by":"crossref","unstructured":"Rosado\u00a0Gomez, A.A., Calder\u00f3n\u00a0Benavides, M.L.: Framework for bias detection in machine learning models: a fairness approach. In: 17th ACM International Conference on Web Search and Data Mining. WSDM \u201924. New York, NY, USA: Association for Computing Machinery, pp. 
1152\u20131154 (2024)","DOI":"10.1145\/3616855.3635731"},{"issue":"11","key":"914_CR322","doi-asserted-by":"publisher","first-page":"e848","DOI":"10.1016\/S2589-7500(24)00143-2","volume":"6","author":"Y Ning","year":"2024","unstructured":"Ning, Y., Teixayavong, S., Shang, Y., Savulescu, J., Nagaraj, V., Miao, D., et al.: Generative artificial intelligence and ethical considerations in health care: a scoping review and ethics checklist. Lancet Digit. Health. 6(11), e848\u2013e856 (2024). https:\/\/doi.org\/10.1016\/S2589-7500(24)00143-2","journal-title":"Lancet Digit. Health."},{"key":"914_CR323","unstructured":"Lachenmaier, J., Werling, M., Morar, D.: Governance of Artificial Intelligence - A Framework Towards Ethical AI Applications. Ethics and Information Technology. (2023)"},{"key":"914_CR324","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-023-00409-y","author":"J Schuett","year":"2024","unstructured":"Schuett, J., Reuel, A.K., Carlier, A.: How to design an AI ethics board. AI Ethics (2024). https:\/\/doi.org\/10.1007\/s43681-023-00409-y","journal-title":"AI Ethics"},{"key":"914_CR325","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-024-00597-1","author":"J Adams","year":"2024","unstructured":"Adams, J.: Introducing the ethical-epistemic matrix: a principle-based tool for evaluating artificial intelligence in medicine. AI Ethics (2024). https:\/\/doi.org\/10.1007\/s43681-024-00597-1","journal-title":"AI Ethics"},{"key":"914_CR326","doi-asserted-by":"crossref","unstructured":"Morand, C., Ligozat, A.L., N\u00e9v\u00e9ol, A.: MLCA: A Tool for Machine Learning Life Cycle Assessment. In: 10th International Conference on ICT for Sustainability (ICT4S), pp. 227\u2013238 (2024)","DOI":"10.1109\/ICT4S64576.2024.00031"},{"key":"914_CR327","doi-asserted-by":"publisher","unstructured":"Jethwani, H., Lewis, A.C.F.: Multi-Value Alignment for Ml\/Ai Development ChoicesA Four-step Process Including Approaches to Trade-offs. American Philosophical Quarterly. 
62(2):133\u2013152. Publisher: Duke University Press. (2025) https:\/\/doi.org\/10.5406\/21521123.62.2.03","DOI":"10.5406\/21521123.62.2.03"},{"key":"914_CR328","doi-asserted-by":"crossref","unstructured":"Baldassarre, M.T., Gigante, D., Kalinowski, M., Ragone, A.: POLARIS: A framework to guide the development of Trustworthy AI systems. In: IEEE\/ACM 3rd International Conference on AI Engineering-Software Engineering for AI, pp. 200\u2013210 (2024)","DOI":"10.1145\/3644815.3644947"},{"key":"914_CR329","doi-asserted-by":"publisher","first-page":"133097","DOI":"10.1109\/ACCESS.2024.3454061","volume":"12","author":"D Mart\u00edn-Moncunill","year":"2024","unstructured":"Mart\u00edn-Moncunill, D., Garc\u00eda Laredo, E., Carlos Nieves, J.: POTDAI: a tool to evaluate the perceived operational trust degree in artificial intelligence systems. IEEE Access 12, 133097\u2013133109 (2024). https:\/\/doi.org\/10.1109\/ACCESS.2024.3454061","journal-title":"IEEE Access"},{"key":"914_CR330","unstructured":"Lee, S.U., Perera, H., Liu, Y., Xia, B., Lu, Q., Zhu, L.: Responsible AI Question Bank: A Comprehensive Tool for AI Risk Assessment. CoRR. (2024)"},{"key":"914_CR331","doi-asserted-by":"publisher","unstructured":"Maathuis, C., Chockalingam, S.: Risk Assessment of Large Language Models Beyond Apocalyptic Visions. In: European Conference on Cyber Warfare and Security. 23(1):279\u2013286. (2024) https:\/\/doi.org\/10.34190\/eccws.23.1.2293","DOI":"10.34190\/eccws.23.1.2293"},{"key":"914_CR332","doi-asserted-by":"crossref","unstructured":"Li, C., Liu, C., Wu, W.: RuleGLM: An Ethics Evaluation Framework with Knowledge Vector Space. In: Huang, D.S., Si, Z., Zhang, Q. (eds.) Advanced Intelligent Computing Technology And Applications, PT III, ICIC 2024. vol. 14877, Series Title: Lecture Notes in Artificial Intelligence Web of Science ID: WOS:001307345000038. Singapore: Springer-Verlag Singapore Pte Ltd; (2024), pp. 
463\u2013474","DOI":"10.1007\/978-981-97-5669-8_38"},{"key":"914_CR333","first-page":"65072","volume":"37","author":"CY Hsu","year":"2024","unstructured":"Hsu, C.Y., Tsai, Y.L., Lin, C.H., Chen, P.Y., Yu, C.M., Huang, C.Y.: Safe LoRA: the silver lining of reducing safety risks when finetuning large language models. Adv. Neural. Inf. Process. Syst. 37, 65072\u201365094 (2024)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"914_CR334","doi-asserted-by":"crossref","unstructured":"Banerjee, S., Layek, S., Tripathy, S., Kumar, S., Mukherjee, A., Hazra, R.: Safeinfer: Context adaptive decoding time safety alignment for large language models. In: AAAI Conference on Artificial Intelligence. vol.\u00a039; p. 27188\u201327196 (2025)","DOI":"10.1609\/aaai.v39i26.34927"},{"issue":"1","key":"914_CR335","doi-asserted-by":"publisher","first-page":"3447","DOI":"10.30574\/ijsra.2024.13.1.1798","volume":"13","author":"N Kodakandla","year":"2024","unstructured":"Kodakandla, N.: Scaling AI responsibly: leveraging MLOps for sustainable machine learning deployments. Int. J. Sci. Res. Arch. 13(1), 3447\u20133455 (2024)","journal-title":"Int. J. Sci. Res. Arch."},{"key":"914_CR336","unstructured":"Xie, T., Qi, X., Zeng, Y., Huang, Y., Sehwag, U.M., Huang, K., et\u00a0al.: SORRY-Bench: Systematically Evaluating Large Language Model Safety Refusal. In: The 13th International Conference on Learning Representations (2025)"},{"key":"914_CR337","doi-asserted-by":"publisher","DOI":"10.1007\/s00146-023-01851-6","author":"Y Wang","year":"2024","unstructured":"Wang, Y., Lu, E., Ruan, Z., Liang, Y., Zeng, Y.: Stream: social data and knowledge collective intelligence platform for TRaining Ethical AI Models. AI Soc. (2024). 
https:\/\/doi.org\/10.1007\/s00146-023-01851-6","journal-title":"AI Soc."},{"key":"914_CR338","doi-asserted-by":"publisher","unstructured":"Bauroth, M., Rath-Manakidis, P., Langholf, V., Wiskott, L., Glasmachers, T.: tachAId\u2013An interactive tool supporting the design of human-centered AI solutions. Front. Artif. Intell. 7. (2024). https:\/\/doi.org\/10.3389\/frai.2024.1354114","DOI":"10.3389\/frai.2024.1354114"},{"key":"914_CR339","doi-asserted-by":"publisher","unstructured":"Vyhmeister, E., Castane, G.G.: TAI-PRM: trustworthy AI\u2014project risk management framework towards Industry 5.0. AI Ethics (2024) https:\/\/doi.org\/10.1007\/s43681-023-00417-y","DOI":"10.1007\/s43681-023-00417-y"},{"key":"914_CR340","doi-asserted-by":"publisher","unstructured":"Perkins, M., Furze, L., Roe, J., Macvaugh, J.: The Artificial Intelligence Assessment Scale (AIAS): A Framework for Ethical Integration of Generative AI in Educational Assessment. J. Univ. Teach. Learn. Pract. 21(6). (2024) https:\/\/doi.org\/10.53761\/q3azde36","DOI":"10.53761\/q3azde36"},{"key":"914_CR341","doi-asserted-by":"publisher","unstructured":"Habli, I., Hawkins, R., Paterson, C., Ryan, P., Jia, Y., Sujan, M., et\u00a0al.: The BIG Argument for AI Safety Cases. arXiv:2503.11705 [cs]. (2025)https:\/\/doi.org\/10.48550\/arXiv.2503.11705","DOI":"10.48550\/arXiv.2503.11705"},{"key":"914_CR342","doi-asserted-by":"publisher","DOI":"10.1007\/s11023-023-09654-w","author":"AT Nemat","year":"2023","unstructured":"Nemat, A.T., Becker, S.J., Lucas, S., Thomas, S., Gadea, I., Charton, J.E.: The Principle-at-Risk Analysis (PaRA): operationalising digital ethics by bridging principles and operations of a digital ethics advisory panel. Mind. Mach. (2023). https:\/\/doi.org\/10.1007\/s11023-023-09654-w","journal-title":"Mind. 
Mach."},{"key":"914_CR343","doi-asserted-by":"publisher","unstructured":"Getir\u00a0Yaman, S., Ribeiro, P., Burholt, C., Jones, M., Cavalcanti, A., Calinescu, R.: Toolkit for specification, validation and verification of social, legal, ethical, empathetic and cultural requirements for autonomous agents. Sci. Comput. Program. 236. (2024) https:\/\/doi.org\/10.1016\/j.scico.2024.103118","DOI":"10.1016\/j.scico.2024.103118"},{"key":"914_CR344","doi-asserted-by":"publisher","unstructured":"Trotsyuk AA, Waeiss Q, Bhatia RT, Aponte BJ, Heffernan IML, Madgavkar D, et\u00a0al. Toward a framework for risk mitigation of potential misuse of artificial intelligence in biomedical research. Nat. Mach. Intell. pp. 1\u20138. (2024) https:\/\/doi.org\/10.1038\/s42256-024-00926-3","DOI":"10.1038\/s42256-024-00926-3"},{"key":"914_CR345","doi-asserted-by":"publisher","DOI":"10.1016\/j.compcom.2024.102831","volume":"71","author":"MA Vetter","year":"2024","unstructured":"Vetter, M.A., Lucia, B., Jiang, J., Othman, M.: Towards a framework for local interrogation of AI ethics: a case study on text generators, academic integrity, and composing with ChatGPT. Comput. Compos. 71, 102831 (2024). https:\/\/doi.org\/10.1016\/j.compcom.2024.102831","journal-title":"Comput. Compos."},{"key":"914_CR346","doi-asserted-by":"crossref","unstructured":"Kioskli, K., Bishop, L., Polemi, N., Ramfos, A.: Towards a Human-Centric AI Trustworthiness Risk Management Framework. Human Factors in Cybersecurity. 127(127) (2024)","DOI":"10.54941\/ahfe1004766"},{"key":"914_CR347","doi-asserted-by":"crossref","unstructured":"Xia, B., Lu, Q., Zhu, L., Lee, S.U., Liu, Y., Xing, Z.: Towards a Responsible AI Metrics Catalogue: A Collection of Metrics for AI Accountability. In: IEEE\/ACM 3rd International Conference on AI Engineering - Software Engineering for AI. Lisbon Portugal: ACM, pp. 
100\u2013111 (2024)","DOI":"10.1145\/3644815.3644959"},{"key":"914_CR348","doi-asserted-by":"publisher","unstructured":"Berardinis, J.d., Porcaro, L., Mero\u00f1o-Pe\u00f1uela, A., Cangelosi, A., Buckley, T.: Towards Responsible AI Music: an Investigation of Trustworthy Features for Creative Systems. arXiv:2503.18814. https:\/\/doi.org\/10.48550\/arXiv.2503.18814 (2025)","DOI":"10.48550\/arXiv.2503.18814"},{"key":"914_CR349","doi-asserted-by":"publisher","DOI":"10.1136\/bmj-2023-078378","volume":"385","author":"GS Collins","year":"2024","unstructured":"Collins, G.S., Moons, K.G.M., Dhiman, P., Riley, R.D., Beam, A.L., Calster, B.V., et al.: TRIPOD+AI statement: updated guidance for reporting clinical prediction models that use regression or machine learning methods. BMJ 385, e078378 (2024). https:\/\/doi.org\/10.1136\/bmj-2023-078378","journal-title":"BMJ"},{"issue":"5","key":"914_CR350","doi-asserted-by":"publisher","first-page":"443","DOI":"10.1080\/09700161.2023.2288994","volume":"47","author":"S Sharma","year":"2023","unstructured":"Sharma, S.: Trustworthy artificial intelligence: design of AI governance framework. Strateg. Anal. 47(5), 443\u2013464 (2023). https:\/\/doi.org\/10.1080\/09700161.2023.2288994","journal-title":"Strateg. Anal."},{"key":"914_CR351","doi-asserted-by":"crossref","unstructured":"Teo, W., Teoh, Z., Arabi, D.A., Aboushadi, M., Lai, K., Ng, Z., et\u00a0al.: What Would You do? An Ethical AI Quiz. In: IEEE\/ACM 45th International Conference on Software Engineering: Companion Proceedings (ICSE-Companion), pp. 112\u2013116. (2023)","DOI":"10.1109\/ICSE-Companion58688.2023.00036"},{"issue":"4","key":"914_CR352","doi-asserted-by":"publisher","first-page":"28:1","DOI":"10.1145\/3625240","volume":"13","author":"MA Z\u00f6ller","year":"2023","unstructured":"Z\u00f6ller, M.A., Titov, W., Schlegel, T., Huber, M.F.: XAutoML: a visual analytics tool for understanding and validating automated machine learning. ACM Trans. Interact. Intell. Syst. 
13(4), 28:1-28:39 (2023). https:\/\/doi.org\/10.1145\/3625240","journal-title":"ACM Trans. Interact. Intell. Syst."}],"container-title":["AI and Ethics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s43681-025-00914-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s43681-025-00914-2","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s43681-025-00914-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T10:02:26Z","timestamp":1774087346000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s43681-025-00914-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3,21]]},"references-count":352,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2026,4]]}},"alternative-id":["914"],"URL":"https:\/\/doi.org\/10.1007\/s43681-025-00914-2","relation":{},"ISSN":["2730-5953","2730-5961"],"issn-type":[{"value":"2730-5953","type":"print"},{"value":"2730-5961","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3,21]]},"assertion":[{"value":"13 August 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 November 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 March 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of 
interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"210"}}