{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T02:11:35Z","timestamp":1775787095326,"version":"3.50.1"},"reference-count":99,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2025,11,26]],"date-time":"2025-11-26T00:00:00Z","timestamp":1764115200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,11,26]],"date-time":"2025-11-26T00:00:00Z","timestamp":1764115200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Sci Eng Ethics"],"DOI":"10.1007\/s11948-025-00567-8","type":"journal-article","created":{"date-parts":[[2025,11,26]],"date-time":"2025-11-26T08:42:23Z","timestamp":1764146543000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Addressing Autonomy Risks in Generative Chatbots with the Socratic Method"],"prefix":"10.1007","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-7573-1859","authenticated-orcid":false,"given":"Wencheng","family":"Lu","sequence":"first","affiliation":[]},{"given":"Zhenni","family":"Hu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,26]]},"reference":[{"issue":"1","key":"567_CR1","doi-asserted-by":"publisher","first-page":"85","DOI":"10.1007\/s10503-021-09556-0","volume":"36","author":"T Airaksinen","year":"2022","unstructured":"Airaksinen, T. (2022). Socratic irony and argumentation. Argumentation, 36(1), 85\u2013100. https:\/\/doi.org\/10.1007\/s10503-021-09556-0","journal-title":"Argumentation"},{"key":"567_CR2","doi-asserted-by":"publisher","unstructured":"Ang, B. H., Gollapalli, S. D., & Ng, S. K. 
(2023). Socratic question generation: A novel dataset, models, and evaluation. In Proceedings of the 17th conference of the European chapter of the association for computational linguistics, (pp. 147\u2013165). https:\/\/doi.org\/10.18653\/v1\/2023.eacl-main.12","DOI":"10.18653\/v1\/2023.eacl-main.12"},{"key":"567_CR3","doi-asserted-by":"publisher","first-page":"129","DOI":"10.5840\/jpr_2010_10","volume":"35","author":"AM Archie","year":"2010","unstructured":"Archie, A. M. (2010). The anatomy of a dialogue. Journal of Philosophical Research, 35, 129\u2013146. https:\/\/doi.org\/10.5840\/jpr_2010_10","journal-title":"Journal of Philosophical Research"},{"key":"567_CR4","doi-asserted-by":"publisher","DOI":"10.2139\/ssrn.4895486","author":"H Bastani","year":"2024","unstructured":"Bastani, H., Bastani, O., Sungu, A., Ge, H., Kabakc\u0131, \u00d6., & Mariman, R. (2024). Generative AI can harm learning. SSRN. https:\/\/doi.org\/10.2139\/ssrn.4895486","journal-title":"SSRN"},{"issue":"3","key":"567_CR5","doi-asserted-by":"publisher","first-page":"e1206","DOI":"10.1002\/ctm2.1206","volume":"13","author":"C Baumgartner","year":"2023","unstructured":"Baumgartner, C. (2023). The opportunities and pitfalls of ChatGPT in clinical and translational medicine. Clinical and Translational Medicine, 13(3), e1206. https:\/\/doi.org\/10.1002\/ctm2.1206","journal-title":"Clinical and Translational Medicine"},{"issue":"7","key":"567_CR6","doi-asserted-by":"publisher","first-page":"710","DOI":"10.1111\/j.1469-5812.2011.00773.x","volume":"44","author":"P Boghossian","year":"2012","unstructured":"Boghossian, P. (2012). Socratic pedagogy: Perplexity, humiliation, shame and a broken egg. Educational Philosophy and Theory, 44(7), 710\u2013720. 
https:\/\/doi.org\/10.1111\/j.1469-5812.2011.00773.x","journal-title":"Educational Philosophy and Theory"},{"key":"567_CR7","doi-asserted-by":"publisher","first-page":"819","DOI":"10.1007\/s11245-023-09922-5","volume":"42","author":"S Bonicalzi","year":"2023","unstructured":"Bonicalzi, S., De Caro, M., & Giovanola, B. (2023). Artificial intelligence and autonomy: On the ethical dimension of recommender systems. Topoi, 42, 819\u2013832. https:\/\/doi.org\/10.1007\/s11245-023-09922-5","journal-title":"Topoi"},{"key":"567_CR8","doi-asserted-by":"publisher","first-page":"315","DOI":"10.1007\/s43681-022-00157-5","volume":"3","author":"M Botes","year":"2023","unstructured":"Botes, M. (2023). Autonomy and the social dilemma of online manipulative behavior. AI and Ethics, 3, 315\u2013323. https:\/\/doi.org\/10.1007\/s43681-022-00157-5","journal-title":"AI and Ethics"},{"issue":"2","key":"567_CR9","doi-asserted-by":"publisher","first-page":"272","DOI":"10.1162\/daed_a_01915","volume":"151","author":"E Brynjolfsson","year":"2022","unstructured":"Brynjolfsson, E. (2022). The Turing trap: The promise & peril of human-like artificial intelligence. Daedalus, 151(2), 272\u2013287.","journal-title":"Daedalus"},{"key":"567_CR10","doi-asserted-by":"publisher","unstructured":"Bu\u00e7inca, Z., Malaya, M. B., & Gajos, K. Z. (2021). To trust or to think: Cognitive forcing functions can reduce overreliance on AI in AI-assisted decision-making. Proceedings of the ACM on Human-Computer Interaction, 5(CSCW1). https:\/\/doi.org\/10.1145\/3449287","DOI":"10.1145\/3449287"},{"issue":"1","key":"567_CR11","doi-asserted-by":"publisher","first-page":"205395171562251","DOI":"10.1177\/2053951715622512","volume":"3","author":"J Burrell","year":"2016","unstructured":"Burrell, J. (2016). How the machine \u2018thinks\u2019: Understanding opacity in machine learning algorithms. Big Data & Society, 3(1), 2053951715622512. 
https:\/\/doi.org\/10.1177\/2053951715622512","journal-title":"Big Data & Society"},{"key":"567_CR12","doi-asserted-by":"publisher","first-page":"107308","DOI":"10.1016\/j.chb.2022.107308","volume":"134","author":"C Candrian","year":"2022","unstructured":"Candrian, C., & Scherer, A. (2022). Rise of the machines: Delegating decisions to autonomous AI. Computers in Human Behavior, 134, 107308. https:\/\/doi.org\/10.1016\/j.chb.2022.107308","journal-title":"Computers in Human Behavior"},{"key":"567_CR14","doi-asserted-by":"publisher","unstructured":"Chang, E. Y. (2024). SocraSynth: Multi-LLM reasoning with conditional statistics (No. arXiv:2402.06634). arXiv. https:\/\/doi.org\/10.48550\/arXiv.2402.06634","DOI":"10.48550\/arXiv.2402.06634"},{"key":"567_CR13","doi-asserted-by":"publisher","unstructured":"Chang, E. Y. (2023). Prompting large language models with the Socratic method. In 2023 IEEE 13th annual computing and communication workshop and conference (CCWC) (pp. 0351\u20130360). https:\/\/doi.org\/10.1109\/CCWC57344.2023.10099179","DOI":"10.1109\/CCWC57344.2023.10099179"},{"key":"567_CR15","doi-asserted-by":"publisher","unstructured":"Chidambaram, S., Li, L. E., Bai, M., Li, X., Lin, K., Zhou, X., & Williams, A. C. (2024). Socratic human feedback (SoHF): Expert steering strategies for LLM code generation. In Y. Al-Onaizan, M. Bansal, & Y.-N. Chen (Eds.), Findings of the association for computational linguistics: EMNLP 2024 (pp. 15491\u201315502). Association for Computational Linguistics. https:\/\/doi.org\/10.18653\/v1\/2024.findings-emnlp.908","DOI":"10.18653\/v1\/2024.findings-emnlp.908"},{"key":"567_CR16","doi-asserted-by":"crossref","unstructured":"Christman, J. (2009). The politics of persons: Individual autonomy and socio-historical selves. Cambridge University Press.","DOI":"10.1017\/CBO9780511635571"},{"key":"567_CR17","unstructured":"Christman, J. (2020). Autonomy in moral and political philosophy. In E. N. 
Zalta (Ed.), The Stanford encyclopedia of philosophy (Fall 2020 edition). https:\/\/plato.stanford.edu\/archives\/fall2020\/entries\/autonomy-moral\/"},{"issue":"10","key":"567_CR18","doi-asserted-by":"publisher","first-page":"1851","DOI":"10.1038\/s41562-024-01991-9","volume":"8","author":"KM Collins","year":"2024","unstructured":"Collins, K. M., Sucholutsky, I., Bhatt, U., Chandra, K., Wong, L., Lee, M., Zhang, C. E., Zhi-Xuan, T., Ho, M., Mansinghka, V., Weller, A., Tenenbaum, J. B., & Griffiths, T. L. (2024). Building machines that learn and think with people. Nature Human Behaviour, 8(10), 1851\u20131863. https:\/\/doi.org\/10.1038\/s41562-024-01991-9","journal-title":"Nature Human Behaviour"},{"key":"567_CR19","doi-asserted-by":"publisher","unstructured":"Dai, S., Xu, C., Xu, S., Pang, L., Dong, Z., & Xu, J. (2024). Bias and unfairness in information retrieval systems: New challenges in the LLM era. In Proceedings of the 30th ACM SIGKDD conference on knowledge discovery and data mining (pp. 6437\u20136447). https:\/\/doi.org\/10.1145\/3637528.3671458","DOI":"10.1145\/3637528.3671458"},{"issue":"5","key":"567_CR20","doi-asserted-by":"publisher","first-page":"2479","DOI":"10.1007\/s00146-023-01720-2","volume":"39","author":"JI Del Valle","year":"2024","unstructured":"Del Valle, J. I., & Lara, F. (2024). AI-powered recommender systems and the preservation of personal autonomy. AI & SOCIETY, 39(5), 2479\u20132491. https:\/\/doi.org\/10.1007\/s00146-023-01720-2","journal-title":"AI & SOCIETY"},{"issue":"3","key":"567_CR21","doi-asserted-by":"publisher","first-page":"793","DOI":"10.1007\/s43681-022-00205-0","volume":"3","author":"RBL Dixon","year":"2023","unstructured":"Dixon, R. B. L. (2023). A principled governance for emerging AI regimes: Lessons from China, the European Union, and the United States. AI and Ethics, 3(3), 793\u2013810. 
https:\/\/doi.org\/10.1007\/s43681-022-00205-0","journal-title":"AI and Ethics"},{"key":"567_CR22","doi-asserted-by":"publisher","unstructured":"Dogruel, L. (2021). What is algorithm literacy? A conceptualization and challenges regarding its empirical measurement. In M. Taddicken & C. Schumann (Eds.), Algorithms and communication (pp. 67\u201393). Freie Universit\u00e4t Berlin. https:\/\/doi.org\/10.48541\/DCR.V9.3","DOI":"10.48541\/DCR.V9.3"},{"issue":"10","key":"567_CR23","doi-asserted-by":"publisher","first-page":"1361","DOI":"10.1007\/s11606-012-2077-6","volume":"27","author":"G Elwyn","year":"2012","unstructured":"Elwyn, G., Frosch, D., Thomson, R., Joseph-Williams, N., Lloyd, A., Kinnersley, P., Cording, E., Tomson, D., Dodd, C., Rollnick, S., Edwards, A., & Barry, M. (2012). Shared decision making: A model for clinical practice. Journal of General Internal Medicine, 27(10), 1361\u20131367. https:\/\/doi.org\/10.1007\/s11606-012-2077-6","journal-title":"Journal of General Internal Medicine"},{"key":"567_CR24","doi-asserted-by":"publisher","unstructured":"Favero, L., P\u00e9rez-Ortiz, J. A., K\u00e4ser, T., & Oliver, N. (2024). Enhancing critical thinking in education by means of a Socratic chatbot (No. arXiv:2409.05511). arXiv. https:\/\/doi.org\/10.48550\/arXiv.2409.05511","DOI":"10.48550\/arXiv.2409.05511"},{"key":"567_CR25","doi-asserted-by":"publisher","DOI":"10.1111\/phc3.12760","author":"S Fazelpour","year":"2021","unstructured":"Fazelpour, S., & Danks, D. (2021). Algorithmic bias: Senses, sources, solutions. Philosophy Compass. https:\/\/doi.org\/10.1111\/phc3.12760","journal-title":"Philosophy Compass"},{"issue":"4","key":"567_CR26","doi-asserted-by":"publisher","first-page":"346","DOI":"10.1287\/stsc.2024.0189","volume":"9","author":"T Felin","year":"2024","unstructured":"Felin, T., & Holweg, M. (2024). Theory is all you need: AI, human cognition, and causal reasoning. Strategy Science, 9(4), 346\u2013371. 
https:\/\/doi.org\/10.1287\/stsc.2024.0189","journal-title":"Strategy Science"},{"key":"567_CR27","doi-asserted-by":"publisher","unstructured":"Floridi, L., & Cowls, J. (2019). A unified framework of five principles for AI in society. Harvard Data Science Review, 1(1). https:\/\/doi.org\/10.1162\/99608f92.8cd550d1","DOI":"10.1162\/99608f92.8cd550d1"},{"key":"567_CR28","doi-asserted-by":"publisher","DOI":"10.1007\/s00146-024-01955-7","author":"F Fossa","year":"2024","unstructured":"Fossa, F. (2024). Artificial intelligence and human autonomy: The case of driving automation. AI & SOCIETY. https:\/\/doi.org\/10.1007\/s00146-024-01955-7","journal-title":"AI & SOCIETY"},{"key":"567_CR29","doi-asserted-by":"publisher","unstructured":"Gabriel, I., Manzini, A., Keeling, G., Hendricks, L. A., Rieser, V., Iqbal, H., Toma\u0161ev, N., Ktena, I., Kenton, Z., Rodriguez, M., El-Sayed, S., Brown, S., Akbulut, C., Trask, A., Hughes, E., Bergman, A. S., Shelby, R., Marchal, N., Griffin, C., & Manyika, J. (2024). The ethics of advanced AI assistants (No. arXiv:2404.16244). arXiv. https:\/\/doi.org\/10.48550\/arXiv.2404.16244","DOI":"10.48550\/arXiv.2404.16244"},{"key":"567_CR30","doi-asserted-by":"publisher","unstructured":"Gerlich, M. (2025). AI tools in society: Impacts on cognitive offloading and the future of critical thinking. Societies, 15(1). https:\/\/doi.org\/10.3390\/soc15010006","DOI":"10.3390\/soc15010006"},{"issue":"2","key":"567_CR31","doi-asserted-by":"publisher","first-page":"169","DOI":"10.1007\/s13347-017-0285-z","volume":"31","author":"A Giubilini","year":"2018","unstructured":"Giubilini, A., & Savulescu, J. (2018). The artificial moral advisor. The ideal observer meets artificial intelligence. Philosophy & Technology, 31(2), 169\u2013188. 
https:\/\/doi.org\/10.1007\/s13347-017-0285-z","journal-title":"Philosophy & Technology"},{"issue":"4","key":"567_CR32","doi-asserted-by":"publisher","first-page":"045005","DOI":"10.1088\/1361-6552\/ad3d21","volume":"59","author":"B Gregorcic","year":"2024","unstructured":"Gregorcic, B., Polverini, G., & Sarlah, A. (2024). ChatGPT as a tool for honing teachers\u2019 Socratic dialogue skills. Physics Education, 59(4), 045005. https:\/\/doi.org\/10.1088\/1361-6552\/ad3d21","journal-title":"Physics Education"},{"key":"567_CR33","doi-asserted-by":"publisher","unstructured":"Gu, Y., Tafjord, O., & Clark, P. (2024). Digital Socrates: Evaluating LLMs through explanation critiques (No. arXiv:2311.09613). arXiv. https:\/\/doi.org\/10.48550\/arXiv.2311.09613","DOI":"10.48550\/arXiv.2311.09613"},{"key":"567_CR34","doi-asserted-by":"publisher","unstructured":"Hacker, P., Engel, A., & Mauer, M. (2023). Regulating ChatGPT and other large generative AI models. In Proceedings of the 2023 ACM conference on fairness accountability and transparency (pp. 1112\u20131123). https:\/\/doi.org\/10.1145\/3593013.3594067","DOI":"10.1145\/3593013.3594067"},{"key":"567_CR35","doi-asserted-by":"publisher","first-page":"104512","DOI":"10.1016\/j.ebiom.2023.104512","volume":"90","author":"S Harrer","year":"2023","unstructured":"Harrer, S. (2023). Attention is not all you need: The complicated case of ethically using large language models in healthcare and medicine. eBioMedicine, 90, 104512. https:\/\/doi.org\/10.1016\/j.ebiom.2023.104512","journal-title":"eBioMedicine"},{"key":"567_CR36","doi-asserted-by":"publisher","first-page":"1819","DOI":"10.1007\/s00146-020-01024-9","volume":"38","author":"M H\u00e9der","year":"2023","unstructured":"H\u00e9der, M. (2023). The epistemic opacity of autonomous systems and the ethical consequences. AI & SOCIETY, 38, 1819\u20131827. 
https:\/\/doi.org\/10.1007\/s00146-020-01024-9","journal-title":"AI & SOCIETY"},{"issue":"2","key":"567_CR37","doi-asserted-by":"publisher","first-page":"67","DOI":"10.24818\/ejis.2023.17","volume":"15","author":"S H\u00f6ller","year":"2023","unstructured":"H\u00f6ller, S., Dilger, T., Spiess, T., Ploder, C., & Bernsteiner, R. (2023). Awareness of unethical artificial intelligence and its mitigation measures. European Journal of Interdisciplinary Studies, 15(2), 67\u201389. https:\/\/doi.org\/10.24818\/ejis.2023.17","journal-title":"European Journal of Interdisciplinary Studies"},{"issue":"1","key":"567_CR38","doi-asserted-by":"publisher","first-page":"201","DOI":"10.1162\/dint_a_00243","volume":"6","author":"S Hua","year":"2024","unstructured":"Hua, S., Jin, S., & Jiang, S. (2024). The limitations and ethical considerations of ChatGPT. Data Intelligence, 6(1), 201\u2013239. https:\/\/doi.org\/10.1162\/dint_a_00243","journal-title":"Data Intelligence"},{"issue":"3","key":"567_CR39","doi-asserted-by":"publisher","first-page":"833","DOI":"10.1007\/s11245-023-09940-3","volume":"42","author":"M Ienca","year":"2023","unstructured":"Ienca, M. (2023). On artificial intelligence and manipulation. Topoi, 42(3), 833\u2013842. https:\/\/doi.org\/10.1007\/s11245-023-09940-3","journal-title":"Topoi"},{"key":"567_CR40","doi-asserted-by":"publisher","unstructured":"Izumi, K., Tanaka, H., Shidara, K., Adachi, H., Kanayama, D., Kudo, T., & Nakamura, S. (2024). Response generation for cognitive behavioral therapy with large language models: Comparative study with Socratic questioning (No. arXiv:2401.15966). arXiv. https:\/\/doi.org\/10.48550\/arXiv.2401.15966","DOI":"10.48550\/arXiv.2401.15966"},{"issue":"9","key":"567_CR41","doi-asserted-by":"publisher","first-page":"389","DOI":"10.1038\/s42256-019-0088-2","volume":"1","author":"A Jobin","year":"2019","unstructured":"Jobin, A., Ienca, M., & Vayena, E. (2019). The global landscape of AI ethics guidelines. 
Nature Machine Intelligence, 1(9), 389\u2013399. https:\/\/doi.org\/10.1038\/s42256-019-0088-2","journal-title":"Nature Machine Intelligence"},{"issue":"3","key":"567_CR42","doi-asserted-by":"publisher","first-page":"93","DOI":"10.1007\/s11229-024-04716-7","volume":"204","author":"B Karlan","year":"2024","unstructured":"Karlan, B. (2024). Authenticity in algorithm-aided decision-making. Synthese, 204(3), 93. https:\/\/doi.org\/10.1007\/s11229-024-04716-7","journal-title":"Synthese"},{"key":"567_CR43","doi-asserted-by":"crossref","unstructured":"King, N. L. (2021). The excellent mind: Intellectual virtues for everyday life. Oxford University Press.","DOI":"10.1093\/oso\/9780190096250.001.0001"},{"issue":"1","key":"567_CR44","doi-asserted-by":"publisher","first-page":"9","DOI":"10.1007\/s10676-024-09745-x","volume":"26","author":"M Klenk","year":"2024","unstructured":"Klenk, M. (2024). Ethics of generative AI and manipulation: A design-oriented research agenda. Ethics and Information Technology, 26(1), 9. https:\/\/doi.org\/10.1007\/s10676-024-09745-x","journal-title":"Ethics and Information Technology"},{"key":"567_CR45","doi-asserted-by":"publisher","unstructured":"Kosmyna, N., Hauptmann, E., Yuan, Y. T., Situ, J., Liao, X. H., Beresnitzky, A. V., Braunstein, I., & Maes, P. (2025). Your brain on ChatGPT: Accumulation of cognitive debt when using an AI assistant for essay writing task (No. arXiv:2506.08872). arXiv. https:\/\/doi.org\/10.48550\/arXiv.2506.08872","DOI":"10.48550\/arXiv.2506.08872"},{"key":"567_CR46","doi-asserted-by":"publisher","DOI":"10.1007\/s00146-025-02397-5","author":"J Krook","year":"2025","unstructured":"Krook, J. (2025). When autonomy breaks: The hidden existential risk of AI. AI & SOCIETY. https:\/\/doi.org\/10.1007\/s00146-025-02397-5","journal-title":"AI & SOCIETY"},{"key":"567_CR47","doi-asserted-by":"publisher","DOI":"10.3389\/frai.2021.705164","author":"A Laitinen","year":"2021","unstructured":"Laitinen, A., & Sahlgren, O. (2021). 
AI systems and respect for human autonomy. Frontiers in Artificial Intelligence. https:\/\/doi.org\/10.3389\/frai.2021.705164. 4.","journal-title":"Frontiers in Artificial Intelligence"},{"key":"567_CR48","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1007\/s11948-021-00318-5","volume":"27","author":"F Lara","year":"2021","unstructured":"Lara, F. (2021). Why a virtual assistant for moral enhancement when we could have a Socrates? Science and Engineering Ethics, 27, 42. https:\/\/doi.org\/10.1007\/s11948-021-00318-5","journal-title":"Science and Engineering Ethics"},{"issue":"3","key":"567_CR49","doi-asserted-by":"publisher","first-page":"275","DOI":"10.1007\/s12152-019-09401-y","volume":"13","author":"F Lara","year":"2020","unstructured":"Lara, F., & Deckers, J. (2020). Artificial intelligence as a Socratic assistant for moral enhancement. Neuroethics, 13(3), 275\u2013287. https:\/\/doi.org\/10.1007\/s12152-019-09401-y","journal-title":"Neuroethics"},{"key":"567_CR50","doi-asserted-by":"publisher","unstructured":"Lee, H. P. (Hank), Sarkar, A., Tankelevitch, L., Drosos, I., Rintel, S., Banks, R., & Wilson, N. (2025). The impact of generative AI on critical thinking: Self-reported reductions in cognitive effort and confidence effects from a survey of knowledge workers. Proceedings of the 2025 CHI Conference on Human Factors in Computing Systems, 1\u201322. https:\/\/doi.org\/10.1145\/3706598.3713778","DOI":"10.1145\/3706598.3713778"},{"key":"567_CR51","doi-asserted-by":"publisher","unstructured":"Leslie, D., & Meng, X. L. (2024). Future shock: Grappling with the generative AI revolution. Harvard Data Science Review, Special Issue 5. https:\/\/doi.org\/10.1162\/99608f92.fad6d25c","DOI":"10.1162\/99608f92.fad6d25c"},{"key":"567_CR52","unstructured":"Liu, J., Huang, Z., Xiao, T., Sha, J., Wu, J., Liu, Q., Wang, S., & Chen, E. (2024, November 6). SocraticLM: Exploring Socratic personalized teaching with large language models. 
In The thirty-eighth annual conference on neural information processing systems. https:\/\/openreview.net\/forum?id=qkoZgJhxsA"},{"key":"567_CR53","doi-asserted-by":"publisher","unstructured":"Locke, E. A., & Latham, G. P. (2002). Building a practically useful theory of goal setting and task motivation: A 35-year odyssey. American Psychologist, 57(9), Article 9. https:\/\/doi.org\/10.1037\/0003-066X.57.9.705","DOI":"10.1037\/0003-066X.57.9.705"},{"key":"567_CR54","doi-asserted-by":"publisher","unstructured":"Lu, W. (2024). Inevitable challenges of autonomy: Ethical concerns in personalized algorithmic decision-making. Humanities and Social Sciences Communications, 11. https:\/\/doi.org\/10.1057\/s41599-024-03864-y","DOI":"10.1057\/s41599-024-03864-y"},{"key":"567_CR55","doi-asserted-by":"publisher","unstructured":"Mackenzie, C. (2014). Three dimensions of autonomy: A relational analysis. In A. Veltman, & M. Piper (Eds.), Autonomy, oppression, and gender (pp. 15\u201341). Oxford University Press. https:\/\/doi.org\/10.1093\/acprof:oso\/9780199969104.003.0002","DOI":"10.1093\/acprof:oso\/9780199969104.003.0002"},{"key":"567_CR56","doi-asserted-by":"publisher","unstructured":"Marchegiani, B. (2025). Anthropomorphism, false beliefs, and conversational: How chatbots undermine users\u2019 autonomy. Journal of Applied Philosophy, japp.70008. https:\/\/doi.org\/10.1111\/japp.70008","DOI":"10.1111\/japp.70008"},{"key":"567_CR57","doi-asserted-by":"crossref","unstructured":"Matheson, J., & Lougheed, K. (2021). Introduction: Puzzles concerning epistemic autonomy. In Epistemic autonomy. Routledge.","DOI":"10.4324\/9781003003465"},{"issue":"1","key":"567_CR58","doi-asserted-by":"publisher","first-page":"4692","DOI":"10.1038\/s41598-024-53755-0","volume":"14","author":"SC Matz","year":"2024","unstructured":"Matz, S. C., Teeny, J. D., Vaid, S. S., Peters, H., Harari, G. M., & Cerf, M. (2024). The potential of generative AI for personalized persuasion at scale. 
Scientific Reports, 14(1), 4692. https:\/\/doi.org\/10.1038\/s41598-024-53755-0","journal-title":"Scientific Reports"},{"issue":"4","key":"567_CR59","doi-asserted-by":"publisher","first-page":"1085","DOI":"10.1007\/s43681-023-00289-2","volume":"4","author":"J M\u00f6kander","year":"2024","unstructured":"M\u00f6kander, J., Schuett, J., Kirk, H. R., & Floridi, L. (2024). Auditing large language models: A three-layered approach. AI and Ethics, 4(4), 1085\u20131115. https:\/\/doi.org\/10.1007\/s43681-023-00289-2","journal-title":"AI and Ethics"},{"issue":"1","key":"567_CR60","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1007\/s11127-023-01097-2","volume":"198","author":"F Motoki","year":"2024","unstructured":"Motoki, F., Neto, P., V., & Rodrigues, V. (2024). More human than human: Measuring ChatGPT political bias. Public Choice, 198(1), 3\u201323. https:\/\/doi.org\/10.1007\/s11127-023-01097-2","journal-title":"Public Choice"},{"issue":"1","key":"567_CR61","doi-asserted-by":"publisher","first-page":"76","DOI":"10.1017\/S0963180123000464","volume":"33","author":"S Nyholm","year":"2024","unstructured":"Nyholm, S. (2024). Artificial intelligence and human enhancement: Can AI technologies make us more (artificially) intelligent? Cambridge Quarterly of Healthcare Ethics, 33(1), 76\u201388. https:\/\/doi.org\/10.1017\/S0963180123000464","journal-title":"Cambridge Quarterly of Healthcare Ethics"},{"issue":"1","key":"567_CR62","doi-asserted-by":"publisher","first-page":"225","DOI":"10.1038\/s41746-023-00965-x","volume":"6","author":"D Oniani","year":"2023","unstructured":"Oniani, D., Hilsman, J., Peng, Y., Poropatich, R. K., Pamplin, J. C., Legault, G. L., & Wang, Y. (2023). Adopting and expanding ethical principles for generative artificial intelligence from military to healthcare. Npj Digital Medicine, 6(1), 225. 
https:\/\/doi.org\/10.1038\/s41746-023-00965-x","journal-title":"Npj Digital Medicine"},{"issue":"1","key":"567_CR63","doi-asserted-by":"publisher","first-page":"67","DOI":"10.1037\/0033-3204.30.1.67","volume":"30","author":"JC Overholser","year":"1993","unstructured":"Overholser, J. C. (1993a). Elements of the Socratic method: I. Systematic questioning. Psychotherapy: Theory Research Practice Training, 30(1), 67\u201374. https:\/\/doi.org\/10.1037\/0033-3204.30.1.67","journal-title":"Psychotherapy: Theory Research Practice Training"},{"issue":"1","key":"567_CR64","doi-asserted-by":"publisher","first-page":"75","DOI":"10.1037\/0033-3204.30.1.75","volume":"30","author":"JC Overholser","year":"1993","unstructured":"Overholser, J. C. (1993b). Elements of the Socratic method: II. Inductive reasoning. Psychotherapy: Theory Research Practice Training, 30(1), 75\u201385. https:\/\/doi.org\/10.1037\/0033-3204.30.1.75","journal-title":"Psychotherapy: Theory Research Practice Training"},{"issue":"2","key":"567_CR65","doi-asserted-by":"publisher","first-page":"283","DOI":"10.1037\/0033-3204.32.2.283","volume":"32","author":"JC Overholser","year":"1995","unstructured":"Overholser, J. C. (1995). Elements of the Socratic method: IV. Disavowal of knowledge. Psychotherapy: Theory Research Practice Training, 32(2), 283\u2013292. https:\/\/doi.org\/10.1037\/0033-3204.32.2.283","journal-title":"Psychotherapy: Theory Research Practice Training"},{"key":"567_CR66","unstructured":"Paul, R., & Elder, L. (2019). The thinker\u2019s guide to Socratic questioning. Rowman & Littlefield."},{"issue":"3","key":"567_CR67","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1007\/s11023-024-09665-1","volume":"34","author":"C Prunkl","year":"2024","unstructured":"Prunkl, C. (2024). Human autonomy at risk? An analysis of the challenges from AI. Minds and Machines, 34(3), 26. 
https:\/\/doi.org\/10.1007\/s11023-024-09665-1","journal-title":"Minds and Machines"},{"key":"567_CR68","doi-asserted-by":"publisher","first-page":"103301","DOI":"10.1016\/j.ijhcs.2024.103301","volume":"189","author":"M Raees","year":"2024","unstructured":"Raees, M., Meijerink, I., Lykourentzou, I., Khan, V. J., & Papangelis, K. (2024). From explainable to interactive AI: A literature review on current trends in human-AI interaction. International Journal of Human-Computer Studies, 189, 103301. https:\/\/doi.org\/10.1016\/j.ijhcs.2024.103301","journal-title":"International Journal of Human-Computer Studies"},{"issue":"10","key":"567_CR69","doi-asserted-by":"publisher","first-page":"17","DOI":"10.1080\/15265161.2023.2233358","volume":"23","author":"V Rahimzadeh","year":"2023","unstructured":"Rahimzadeh, V., Kostick-Quenet, K., Barby, B., J., Amy, L., & McGuire (2023). Ethics education for healthcare professionals in the era of ChatGPT and other large language models: Do we still need it? The American Journal of Bioethics, 23(10), 17\u201327. https:\/\/doi.org\/10.1080\/15265161.2023.2233358","journal-title":"The American Journal of Bioethics"},{"issue":"4","key":"567_CR70","doi-asserted-by":"publisher","first-page":"1377","DOI":"10.1007\/s43681-023-00320-6","volume":"4","author":"S Robbins","year":"2024","unstructured":"Robbins, S. (2024). The many meanings of meaningful human control. AI and Ethics, 4(4), 1377\u20131388. https:\/\/doi.org\/10.1007\/s43681-023-00320-6","journal-title":"AI and Ethics"},{"issue":"3","key":"567_CR71","doi-asserted-by":"publisher","first-page":"235","DOI":"10.1007\/s10730-017-9344-1","volume":"30","author":"JTF Roberts","year":"2018","unstructured":"Roberts, J. T. F. (2018). Autonomy, competence and non-interference. Hec Forum, 30(3), 235\u2013252. 
https:\/\/doi.org\/10.1007\/s10730-017-9344-1","journal-title":"Hec Forum"},{"key":"567_CR72","doi-asserted-by":"publisher","first-page":"110273","DOI":"10.1016\/j.knosys.2023.110273","volume":"263","author":"W Saeed","year":"2023","unstructured":"Saeed, W., & Omlin, C. (2023). Explainable AI (XAI): A systematic meta-survey of current challenges and future opportunities. Knowledge-Based Systems, 263, 110273. https:\/\/doi.org\/10.1016\/j.knosys.2023.110273","journal-title":"Knowledge-Based Systems"},{"issue":"8","key":"567_CR73","doi-asserted-by":"publisher","first-page":"1645","DOI":"10.1038\/s41562-025-02194-6","volume":"9","author":"F Salvi","year":"2025","unstructured":"Salvi, F., Horta Ribeiro, M., Gallotti, R., & West, R. (2025). On the conversational persuasiveness of GPT-4. Nature Human Behaviour, 9(8), 1645\u20131653. https:\/\/doi.org\/10.1038\/s41562-025-02194-6","journal-title":"Nature Human Behaviour"},{"issue":"4","key":"567_CR74","doi-asserted-by":"publisher","first-page":"289","DOI":"10.1007\/s11017-009-9114-4","volume":"30","author":"L Sandman","year":"2009","unstructured":"Sandman, L., & Munthe, C. (2009). Shared decision-making and patient autonomy. Theoretical Medicine and Bioethics, 30(4), 289\u2013310. https:\/\/doi.org\/10.1007\/s11017-009-9114-4","journal-title":"Theoretical Medicine and Bioethics"},{"key":"567_CR75","doi-asserted-by":"publisher","unstructured":"Savulescu, J., & Maslen, H. (2015). Moral enhancement and artificial intelligence: Moral AI? In J. Romportl, E. Zackova, & J. Kelemen (Eds.), Beyond artificial intelligence: The disappearing human-machine divide (pp. 79\u201395). Springer. https:\/\/doi.org\/10.1007\/978-3-319-09668-1_6","DOI":"10.1007\/978-3-319-09668-1_6"},{"key":"567_CR76","doi-asserted-by":"publisher","first-page":"1947","DOI":"10.1007\/s00146-023-01649-6","volume":"39","author":"G Schaap","year":"2024","unstructured":"Schaap, G., Bosse, T., & Hendriks Vettehen, P. (2024). 
The ABC of algorithmic aversion: Not agent, but benefits and control determine the acceptance of automated decision-making. AI & SOCIETY, 39, 1947\u20131960. https:\/\/doi.org\/10.1007\/s00146-023-01649-6","journal-title":"AI & SOCIETY"},{"issue":"1","key":"567_CR77","doi-asserted-by":"publisher","first-page":"73","DOI":"10.1007\/s12152-016-9258-7","volume":"12","author":"GO Schaefer","year":"2019","unstructured":"Schaefer, G. O., & Savulescu, J. (2019). Procedural moral enhancement. Neuroethics, 12(1), 73\u201384. https:\/\/doi.org\/10.1007\/s12152-016-9258-7","journal-title":"Neuroethics"},{"issue":"5","key":"567_CR78","doi-asserted-by":"publisher","first-page":"570","DOI":"10.1080\/02691728.2025.2500030","volume":"39","author":"S Schneider","year":"2025","unstructured":"Schneider, S. (2025). Chatbot epistemology. Social Epistemology, 39(5), 570\u2013589. https:\/\/doi.org\/10.1080\/02691728.2025.2500030","journal-title":"Social Epistemology"},{"key":"567_CR79","doi-asserted-by":"publisher","unstructured":"Sharma, M., Tong, M., Korbak, T., Duvenaud, D., Askell, A., Bowman, S. R., Cheng, N., Durmus, E., Hatfield-Dodds, Z., Johnston, S. R., Kravec, S., Maxwell, T., McCandlish, S., Ndousse, K., Rausch, O., Schiefer, N., Yan, D., Zhang, M., & Perez, E. (2025). Towards understanding sycophancy in language models (No. arXiv:2310.13548). arXiv. https:\/\/doi.org\/10.48550\/arXiv.2310.13548","DOI":"10.48550\/arXiv.2310.13548"},{"issue":"17","key":"567_CR80","doi-asserted-by":"publisher","first-page":"4853","DOI":"10.1080\/10447318.2023.2225931","volume":"40","author":"AJG Sison","year":"2024","unstructured":"Sison, A. J. G., Daza, M. T., Gozalo-Brizuela, R., & Garrido-Merch\u00e1n, E. C. (2024). ChatGPT: More than a weapon of mass deception ethical challenges and responses from the human-centered artificial intelligence (HCAI) perspective. International Journal of Human\u2013Computer Interaction, 40(17), 4853\u20134872. 
https:\/\/doi.org\/10.1080\/10447318.2023.2225931","journal-title":"International Journal of Human\u2013Computer Interaction"},{"key":"567_CR81","doi-asserted-by":"publisher","unstructured":"Sokoloff, W. W. (2020). Against the Socratic method. In W. W. Sokoloff (Ed.), Political science pedagogy: A critical, radical and utopian perspective (pp. 51\u201368). Springer. https:\/\/doi.org\/10.1007\/978-3-030-23831-5_3","DOI":"10.1007\/978-3-030-23831-5_3"},{"key":"567_CR82","doi-asserted-by":"publisher","first-page":"23","DOI":"10.1007\/s00146-021-01148-6","volume":"37","author":"BC Stahl","year":"2022","unstructured":"Stahl, B. C., Antoniou, J., Ryan, M., Macnish, K., & Jiya, T. (2022). Organisational responses to the ethical issues of artificial intelligence. AI & SOCIETY, 37, 23\u201337. https:\/\/doi.org\/10.1007\/s00146-021-01148-6","journal-title":"AI & SOCIETY"},{"issue":"2","key":"567_CR83","doi-asserted-by":"publisher","first-page":"204","DOI":"10.1177\/0306312718772094","volume":"48","author":"L Stark","year":"2018","unstructured":"Stark, L. (2018). Algorithmic psychometrics and the scalable subject. Social Studies of Science, 48(2), 204\u2013231. https:\/\/doi.org\/10.1177\/0306312718772094","journal-title":"Social Studies of Science"},{"issue":"1","key":"567_CR84","doi-asserted-by":"publisher","first-page":"20220155","DOI":"10.1515\/opis-2022-0155","volume":"7","author":"J Steinerov\u00e1","year":"2023","unstructured":"Steinerov\u00e1, J. (2023). Ethical issues of human information behaviour and human information interactions. Open Information Science, 7(1), 20220155. https:\/\/doi.org\/10.1515\/opis-2022-0155","journal-title":"Open Information Science"},{"issue":"5","key":"567_CR85","doi-asserted-by":"publisher","first-page":"722","DOI":"10.1177\/17456916231181102","volume":"19","author":"M Steyvers","year":"2024","unstructured":"Steyvers, M., & Kumar, A. (2024). Three challenges for AI-assisted decision-making. 
Perspectives on Psychological Science, 19(5), 722\u2013734. https:\/\/doi.org\/10.1177\/17456916231181102","journal-title":"Perspectives on Psychological Science"},{"issue":"9","key":"567_CR86","doi-asserted-by":"publisher","first-page":"1092","DOI":"10.1007\/s11606-016-3722-2","volume":"31","author":"HA Stoddard","year":"2016","unstructured":"Stoddard, H. A., & O\u2019Dell, D. V. (2016). Would Socrates have actually used the Socratic method for clinical teaching? Journal of General Internal Medicine, 31(9), 1092\u20131096. https:\/\/doi.org\/10.1007\/s11606-016-3722-2","journal-title":"Journal of General Internal Medicine"},{"key":"567_CR87","doi-asserted-by":"publisher","DOI":"10.1007\/s11098-024-02259-8","author":"C Tarsney","year":"2025","unstructured":"Tarsney, C. (2025). Deception and manipulation in generative AI. Philosophical Studies. https:\/\/doi.org\/10.1007\/s11098-024-02259-8","journal-title":"Philosophical Studies"},{"issue":"3","key":"567_CR88","doi-asserted-by":"publisher","first-page":"22","DOI":"10.1007\/s11948-024-00479-z","volume":"30","author":"S Tiribelli","year":"2024","unstructured":"Tiribelli, S., & Calvaresi, D. (2024). Rethinking health recommender systems for active aging: An autonomy-based ethical analysis. Science and Engineering Ethics, 30(3), 22. https:\/\/doi.org\/10.1007\/s11948-024-00479-z","journal-title":"Science and Engineering Ethics"},{"key":"567_CR89","doi-asserted-by":"publisher","DOI":"10.31219\/osf.io\/grh4b","author":"H Underwood","year":"2024","unstructured":"Underwood, H., & Fenwick, Z. (2024). Implementing an automated Socratic method to reduce hallucinations in large language models. OSF. https:\/\/doi.org\/10.31219\/osf.io\/grh4b","journal-title":"OSF"},{"issue":"4","key":"567_CR90","doi-asserted-by":"publisher","first-page":"88","DOI":"10.1007\/s13347-022-00577-5","volume":"35","author":"B Vaassen","year":"2022","unstructured":"Vaassen, B. (2022). AI, opacity, and personal autonomy. 
Philosophy & Technology, 35(4), 88. https:\/\/doi.org\/10.1007\/s13347-022-00577-5","journal-title":"Philosophy & Technology"},{"key":"567_CR91","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1007\/s11948-023-00428-2","volume":"29","author":"R Volkman","year":"2023","unstructured":"Volkman, R., & Gabriels, K. (2023). AI moral enhancement: Upgrading the socio-technical system of moral engagement. Science and Engineering Ethics, 29, 11. https:\/\/doi.org\/10.1007\/s11948-023-00428-2","journal-title":"Science and Engineering Ethics"},{"key":"567_CR92","doi-asserted-by":"publisher","unstructured":"Weidinger, L., Uesato, J., Rauh, M., Griffin, C., Huang, P. S., Mellor, J., Glaese, A., Cheng, M., Balle, B., Kasirzadeh, A., Biles, C., Brown, S., Kenton, Z., Hawkins, W., Stepleton, T., Birhane, A., Hendricks, L. A., Rimell, L., Isaac, W., et al. (2022). Taxonomy of risks posed by language models. In Proceedings of the 2022 ACM conference on fairness accountability and transparency (pp. 214\u2013229). https:\/\/doi.org\/10.1145\/3531146.3533088","DOI":"10.1145\/3531146.3533088"},{"key":"567_CR93","doi-asserted-by":"publisher","unstructured":"Williams, R. T. (2024). The ethical implications of using generative chatbots in higher education. Frontiers in Education, 8. https:\/\/doi.org\/10.3389\/feduc.2023.1331607","DOI":"10.3389\/feduc.2023.1331607"},{"key":"567_CR94","doi-asserted-by":"publisher","first-page":"1382234","DOI":"10.3389\/fpsyg.2024.1382234","volume":"15","author":"Y Yamamoto","year":"2024","unstructured":"Yamamoto, Y. (2024). Suggestive answers strategy in human-chatbot interaction: A route to engaged critical decision making. Frontiers in Psychology, 15, 1382234. 
https:\/\/doi.org\/10.3389\/fpsyg.2024.1382234","journal-title":"Frontiers in Psychology"},{"issue":"1","key":"567_CR95","doi-asserted-by":"publisher","first-page":"28","DOI":"10.1186\/s40561-024-00316-7","volume":"11","author":"C Zhai","year":"2024","unstructured":"Zhai, C., Wibowo, S., & Li, L. D. (2024). The effects of over-reliance on AI dialogue systems on students\u2019 cognitive abilities: A systematic review. Smart Learning Environments, 11(1), 28. https:\/\/doi.org\/10.1186\/s40561-024-00316-7","journal-title":"Smart Learning Environments"},{"key":"567_CR96","doi-asserted-by":"publisher","first-page":"100978","DOI":"10.1016\/j.iheduc.2024.100978","volume":"65","author":"L Zhang","year":"2025","unstructured":"Zhang, L., & Xu, J. (2025). The paradox of self-efficacy and technological dependence: Unraveling generative impact on university students\u2019 task completion. The Internet and Higher Education, 65, 100978. https:\/\/doi.org\/10.1016\/j.iheduc.2024.100978","journal-title":"The Internet and Higher Education"},{"issue":"17","key":"567_CR97","doi-asserted-by":"publisher","first-page":"19724","DOI":"10.1609\/aaai.v38i17.29946","volume":"38","author":"W Zhong","year":"2024","unstructured":"Zhong, W., Guo, L., Gao, Q., Ye, H., & Wang, Y. (2024). MemoryBank: Enhancing large language models with long-term memory. Proceedings of the AAAI Conference on Artificial Intelligence, 38(17), 19724\u201319731. https:\/\/doi.org\/10.1609\/aaai.v38i17.29946","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"issue":"17","key":"567_CR98","doi-asserted-by":"publisher","first-page":"3417","DOI":"10.3390\/electronics13173417","volume":"13","author":"J Zhou","year":"2024","unstructured":"Zhou, J., M\u00fcller, H., Holzinger, A., & Chen, F. (2024). Ethical ChatGPT: Concerns, challenges, and commandments. Electronics, 13(17), 3417. 
https:\/\/doi.org\/10.3390\/electronics13173417","journal-title":"Electronics"},{"issue":"2","key":"567_CR99","doi-asserted-by":"publisher","first-page":"263","DOI":"10.1515\/icom-2024-0020","volume":"23","author":"J Ziegler","year":"2024","unstructured":"Ziegler, J., & Donkers, T. (2024). From explanations to human-AI co-evolution: Charting trajectories towards future user-centric AI. I-Com, 23(2), 263\u2013272. https:\/\/doi.org\/10.1515\/icom-2024-0020","journal-title":"I-Com"}],"container-title":["Science and Engineering Ethics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11948-025-00567-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11948-025-00567-8","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11948-025-00567-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,24]],"date-time":"2025-12-24T09:24:26Z","timestamp":1766568266000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11948-025-00567-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,26]]},"references-count":99,"journal-issue":{"issue":"6","published-online":{"date-parts":[[2025,12]]}},"alternative-id":["567"],"URL":"https:\/\/doi.org\/10.1007\/s11948-025-00567-8","relation":{},"ISSN":["1471-5546"],"issn-type":[{"value":"1471-5546","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,26]]},"assertion":[{"value":"20 April 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 October 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article 
History"}},{"value":"26 November 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declared no potential conflicts of interest with respect to the research, authorship, and\/or publication of this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}}],"article-number":"41"}}