{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T07:42:01Z","timestamp":1774510921359,"version":"3.50.1"},"reference-count":46,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T00:00:00Z","timestamp":1764028800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T00:00:00Z","timestamp":1764028800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Ethics Inf Technol"],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1007\/s10676-025-09881-y","type":"journal-article","created":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T05:46:33Z","timestamp":1764049593000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Examining popular arguments against AI existential risk: a philosophical analysis"],"prefix":"10.1007","volume":"28","author":[{"given":"Torben","family":"Swoboda","sequence":"first","affiliation":[]},{"given":"Risto","family":"Uuk","sequence":"additional","affiliation":[]},{"given":"Lode","family":"Lauwaert","sequence":"additional","affiliation":[]},{"given":"Andrew P.","family":"Rebera","sequence":"additional","affiliation":[]},{"given":"Ann-Katrien","family":"Oimann","sequence":"additional","affiliation":[]},{"given":"Bartlomiej","family":"Chomanski","sequence":"additional","affiliation":[]},{"given":"Carina","family":"Prunkl","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,25]]},"reference":[{"key":"9881_CR1","unstructured":"Allyn, 
B. (2024, September 29). California Gov. Newsom vetoes AI safety bill that divided Silicon Valley. NPR. https:\/\/www.npr.org\/2024\/09\/20\/nx-s1-5119792\/newsom-ai-bill-california-sb1047-tech"},{"key":"9881_CR2","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2303.03885","author":"VM Ambartsoumean","year":"2023","unstructured":"Ambartsoumean, V. M., & Yampolskiy, R. V. (2023). AI risk skepticism, a comprehensive survey. arXiv. https:\/\/doi.org\/10.48550\/arXiv.2303.03885","journal-title":"arXiv"},{"issue":"4","key":"9881_CR3","doi-asserted-by":"publisher","first-page":"111","DOI":"10.1353\/jod.2023.a907692","volume":"34","author":"Y Bengio","year":"2023","unstructured":"Bengio, Y. (2023). AI and catastrophic risk. Journal of Democracy, 34(4), 111\u2013121. https:\/\/doi.org\/10.1353\/jod.2023.a907692","journal-title":"Journal of Democracy"},{"issue":"6698","key":"9881_CR4","doi-asserted-by":"publisher","first-page":"842","DOI":"10.1126\/science.adn0117","volume":"384","author":"Y Bengio","year":"2024","unstructured":"Bengio, Y., Hinton, G., Yao, A., Song, D., Abbeel, P., Darrell, T., Harari, Y. N., Zhang, Y. Q., Xue, L., Shalev-Shwartz, S., Hadfield, G., Clune, J., Maharaj, T., Hutter, F., Baydin, A. G., McIlraith, S., Gao, Q., Acharya, A., Krueger, D., & Mindermann, S. (2024). Managing extreme AI risks amid rapid progress. Science, 384(6698), 842\u2013845. https:\/\/doi.org\/10.1126\/science.adn0117","journal-title":"Science"},{"key":"9881_CR5","volume-title":"Superintelligence: Paths, Dangers, Strategies","author":"N Bostrom","year":"2014","unstructured":"Bostrom, N. (2014). Superintelligence: Paths, Dangers, Strategies. Oxford University Press."},{"key":"9881_CR6","unstructured":"Brundage, M., Avin, S., Clark, J., Toner, H., Eckersley, P., Garfinkel, B., Dafoe, A., Scharre, P., Zeitzoff, T., Filar, B., Anderson, H., Roff, H., Allen, G. C., Steinhardt, J., Flynn, C., H\u00c9igeartaigh, S. \u00d3., Beard, S., Belfield, H., Farquhar, S., & Amodei, D. 
(2018). The Malicious Use of Artificial Intelligence: Forecasting, Prevention, and Mitigation. arXiv. https:\/\/doi.org\/10.48550\/arXiv.1802.07228"},{"key":"9881_CR7","unstructured":"Buolamwini, J. (2023). Unmasking AI: My mission to protect what is human in a world of machines. Random House."},{"key":"9881_CR8","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2206.13353","author":"J Carlsmith","year":"2024","unstructured":"Carlsmith, J. (2024). Is power-seeking AI an existential risk? arXiv. https:\/\/doi.org\/10.48550\/arXiv.2206.13353","journal-title":"arXiv"},{"key":"9881_CR9","doi-asserted-by":"publisher","DOI":"10.1007\/s00146-024-01930-2","author":"L Dung","year":"2024","unstructured":"Dung, L. (2024). The argument for near-term human disempowerment through AI. AI and Society. https:\/\/doi.org\/10.1007\/s00146-024-01930-2","journal-title":"AI and Society"},{"key":"9881_CR10","unstructured":"Elias, J. (2024, February 28). Google CEO tells employees Gemini AI blunder is \u2018unacceptable\u2019. NBC News. https:\/\/www.nbcnews.com\/tech\/tech-news\/google-ceo-tells-employees-gemini-ai-blunder-unacceptable-rcna140926"},{"key":"9881_CR11","unstructured":"Future of Life Institute (2023, March 22). Pause Giant AI Experiments: An Open Letter. Future of Life Institute. https:\/\/futureoflife.org\/open-letter\/pause-giant-ai-experiments\/"},{"key":"9881_CR12","doi-asserted-by":"publisher","unstructured":"Ganguli, D., Hernandez, D., Lovitt, L., Askell, A., Bai, Y., Chen, A., Conerly, T., Dassarma, N., Drain, D., Elhage, N., El Showk, S., Fort, S., Hatfield-Dodds, Z., Henighan, T., Johnston, S., Jones, A., Joseph, N., Kernian, J., Kravec, S., & Clark, J. (2022). Predictability and surprise in large generative models. Proceedings of the 2022 ACM Conference on Fairness Accountability and Transparency, 1747-1764. https:\/\/doi.org\/10.1145\/3531146.3533229","DOI":"10.1145\/3531146.3533229"},{"key":"9881_CR13","doi-asserted-by":"crossref","unstructured":"Good, I. J. (1965). 
Speculations Concerning the First Ultraintelligent Machine. In F. Alt & M. Ruminoff (Eds.), Advances in Computers, volume 6. Academic Press.","DOI":"10.1016\/S0065-2458(08)60418-0"},{"key":"9881_CR14","unstructured":"Grunewald, E. (2023, December 21). Attention on Existential Risk from AI Likely Hasn\u2019t Distracted from Current Harms from AI. Erich Grunewald\u2019s Blog. https:\/\/www.erichgrunewald.com\/posts\/attention-on-existential-risk-from-ai-likely-hasnt-distracted-from-current-harms-from-ai\/"},{"key":"9881_CR15","unstructured":"Heaven, W. D. (2023, May 2). Geoffrey Hinton tells us why he\u2019s now scared of the tech he helped build. MIT Technology Review. https:\/\/www.technologyreview.com\/2023\/05\/02\/1072528\/geoffrey-hinton-google-why-scared-ai\/"},{"key":"9881_CR16","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2306.12001","author":"D Hendrycks","year":"2023","unstructured":"Hendrycks, D., Mazeika, M., & Woodside, T. (2023). An overview of catastrophic AI risks. arXiv. https:\/\/doi.org\/10.48550\/arXiv.2306.12001","journal-title":"arXiv"},{"issue":"2","key":"9881_CR17","doi-asserted-by":"publisher","DOI":"10.1007\/s13347-023-00606-x","volume":"36","author":"A Kasirzadeh","year":"2023","unstructured":"Kasirzadeh, A., & Gabriel, I. (2023). In conversation with artificial intelligence: Aligning language models with human values. Philosophy & Technology, 36(2), Article 27. https:\/\/doi.org\/10.1007\/s13347-023-00606-x","journal-title":"Philosophy & Technology"},{"key":"9881_CR18","unstructured":"Lee, W. (2024, September 17). Gov. Newsom signs AI-related bills regulating Hollywood actor replicas and deep fakes. Los Angeles Times. https:\/\/www.latimes.com\/entertainment-arts\/business\/story\/2024-09-17\/newsom-ai-bills-sag-aftra"},{"key":"9881_CR19","unstructured":"Levin, P. L. (2024, January 24). The real issue with artificial intelligence: The misalignment problem. The Hill. 
https:\/\/thehill.com\/opinion\/4427702-the-real-issue-with-artificial-intelligence-the-misalignment-problem\/"},{"key":"9881_CR20","unstructured":"Manancourt, V., & Bristow, T. (2024, September 20). Meta\u2019s Nick Clegg tears into Rishi Sunak\u2019s AI doomerism. POLITICO. https:\/\/www.politico.eu\/article\/meta-nick-clegg-tears-rishi-sunak-ai-doomerism-ai-summit-national-security\/"},{"key":"9881_CR21","unstructured":"Maslej, N., Fattorini, L., Perrault, R., Parli, V., Reuel, A., Brynjolfsson, E., Etchemendy, J., Ligett, K., Lyons, T., Manyika, J., Niebles, J. C., Shoham, Y., Wald, R., & Clark, J. (2024). The AI Index 2024 Annual Report. AI Index Steering Committee, Institute for Human-Centered AI, Stanford University, Stanford, CA. https:\/\/aiindex.stanford.edu\/report\/"},{"issue":"5","key":"9881_CR22","doi-asserted-by":"publisher","first-page":"649","DOI":"10.1080\/0952813X.2021.1964003","volume":"35","author":"S McLean","year":"2023","unstructured":"McLean, S., Read, G. J. M., Thompson, J., Baber, C., Stanton, N. A., & Salmon, P. M. (2023). The risks associated with artificial general intelligence: A systematic review. Journal of Experimental and Theoretical Artificial Intelligence, 35(5), 649\u2013663. https:\/\/doi.org\/10.1080\/0952813X.2021.1964003","journal-title":"Journal of Experimental and Theoretical Artificial Intelligence"},{"key":"9881_CR23","unstructured":"Milmo, D. (2023a, October 24). AI risk must be treated as seriously as climate crisis, says Google DeepMind chief. The Guardian. https:\/\/www.theguardian.com\/technology\/2023\/oct\/24\/ai-risk-climate-crisis-google-deepmind-chief-demis-hassabis-regulation"},{"key":"9881_CR24","unstructured":"Milmo, D. (2023b, October 29). AI doomsday warnings a distraction from the danger it already poses, warns expert. The Guardian. 
https:\/\/www.theguardian.com\/technology\/2023\/oct\/29\/ai-doomsday-warnings-a-distraction-from-the-danger-it-already-poses-warns-expert"},{"issue":"1","key":"9881_CR25","doi-asserted-by":"publisher","first-page":"25","DOI":"10.1111\/rati.12320","volume":"35","author":"VC M\u00fcller","year":"2022","unstructured":"M\u00fcller, V. C., & Cannon, M. (2022). Existential risk from AI and orthogonality: Can we have it both ways? Ratio, 35(1), 25\u201336. https:\/\/doi.org\/10.1111\/rati.12320","journal-title":"Ratio"},{"key":"9881_CR26","unstructured":"Nolan, B. (2023, May 25). Ex-Google CEO Eric Schmidt says AI poses an \u2018existential risk\u2019 that could kill or harm \u2018many, many people\u2019. Business Insider. https:\/\/www.businessinsider.com\/google-eric-schmidt-ai-poses-an-existential-risk-kill-people-2023-5"},{"key":"9881_CR27","doi-asserted-by":"publisher","unstructured":"Omohundro, S. M. (2008). The Basic AI Drives. Proceedings of the 2008 Conference on Artificial General Intelligence 2008: Proceedings of the First AGI Conference, 483\u2013492. https:\/\/doi.org\/10.5555\/1566174.1566226","DOI":"10.5555\/1566174.1566226"},{"key":"9881_CR28","unstructured":"Ord, T. (2020). The precipice: Existential risk and the future of humanity. Hachette Books."},{"key":"9881_CR29","unstructured":"Pascual, M. G. (2024, April 15). Melanie Mitchell: \u2018The big leap in artificial intelligence will come when it is inserted into robots that experience the world like a child\u2019. EL PA\u00cdS English. https:\/\/english.elpais.com\/technology\/2024-04-14\/melanie-mitchell-the-big-leap-in-artificial-intelligence-will-come-when-it-is-inserted-into-robots-that-experience-the-world-like-a-child.html"},{"key":"9881_CR30","unstructured":"Pinker, S. (2019). Enlightenment now: The case for reason, science, humanism and progress. Penguin Books."},{"key":"9881_CR31","unstructured":"Richards, B., Arcas, B. A., y, Lajoie, G., & Sridhar, D. (2023, July 18). 
The Illusion Of AI\u2019s Existential Risk. NOEMA. https:\/\/www.noemamag.com\/the-illusion-of-ais-existential-risk"},{"key":"9881_CR32","volume-title":"Human Compatible: Artificial Intelligence and the Problem of Control","author":"S Russell","year":"2019","unstructured":"Russell, S. (2019). Human Compatible: Artificial Intelligence and the Problem of Control. Penguin."},{"key":"9881_CR33","unstructured":"Ryan-Mosley, T. (2023, June 12). It\u2019s time to talk about the real AI risks. MIT Technology Review. https:\/\/www.technologyreview.com\/2023\/06\/12\/1074449\/real-ai-risks\/"},{"key":"9881_CR34","unstructured":"Sandbrink, J. B. (2023). Artificial intelligence and biological misuse: Differentiating risks of language models and biological design tools. arXiv. https:\/\/arxiv.org\/abs\/2306.13952"},{"key":"9881_CR35","unstructured":"Science Media Centre (2023, May 30). Expert reaction to a statement on the existential threat of AI published on the Centre for AI Safety website. https:\/\/www.sciencemediacentre.org\/expert-reaction-to-a-statement-on-the-existential-threat-of-ai-published-on-the-centre-for-ai-safety-website\/"},{"key":"9881_CR36","doi-asserted-by":"publisher","unstructured":"Soice, E. H., Rocha, R., Cordova, K., Specter, M., & Esvelt, K. M. (2023). Can large language models democratize access to dual-use biotechnology? arXiv. https:\/\/doi.org\/10.48550\/arXiv.2306.03809","DOI":"10.48550\/arXiv.2306.03809"},{"key":"9881_CR37","volume-title":"Life 3.0: Being human in the age of artificial intelligence","author":"M Tegmark","year":"2018","unstructured":"Tegmark, M. (2018). Life 3.0: Being human in the age of artificial intelligence (First Vintage books edition). Vintage Books.","edition":"First Vintage b"},{"issue":"7967","key":"9881_CR38","doi-asserted-by":"publisher","first-page":"885","DOI":"10.1038\/d41586-023-02094-7","volume":"618","author":"The editorial board","year":"2023","unstructured":"The editorial board. (2023). 
Stop talking about tomorrow\u2019s AI doomsday when AI poses risks today. Nature, 618(7967), 885\u2013886. https:\/\/doi.org\/10.1038\/d41586-023-02094-7","journal-title":"Nature"},{"key":"9881_CR39","unstructured":"Thorstad, D. (2024, March 22). Harms (Part 1: Distraction). Reflective Altruism. https:\/\/reflectivealtruism.com\/2024\/03\/22\/harms-part-1-distraction\/"},{"key":"9881_CR40","unstructured":"Tung, L. (2016). Google Alphabet\u2019s Schmidt: Ignore Elon Musk\u2019s AI fears\u2014He\u2019s no computer scientist. ZDNET. https:\/\/www.zdnet.com\/article\/google-alphabets-schmidt-ignore-elon-musks-ai-fears-hes-no-computer-scientist\/"},{"issue":"3","key":"9881_CR41","doi-asserted-by":"publisher","first-page":"189","DOI":"10.1038\/s42256-022-00465-9","volume":"4","author":"F Urbina","year":"2022","unstructured":"Urbina, F., Lentzos, F., Invernizzi, C., & Ekins, S. (2022). Dual use of artificial-intelligence-powered drug discovery. Nature Machine Intelligence, 4(3), 189\u2013191. https:\/\/doi.org\/10.1038\/s42256-022-00465-9","journal-title":"Nature Machine Intelligence"},{"key":"9881_CR42","unstructured":"Verma, P., Zakrzewski, C., & Tiku, N. (2024, July 13). OpenAI illegally barred staff from airing safety risks, whistleblowers say. Washington Post. https:\/\/www.washingtonpost.com\/technology\/2024\/07\/13\/openai-safety-risks-whistleblower-sec\/"},{"key":"9881_CR43","unstructured":"Vermeer, M. J. D., Lathrop, E., & Moon, A. (2025). On the Extinction Risk from Artificial Intelligence. https:\/\/www.rand.org\/pubs\/research_reports\/RRA3034-1.html"},{"key":"9881_CR44","unstructured":"Wei, J., Tay, Y., Bommasani, R., Raffel, C., Zoph, B., Borgeaud, S., Yogatama, D., Bosma, M., Zhou, D., Metzler, D., Chi, E. H., Hashimoto, T., Vinyals, O., Liang, P., Dean, J., & Fedus, W. (2022). Emergent Abilities of Large Language Models. Transactions on Machine Learning Research. 
https:\/\/openreview.net\/forum?id=yzkSU5zdwD."},{"key":"9881_CR45","unstructured":"Wilmoth, P. (2024, March 11). Is AI an Existential Risk? Q&A with RAND Experts. RAND Research & Commentary. https:\/\/www.rand.org\/pubs\/commentary\/2024\/03\/is-ai-an-existential-risk-qa-with-rand-experts.html"},{"key":"9881_CR46","doi-asserted-by":"publisher","DOI":"10.1007\/s43681-024-00420-x","author":"RV Yampolskiy","year":"2024","unstructured":"Yampolskiy, R. V. (2024). On monitorability of AI. AI and Ethics. https:\/\/doi.org\/10.1007\/s43681-024-00420-x","journal-title":"AI and Ethics"}],"container-title":["Ethics and Information Technology"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10676-025-09881-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10676-025-09881-y","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10676-025-09881-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T05:51:01Z","timestamp":1774504261000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10676-025-09881-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,25]]},"references-count":46,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,3]]}},"alternative-id":["9881"],"URL":"https:\/\/doi.org\/10.1007\/s10676-025-09881-y","relation":{},"ISSN":["1388-1957","1572-8439"],"issn-type":[{"value":"1388-1957","type":"print"},{"value":"1572-8439","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,25]]},"assertion":[{"value":"25 November 2025","order":1,"name":"first_online","label":"First 
Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}},{"value":"No funding was received for conducting this study.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"7"}}