{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T07:22:33Z","timestamp":1774596153059,"version":"3.50.1"},"reference-count":282,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,2,18]],"date-time":"2026-02-18T00:00:00Z","timestamp":1771372800000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/100018189","name":"Center for Big Data Analytics, University of Texas at Austin","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100018189","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100006013","name":"United Arab Emirates University","doi-asserted-by":"publisher","award":["12R316"],"award-info":[{"award-number":["12R316"]}],"id":[{"id":"10.13039\/501100006013","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Information Fusion"],"published-print":{"date-parts":[[2026,8]]},"DOI":"10.1016\/j.inffus.2026.104241","type":"journal-article","created":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T08:13:43Z","timestamp":1771488823000},"page":"104241","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Security and privacy in LLMs: A comprehensive survey of threats and mitigation strategies"],"prefix":"10.1016","volume":"132","author":[{"given":"Aymen Dia 
Eddine","family":"Berini","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7363-1466","authenticated-orcid":false,"given":"Norziana","family":"Jamil","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2182-6664","authenticated-orcid":false,"given":"Ala-Eddine","family":"Benrazek","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4725-8634","authenticated-orcid":false,"given":"Abderrahmane","family":"Lakas","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0946-1818","authenticated-orcid":false,"given":"Leila","family":"Ismail","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0632-3172","authenticated-orcid":false,"given":"Mohamed Amine","family":"Ferrag","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7479-7970","authenticated-orcid":false,"given":"Kwok-Yan","family":"Lam","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.inffus.2026.104241_bib0001","unstructured":"Z. Z. Chen, J. Ma, X. Zhang, N. Hao, A. Yan, A. Nourbakhsh, X. Yang, J. McAuley, L. Petzold, W. Y. Wang, A survey on large language models for critical societal domains: finance, healthcare, and law, arXiv preprint arXiv: 2405.01769(2024a)."},{"key":"10.1016\/j.inffus.2026.104241_bib0002","doi-asserted-by":"crossref","DOI":"10.1016\/j.cose.2024.104016","article-title":"A survey of large language models for cyber threat detection","volume":"145","author":"Chen","year":"2024","journal-title":"Comput. Secur."},{"key":"10.1016\/j.inffus.2026.104241_bib0003","series-title":"IEEE CCWC","first-page":"1","article-title":"Privacy and security challenges in large language models","author":"Rathod","year":"2025"},{"key":"10.1016\/j.inffus.2026.104241_bib0004","unstructured":"H. Debar, S. Dietrich, P. Laskov, E.C. Lupu, E. 
Ntoutsi, Emerging security challenges of large language models, preprint arXiv:arXiv: 2412.17614(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0005","doi-asserted-by":"crossref","unstructured":"A. Purpura, S. Wadhwa, J. Zymet, A. Gupta, A. Luo, M.K. Rad, S. Shinde, M.S. Sorower, Building safe GenAI applications: an end-to-end overview of red teaming for large language models, arXiv preprint arXiv: 2503.01742(2025).","DOI":"10.18653\/v1\/2025.trustnlp-main.23"},{"key":"10.1016\/j.inffus.2026.104241_bib0006","doi-asserted-by":"crossref","unstructured":"K. Chen, X. Zhou, Y. Lin, S. Feng, L. Shen, P. Wu, A survey on privacy risks and protection in large language models, arXiv preprint arXiv: 2505.01976(2025).","DOI":"10.1007\/s44443-025-00177-1"},{"key":"10.1016\/j.inffus.2026.104241_bib0007","series-title":"2024 IEEE International Conferences on Internet of Things (iThings) and IEEE Green Computing & Communications (GreenCom) and IEEE Cyber, Physical & Social Computing (CPSCom) and IEEE Smart Data (SmartData) and IEEE Congress on Cybermatics","first-page":"543","article-title":"On the security and privacy implications of large language models: in-depth threat analysis","author":"Ruhl\u00e4nder","year":"2024"},{"issue":"2","key":"10.1016\/j.inffus.2026.104241_bib0008","doi-asserted-by":"crossref","DOI":"10.1016\/j.hcc.2024.100211","article-title":"A survey on large language model (LLM) security and privacy: the good, the bad, and the ugly","volume":"4","author":"Yao","year":"2024","journal-title":"High Confid. Comput."},{"key":"10.1016\/j.inffus.2026.104241_bib0009","doi-asserted-by":"crossref","unstructured":"A.S. Inamdar, S. Eswaran, A comprehensive review of security and privacy issues in large language models, SSRN Electronic Journal (2024) 10.2139\/ssrn.4825655.","DOI":"10.2139\/ssrn.4825655"},{"key":"10.1016\/j.inffus.2026.104241_bib0010","unstructured":"S. Wang, T. Zhu, B. Liu, M. Ding, X. Guo, D. Ye, W. Zhou, P.S. 
Yu, Unique security and privacy threats of large language model: A comprehensive survey, arXiv preprint arXiv: 2406.07973(2024)."},{"issue":"4","key":"10.1016\/j.inffus.2026.104241_bib0011","article-title":"Privacy in large language models: attacks, defenses and future directions","volume":"37","author":"Li","year":"2024","journal-title":"J. ACM"},{"issue":"7","key":"10.1016\/j.inffus.2026.104241_bib0012","doi-asserted-by":"crossref","first-page":"175","DOI":"10.1007\/s10462-024-10824-0","article-title":"A survey of safety and trustworthiness of large language models through the lens of verification and validation","volume":"57","author":"Huang","year":"2024","journal-title":"Artif. Intell. Rev."},{"key":"10.1016\/j.inffus.2026.104241_bib0013","doi-asserted-by":"crossref","unstructured":"Y. Dong, R. Mu, Y. Zhang, S. Sun, T. Zhang, C. Wu, G. Jin, Y. Qi, J. Hu, J. Meng, et al., Safeguarding large language models: a survey, arXiv preprint arXiv: 2406.02622(2024).","DOI":"10.1007\/s10462-025-11389-2"},{"key":"10.1016\/j.inffus.2026.104241_bib0014","unstructured":"Y. Liu, Y. Yao, J.-F. Ton, X. Zhang, R. Guo, H. Cheng, Y. Klochkov, M.F. Taufiq, H. Li, Trustworthy LLMS: a survey and guideline for evaluating large language models\u2019 alignment, arXiv preprint arXiv: 2308.05374(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0015","series-title":"Proceedings of the 2022 ACM Conference on Fairness, Accountability, and Transparency","first-page":"214","article-title":"Taxonomy of risks posed by language models","author":"Weidinger","year":"2022"},{"key":"10.1016\/j.inffus.2026.104241_bib0016","unstructured":"E. Shayegani, et al., Survey of vulnerabilities in large language models revealed by adversarial attacks, arXiv preprint arXiv: 2310.10844(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0017","unstructured":"S. Abdali, R. Anarfi, C.J. Barberan, J. 
He, Securing large language models: threats, vulnerabilities and responsible practices, arXiv preprint arXiv: 2403.12503(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0018","doi-asserted-by":"crossref","first-page":"5799","DOI":"10.1109\/OJCOMS.2024.3456549","article-title":"LLM-based edge intelligence: a comprehensive survey on architectures, applications, security and trustworthiness","volume":"5","author":"Friha","year":"2024","journal-title":"IEEE Open J. Commun. Soc."},{"key":"10.1016\/j.inffus.2026.104241_bib0019","unstructured":"A.G. Chowdhury, M.M. Islam, V. Kumar, F.H. Shezan, V. Jain, A. Chadha, Breaking down the defenses: a comparative survey of attacks on large language models, arXiv: 2403.04786(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0020","doi-asserted-by":"crossref","unstructured":"B. Yan, K. Li, M. Xu, Y. Dong, Y. Zhang, Z. Ren, X. Cheng, On protecting the data privacy of large language models (LLMs): a survey, arXiv: 2403.05156(2024).","DOI":"10.1109\/ICMC60390.2024.00008"},{"issue":"6","key":"10.1016\/j.inffus.2026.104241_bib0021","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3712001","article-title":"Security and privacy challenges of large language models: a survey","volume":"57","author":"Das","year":"2025","journal-title":"ACM Comput. Surv."},{"key":"10.1016\/j.inffus.2026.104241_bib0022","unstructured":"P. Kumar, S. Mishra, Robustness in large language models: a survey of mitigation strategies and evaluation metrics, arXiv: 2505.18658(2025)."},{"key":"10.1016\/j.inffus.2026.104241_bib0023","unstructured":"A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A.N. Gomez, L. Kaiser, I. Polosukhin, Attention Is all you need, 2023, https:\/\/arxiv.org\/abs\/1706.03762."},{"key":"10.1016\/j.inffus.2026.104241_bib0024","unstructured":"T.B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, S. Agarwal, A. Herbert-Voss, G. Krueger, T. Henighan, R. Child, A. 
Ramesh, D.M. Ziegler, J. Wu, C. Winter, C. Hesse, M. Chen, E. Sigler, M. Litwin, S. Gray, B. Chess, J. Clark, C. Berner, S. McCandlish, A. Radford, I. Sutskever, D. Amodei, Language models are few-shot learners, 2020, https:\/\/arxiv.org\/abs\/2005.14165."},{"key":"10.1016\/j.inffus.2026.104241_bib0025","series-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)","first-page":"4171","article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2019"},{"issue":"8","key":"10.1016\/j.inffus.2026.104241_bib0026","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"10.1016\/j.inffus.2026.104241_bib0027","unstructured":"GPT-4 technical report, 2024, https:\/\/arxiv.org\/abs\/2303.08774."},{"key":"10.1016\/j.inffus.2026.104241_bib0028","unstructured":"H. Touvron, T. Lavril, G. Izacard, X. Martinet, M.-A. Lachaux, T. Lacroix, B. Rozi\u00e8re, N. Goyal, E. Hambro, F. Azhar, A. Rodriguez, A. Joulin, E. Grave, G. Lample, LLaMA: open and efficient foundation language models, 2023, https:\/\/arxiv.org\/abs\/2302.13971."},{"key":"10.1016\/j.inffus.2026.104241_bib0029","doi-asserted-by":"crossref","unstructured":"J. Hoffmann, S. Borgeaud, A. Mensch, E. Buchatskaya, T. Cai, E. Rutherford, D. de Las Casas, L.A. Hendricks, J. Welbl, A. Clark, T. Hennigan, E. Noland, K. Millican, G. van den Driessche, B. Damoc, A. Guy, S. Osindero, K. Simonyan, E. Elsen, J.W. Rae, O. Vinyals, L. Sifre, Training compute-optimal large language models, 2022, https:\/\/arxiv.org\/abs\/2203.15556.","DOI":"10.52202\/068431-2176"},{"key":"10.1016\/j.inffus.2026.104241_bib0030","unstructured":"R. Sennrich, B. Haddow, A. 
Birch, Neural machine translation of rare words with subword units, arXiv: 1508.07909(2015)."},{"key":"10.1016\/j.inffus.2026.104241_bib0031","doi-asserted-by":"crossref","unstructured":"T. Kudo, J. Richardson, Sentencepiece: a simple and language independent subword tokenizer and detokenizer for neural text processing, arXiv: 1808.06226(2018).","DOI":"10.18653\/v1\/D18-2012"},{"key":"10.1016\/j.inffus.2026.104241_bib0032","series-title":"Proceedings of the 33rd International Conference on Neural Information Processing Systems","first-page":"11","article-title":"XLNet: generalized autoregressive pretraining for language understanding","volume":"32","author":"Yang","year":"2019"},{"key":"10.1016\/j.inffus.2026.104241_bib0033","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0034","doi-asserted-by":"crossref","unstructured":"A. Wang, A. Singh, J. Michael, F. Hill, O. Levy, S.R. Bowman, GLUE: a multi-task benchmark and analysis platform for natural language understanding, arXiv: 1804.07461(2018).","DOI":"10.18653\/v1\/W18-5446"},{"key":"10.1016\/j.inffus.2026.104241_bib0035","unstructured":"D. Hendrycks, C. Burns, S. Basart, A. Zou, M. Mazeika, D. Song, J. Steinhardt, Measuring massive multitask language understanding, arXiv: 2009.03300(2020)."},{"key":"10.1016\/j.inffus.2026.104241_bib0036","unstructured":"P. Liang, R. Bommasani, T. Lee, D. Tsipras, D. Soylu, M. Yasunaga, Y. Zhang, D. Narayanan, Y. Wu, A. 
Kumar, et al., Holistic evaluation of language models, arXiv: 2211.09110(2022)."},{"issue":"7972","key":"10.1016\/j.inffus.2026.104241_bib0037","doi-asserted-by":"crossref","first-page":"172","DOI":"10.1038\/s41586-023-06291-2","article-title":"Large language models encode clinical knowledge","volume":"620","author":"Singhal","year":"2023","journal-title":"Nature"},{"issue":"6","key":"10.1016\/j.inffus.2026.104241_bib0038","doi-asserted-by":"crossref","DOI":"10.1093\/bib\/bbac409","article-title":"BioGPT: generative pre-trained transformer for biomedical text generation and mining","volume":"23","author":"Luo","year":"2022","journal-title":"Brief. Bioinform."},{"key":"10.1016\/j.inffus.2026.104241_bib0039","unstructured":"S. Wu, O. Irsoy, S. Lu, V. Dabravolski, M. Dredze, S. Gehrmann, P. Kambadur, D. Rosenberg, G. Mann, BloombergGPT: a large language model for finance, arXiv: 2303.17564(2023)."},{"issue":"1-9","key":"10.1016\/j.inffus.2026.104241_bib0040","first-page":"16","article-title":"Exploiting machine learning to subvert your spam filter","volume":"8","author":"Nelson","year":"2008","journal-title":"LEET"},{"key":"10.1016\/j.inffus.2026.104241_bib0041","series-title":"2024 IEEE Symposium on Security and Privacy (SP)","first-page":"407","article-title":"Poisoning web-scale training datasets is practical","author":"Carlini","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0042","series-title":"International Conference on Machine Learning","first-page":"35413","article-title":"Poisoning language models during instruction tuning","author":"Wan","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0043","first-page":"61836","article-title":"On the exploitability of instruction tuning","volume":"36","author":"Shu","year":"2023","journal-title":"Adv. Neural Inf. Process. 
Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0044","series-title":"2021 IEEE European Symposium on Security and Privacy (EuroS&P)","first-page":"179","article-title":"Trojaning language models for fun and profit","author":"Zhang","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0045","series-title":"2024 IEEE Symposium on Security and Privacy (SP)","first-page":"1122","article-title":"TrojanPuzzle: covertly poisoning code-suggestion models","author":"Aghakhani","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0046","series-title":"30th USENIX Security Symposium (USENIX Security 21)","first-page":"1559","article-title":"You autocomplete me: poisoning vulnerabilities in neural code completion","author":"Schuster","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0047","series-title":"33rd USENIX Security Symposium (USENIX Security 24)","first-page":"1795","article-title":"An {LLM-assisted}{Easy-to-Trigger} backdoor attack on code completion models: injecting disguised vulnerabilities against strong detection","author":"Yan","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0048","doi-asserted-by":"crossref","unstructured":"C. Mansfield, A. Paullada, K. Howell, Behind the mask: demographic bias in name detection for PII masking, arXiv: 2205.04505(2022).","DOI":"10.18653\/v1\/2022.ltedi-1.10"},{"key":"10.1016\/j.inffus.2026.104241_bib0049","unstructured":"S. Zhao, Y. Yang, Z. Wang, Z. He, L.K. Qiu, L. 
Qiu, Retrieval augmented generation (RAG) and beyond: a comprehensive survey on how to make your LLMs use external data more wisely, arXiv: 2409.14924(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0050","series-title":"Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining","first-page":"6491","article-title":"A survey on rag meeting LLMs: towards retrieval-augmented large language models","author":"Fan","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0051","doi-asserted-by":"crossref","first-page":"130185","DOI":"10.52202\/079017-4136","article-title":"AgentPoison: red-teaming LLM agents via poisoning memory or knowledge bases","volume":"37","author":"Chen","year":"2024","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0052","unstructured":"Y. Xu, J. Yao, M. Shu, Y. Sun, Z. Wu, N. Yu, T. Goldstein, F. Huang, ShadowCast: stealthy data poisoning attacks against vision-language models, arXiv: 2402.06659(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0053","series-title":"2024 IEEE Symposium on Security and Privacy (SP)","first-page":"807","article-title":"NightShade: prompt-specific poisoning attacks on text-to-image generative models","author":"Shan","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0054","unstructured":"J. 
Frankenfield, How personally identifiable information (PII) works, 2023, https:\/\/www.investopedia.com\/terms\/p\/personally-identifiable-information-pii.asp."},{"key":"10.1016\/j.inffus.2026.104241_bib0055","series-title":"30th USENIX Security Symposium (USENIX Security 21)","first-page":"2633","article-title":"Extracting training data from large language models","author":"Carlini","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0056","unstructured":"OpenAI, March 20, chatGPT outage: Here\u2019s what happened, 2023, https:\/\/openai.com\/index\/march-20-chatgpt-outage\/."},{"key":"10.1016\/j.inffus.2026.104241_bib0057","series-title":"IEEE Symposium on Security and Privacy (S&P)","first-page":"1","article-title":"Membership inference attacks against generative models","author":"Song","year":"2020"},{"key":"10.1016\/j.inffus.2026.104241_bib0058","series-title":"Proceedings of the 40th International Conference on Machine Learning (ICML)","first-page":"1","article-title":"Quantifying memorization across neural language models","author":"Carlini","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0059","series-title":"NDSS","first-page":"1","article-title":"Towards achieving user-level privacy in federated learning","author":"Song","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0060","series-title":"2023 IEEE Symposium on Security and Privacy (SP)","first-page":"346","article-title":"Analyzing leakage of personally identifiable information in language models","author":"Lukas","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0061","series-title":"IEEE Symposium on Security and Privacy","first-page":"1","article-title":"Leakage and forgetting in language models","author":"Zanella-B\u00e9guelin","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0062","unstructured":"facebookresearch, GitHub - facebookresearch\/metaseq: repo for external large-scale work, 2024, 
https:\/\/github.com\/facebookresearch\/metaseq."},{"key":"10.1016\/j.inffus.2026.104241_bib0063","first-page":"20750","article-title":"ProPILE: probing privacy leakage in large language models","volume":"36","author":"Kim","year":"2023","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0064","unstructured":"Y. Nakamura, S. Hanaoka, Y. Nomura, N. Hayashi, O. Abe, S. Yada, S. Wakamiya, E. Aramaki, KART: parameterization of privacy leakage scenarios from pre-trained language models, arXiv: 2101.00036(2020)."},{"key":"10.1016\/j.inffus.2026.104241_bib0065","doi-asserted-by":"crossref","unstructured":"K.K. Nakka, A. Frikha, R. Mendes, X. Jiang, X. Zhou, PII-compass: guiding LLM training data extraction prompts towards the target PII via grounding, arXiv: 2407.02943(2024).","DOI":"10.18653\/v1\/2024.privatenlp-1.7"},{"key":"10.1016\/j.inffus.2026.104241_bib0066","doi-asserted-by":"crossref","unstructured":"J. Huang, H. Shao, K.C.-C. Chang, Are large pre-trained language models leaking your personal information?, arXiv: 2205.12628(2022).","DOI":"10.18653\/v1\/2022.findings-emnlp.148"},{"key":"10.1016\/j.inffus.2026.104241_bib0067","article-title":"An evaluation on large language model outputs: discourse and memorization","volume":"4","author":"De Wynter","year":"2023","journal-title":"Nat. Lang. Process. J."},{"key":"10.1016\/j.inffus.2026.104241_bib0068","first-page":"28072","article-title":"Emergent and predictable memorization in large language models","volume":"36","author":"Biderman","year":"2023","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0069","unstructured":"M. Nasr, N. Carlini, J. Hayase, M. Jagielski, A.F. Cooper, D. Ippolito, C.A. Choquette-Choo, E. Wallace, F. Tram\u00e8r, K. 
Lee, Scalable extraction of training data from (production) language models, arXiv: 2311.17035(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0070","series-title":"Proceedings of the 2016 ACM SIGSAC Conference on Computer and Communications Security","first-page":"308","article-title":"Deep learning with differential privacy","author":"Abadi","year":"2016"},{"key":"10.1016\/j.inffus.2026.104241_bib0071","unstructured":"J. Borkar, What can we learn from data leakage and unlearning for law?, arXiv: 2307.10476(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0072","unstructured":"L. Birch, W. Hackett, S. Trawicki, N. Suri, P. Garraghan, Model leeching: an extraction attack targeting LLMs, arXiv: 2309.10544(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0073","series-title":"25th USENIX Security Symposium (USENIX Security 16)","first-page":"601","article-title":"Stealing machine learning models via prediction {APIs}","author":"Tram\u00e8r","year":"2016"},{"key":"10.1016\/j.inffus.2026.104241_bib0074","doi-asserted-by":"crossref","unstructured":"E. Wallace, M. Stern, D. Song, Imitation attacks and defenses for black-box machine translation systems, arXiv: 2004.15015(2020).","DOI":"10.18653\/v1\/2020.emnlp-main.446"},{"key":"10.1016\/j.inffus.2026.104241_bib0075","doi-asserted-by":"crossref","unstructured":"W. Peng, J. Yi, F. Wu, S. Wu, B. Zhu, L. Lyu, B. Jiao, T. Xu, G. Sun, X. Xie, Are you copying my model? Protecting the copyright of large language models for eaas via backdoor watermark, arXiv: 2305.10036(2023).","DOI":"10.18653\/v1\/2023.acl-long.423"},{"key":"10.1016\/j.inffus.2026.104241_bib0076","unstructured":"W.M. Si, M. Backes, Y. 
Zhang, Mondrian: prompt abstraction attack against large language models for cheaper API pricing, arXiv: 2308.03558(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0077","series-title":"Proceedings of the 22nd ACM SIGSAC Conference on Computer and Communications Security","first-page":"1322","article-title":"Model inversion attacks that exploit confidence information and basic countermeasures","author":"Fredrikson","year":"2015"},{"key":"10.1016\/j.inffus.2026.104241_bib0078","series-title":"Advances in Neural Information Processing Systems","article-title":"Deep leakage from gradients","volume":"32","author":"Zhu","year":"2019"},{"key":"10.1016\/j.inffus.2026.104241_bib0079","series-title":"Advances in Neural Information Processing Systems","first-page":"16937","article-title":"Inverting gradients\u2013How easy is it to break privacy in federated learning?","volume":"33","author":"Geiping","year":"2020"},{"key":"10.1016\/j.inffus.2026.104241_bib0080","unstructured":"Y. Li, H. Huang, Y. Zhao, X. Ma, J. Sun, BackdoorLLM: a comprehensive benchmark for backdoor attacks on large language models, arXiv: 2408.12798(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0081","unstructured":"W. Cai, et al., BadPrompt: backdoor attacks in continuous prompts, arXiv: 2302.12173(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0082","series-title":"NeurIPS","first-page":"1","article-title":"P-tuning V2: prompt tuning can be comparable to fine-tuning universally across scales and tasks","author":"Liu","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0083","unstructured":"N. Zhang, L. Li, X. Chen, S. Deng, Z. Bi, C. Tan, F. Huang, H. Chen, Differentiable prompt makes pre-trained language models better few-shot learners, arXiv: 2108.13161(2021)."},{"key":"10.1016\/j.inffus.2026.104241_bib0084","doi-asserted-by":"crossref","unstructured":"S. Zhao, J. Wen, L.A. Tuan, J. Zhao, J. 
Fu, Prompt as triggers for backdoor attack: examining the vulnerability in language models, arXiv: 2305.01219(2023).","DOI":"10.18653\/v1\/2023.emnlp-main.757"},{"key":"10.1016\/j.inffus.2026.104241_sbref0092","first-page":"2835","article-title":"A Survey of Recent Backdoor Attacks and Defenses in Large Language Models","author":"Zhao","year":"2025","journal-title":"Transact. mach. learn. res."},{"key":"10.1016\/j.inffus.2026.104241_bib0086","doi-asserted-by":"crossref","unstructured":"W. You, Z. Hammoudeh, D. Lowd, Large language models are better adversaries: exploring generative clean-label backdoor attacks against text classifiers, arXiv: 2310.18603(2023).","DOI":"10.18653\/v1\/2023.findings-emnlp.833"},{"key":"10.1016\/j.inffus.2026.104241_bib0087","unstructured":"J. Shi, Y. Liu, P. Zhou, L. Sun, BadGPT: exploring security vulnerabilities of chatgpt via backdoor attacks to instructGPT, arXiv: 2304.12298(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0088","doi-asserted-by":"crossref","unstructured":"Y. Wang, D. Xue, S. Zhang, S. Qian, BadAgent: inserting and activating backdoor attacks in LLM agents, arXiv: 2406.03007(2024).","DOI":"10.18653\/v1\/2024.acl-long.530"},{"key":"10.1016\/j.inffus.2026.104241_bib0089","unstructured":"J. Li, Y. Yang, Z. Wu, V.G. Vydiswaran, C. Xiao, ChatGPT as an attack tool: stealthy textual backdoor attack via blackbox generative model trigger, arXiv: 2304.14475(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0090","unstructured":"H. Huang, Z. Zhao, M. Backes, Y. Shen, Y. Zhang, Composite backdoor attacks against large language models, arXiv: 2310.07676(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0091","unstructured":"H. Touvron, L. Martin, K. Stone, P. Albert, A. Almahairi, Y. Babaei, N. Bashlykov, S. Batra, P. Bhargava, S. 
Bhosale, et al., LLaMA 2: open foundation and fine-tuned chat models, arXiv: 2307.09288(2023)."},{"issue":"14","key":"10.1016\/j.inffus.2026.104241_bib0092","doi-asserted-by":"crossref","first-page":"2858","DOI":"10.3390\/electronics13142858","article-title":"Data stealing attacks against large language models via backdooring","volume":"13","author":"He","year":"2024","journal-title":"Electronics"},{"key":"10.1016\/j.inffus.2026.104241_bib0093","doi-asserted-by":"crossref","unstructured":"Z. Feng, et al., CodeBERT: a pre-trained model for programming and natural languages, arXiv: 2002.08155(2020).","DOI":"10.18653\/v1\/2020.findings-emnlp.139"},{"key":"10.1016\/j.inffus.2026.104241_bib0094","doi-asserted-by":"crossref","unstructured":"Y. Wang, W. Wang, S. Joty, S.C.H. Hoi, Codet5: identifier-aware unified pre-trained encoder-decoder models for code understanding and generation, arXiv: 2109.00859(2021).","DOI":"10.18653\/v1\/2021.emnlp-main.685"},{"issue":"4","key":"10.1016\/j.inffus.2026.104241_bib0095","doi-asserted-by":"crossref","first-page":"721","DOI":"10.1109\/TSE.2024.3361661","article-title":"Stealthy backdoor attack for code models","volume":"50","author":"Yang","year":"2024","journal-title":"IEEE Trans. Softw. Eng."},{"key":"10.1016\/j.inffus.2026.104241_bib0096","unstructured":"N. Kandpal, M. Jagielski, F. Tram\u00e8r, N. Carlini, Backdoor attacks for in-context learning with language models, arXiv: 2307.14692(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0097","series-title":"Proceedings of the ACM Web Conference 2023","first-page":"2198","article-title":"Training-free lexical backdoor attacks on language models","author":"Huang","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0098","unstructured":"Y. Li, T. Li, K. Chen, J. Zhang, S. Liu, W. Wang, T. Zhang, Y. Liu, Badedit: backdooring large language models by model editing, arXiv: 2403.13355(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0099","unstructured":"A. 
Mittal, Prompt hacking and misuse of LLMs, 2023, (https:\/\/www.unite.ai\/prompt-hacking-and-misuse-of-llm\/). Accessed: 2025-05-27."},{"key":"10.1016\/j.inffus.2026.104241_bib0100","unstructured":"owasap, OWASP top 10 for large language model applications, 2023, https:\/\/owasp.org\/www-project-top-10-for-large-language-modelapplications\/. Accessed: 2025-05-27."},{"key":"10.1016\/j.inffus.2026.104241_bib0101","unstructured":"Owasp, OWASP top 10 for LLM applications 2025, 2025, https:\/\/genai.owasp.org\/resource\/owasp-top-10-for-llm-applications-2025\/. Accessed: 2025-05-27."},{"key":"10.1016\/j.inffus.2026.104241_bib0102","series-title":"Proceedings of the 16th ACM Workshop on Artificial Intelligence and Security","first-page":"79","article-title":"Not what you\u2019ve signed up for: compromising real-world LLM-integrated applications with indirect prompt injection","author":"Greshake","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0103","unstructured":"F. Perez, I. Ribeiro, Ignore previous prompt: attack techniques for language models, arXiv: 2211.09527(2022)."},{"key":"10.1016\/j.inffus.2026.104241_bib0104","unstructured":"Y. Liu, G. Deng, Y. Li, K. Wang, Z. Wang, X. Wang, T. Zhang, Y. Liu, H. Wang, Y. Zheng, et al., Prompt injection attack against LLM-integrated applications, arXiv: 2306.05499(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0105","series-title":"2024 IEEE Security and Privacy Workshops (SPW)","first-page":"132","article-title":"Exploiting programmatic behavior of LLMs: dual-use through standard security attacks","author":"Kang","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0106","unstructured":"B. Greshake Tzovaras, et al., More than you\u2019ve asked for: a comprehensive analysis of prompt injection attacks in LLM-augmented systems, arXiv: 2302.12173(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0107","unstructured":"J. Wang, Z. Liu, K.H. Park, Z. Jiang, Z. Zheng, Z. Wu, M. Chen, C. 
Xiao, Adversarial demonstration attacks on large language models, arXiv: 2305.14950(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0108","unstructured":"X. Liu, Z. Yu, Y. Zhang, N. Zhang, C. Xiao, Automatic and universal prompt injection attacks against large language models, arXiv: 2403.04957(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0109","doi-asserted-by":"crossref","unstructured":"H. Li, D. Guo, W. Fan, M. Xu, J. Huang, F. Meng, Y. Song, Multi-step jailbreaking privacy attacks on chatgpt, arXiv: 2304.05197(2023).","DOI":"10.18653\/v1\/2023.findings-emnlp.272"},{"key":"10.1016\/j.inffus.2026.104241_bib0110","series-title":"Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security","first-page":"1671","article-title":"\u201c Do anything now\u201d: characterizing and evaluating in-the-wild jailbreak prompts on large language models","author":"Shen","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0111","unstructured":"A. Rao, S. Vashistha, A. Naik, S. Aditya, M. Choudhury, Tricking LLMs into disobedience: formalizing, analyzing, and detecting jailbreaks, arXiv: 2305.14965(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0112","doi-asserted-by":"crossref","unstructured":"G. Deng, Y. Liu, Y. Li, K. Wang, Y. Zhang, Z. Li, H. Wang, T. Zhang, Y. Liu, MasterKey: automated jailbreak across multiple large language model chatbots, arXiv: 2307.08715(2023).","DOI":"10.14722\/ndss.2024.24188"},{"key":"10.1016\/j.inffus.2026.104241_bib0113","unstructured":"J. Yu, X. Lin, Z. Yu, X. Xing, GPTFUZZER: red teaming large language models with auto-generated jailbreak prompts, https:\/\/arxiv.org\/abs\/2309.10253. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0114","unstructured":"X. Liu, N. Xu, M. Chen, C. Xiao, AutoDAN: generating stealthy jailbreak prompts on aligned large language models, https:\/\/arxiv.org\/abs\/2310.04451. 
(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0115","series-title":"33rd USENIX Security Symposium (USENIX Security 24)","first-page":"4711","article-title":"Making them ask and answer: jailbreaking large language models in few queries via disguise and reconstruction","author":"Liu","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0116","series-title":"2025 IEEE Conference on Secure and Trustworthy Machine Learning (SaTML)","first-page":"23","article-title":"Jailbreaking black box large language models in twenty queries","author":"Chao","year":"2025"},{"key":"10.1016\/j.inffus.2026.104241_bib0117","unstructured":"P. Ding, J. Kuang, D. Ma, X. Cao, Y. Xian, J. Chen, S. Huang, A Wolf in Sheep\u2019s clothing: generalized nested jailbreak prompts can fool large language models easily, https:\/\/arxiv.org\/abs\/2311.08268. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0118","unstructured":"Y. Liu, X. He, M. Xiong, J. Fu, S. Deng, B. Hooi, FlipAttack: jailbreak LLMs via flipping, https:\/\/arxiv.org\/abs\/2410.02832. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0119","series-title":"2021 IEEE European Symposium on Security and Privacy (EuroS&P)","first-page":"212","article-title":"Sponge examples: energy-latency attacks on neural networks","author":"Shumailov","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0120","unstructured":"K. Gao, T. Pang, C. Du, Y. Yang, S.-T. Xia, M. Lin, Denial-of-service poisoning attacks on large language models, https:\/\/arxiv.org\/abs\/2410.10760. (2024)."},{"issue":"2","key":"10.1016\/j.inffus.2026.104241_bib0121","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3703155","article-title":"A survey on hallucination in large language models: principles, taxonomy, challenges, and open questions","volume":"43","author":"Huang","year":"2025","journal-title":"ACM Trans. Inf. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0122","doi-asserted-by":"crossref","unstructured":"A. Pal, L.K. Umapathi, M. 
Sankarasubbu, Med-halt: medical domain hallucination test for large language models, https:\/\/arxiv.org\/abs\/2307.15343. (2023).","DOI":"10.18653\/v1\/2023.conll-1.21"},{"key":"10.1016\/j.inffus.2026.104241_bib0123","doi-asserted-by":"crossref","unstructured":"W. Wang, Z. Ma, Z. Wang, C. Wu, W. Chen, X. Li, Y. Yuan, A survey of LLM-based agents in medicine: how far are we from baymax?, https:\/\/arxiv.org\/abs\/2502.11211. (2025).","DOI":"10.18653\/v1\/2025.findings-acl.539"},{"key":"10.1016\/j.inffus.2026.104241_bib0124","unstructured":"H. Kang, X.-Y. Liu, Deficiency of large language models in finance: an empirical examination of hallucination, https:\/\/arxiv.org\/abs\/2311.15548. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0125","series-title":"Proceedings of the 32nd ACM International Conference on Information and Knowledge Management","first-page":"245","article-title":"Hallucination detection: robustly discerning reliable answers in large language models","author":"Chen","year":"2023"},{"issue":"7","key":"10.1016\/j.inffus.2026.104241_bib0126","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3716846","article-title":"Hallucination detection in foundation models for decision-making: a flexible definition and review of the state of the art","volume":"57","author":"Chakraborty","year":"2025","journal-title":"ACM Comput. Surv."},{"issue":"6","key":"10.1016\/j.inffus.2026.104241_bib0127","doi-asserted-by":"crossref","first-page":"461","DOI":"10.1038\/s42256-021-00359-2","article-title":"Large language models associate muslims with violence","volume":"3","author":"Abid","year":"2021","journal-title":"Nat. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104241_bib0128","unstructured":"R. Taylor, M. Kardas, G. Cucurull, T. Scialom, A. Hartshorn, E. Saravia, A. Poulton, V. Kerkez, R. Stojnic, Galactica: a large language model for science, 2022, arXiv: 2211.09085. 
https:\/\/arxiv.org\/abs\/2211.09085."},{"key":"10.1016\/j.inffus.2026.104241_bib0129","series-title":"Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining","first-page":"526","article-title":"Neural retrievers are biased towards LLM-generated content","author":"Dai","year":"2024"},{"issue":"1","key":"10.1016\/j.inffus.2026.104241_bib0130","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3724117","article-title":"Bias testing and mitigation in llm-based code generation","volume":"35","author":"Huang","year":"2025","journal-title":"ACM Trans. Softw. Eng. Method."},{"key":"10.1016\/j.inffus.2026.104241_bib0131","series-title":"Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency","first-page":"862","article-title":"Bold: dataset and metrics for measuring biases in open-ended language generation","author":"Dhamala","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0132","unstructured":"A. Tamkin, A. Askell, L. Lovitt, E. Durmus, N. Joseph, S. Kravec, K. Nguyen, J. Kaplan, D. Ganguli, Evaluating and mitigating discrimination in language model decisions, https:\/\/arxiv.org\/abs\/2312.03689. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0133","unstructured":"S. Wang, Y. Zhao, Z. Liu, Q. Zou, H. Wang, SoK: understanding vulnerabilities in the large language model supply chain, 2025, arXiv: 2502.12497. https:\/\/arxiv.org\/abs\/2502.12497."},{"key":"10.1016\/j.inffus.2026.104241_bib0134","series-title":"Proceedings of the 2023 ACM SIGSAC Conference on Computer and Communications Security","first-page":"1865","article-title":"Large language models for code: security hardening and adversarial testing","author":"He","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0135","unstructured":"Y. Nie, Y. Wang, J. Jia, M.J. De Lucia, N.D. Bastian, W. Guo, D. Song, TrojFM: resource-efficient backdoor attacks against very large foundation models, https:\/\/arxiv.org\/abs\/2405.16783. 
(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0136","series-title":"Proceedings of the AAAI\/ACM Conference on AI, Ethics, and Society","first-page":"611","article-title":"Llm platform security: applying a systematic evaluation framework to openai\u2019s chatgpt plugins","volume":"7","author":"Iqbal","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0137","first-page":"1","article-title":"SpeechCraft: an integrated data generation pipeline from videos for LLM finetuning","author":"Jyothi Swaroop Arlagadda","year":"2024","journal-title":"2024 29th International Conference on Automation and Computing (ICAC)"},{"key":"10.1016\/j.inffus.2026.104241_bib0138","series-title":"Theory of Cryptography Conference","first-page":"265","article-title":"Calibrating noise to sensitivity in private data analysis","author":"Dwork","year":"2006"},{"key":"10.1016\/j.inffus.2026.104241_bib0139","doi-asserted-by":"crossref","first-page":"710","DOI":"10.1214\/12-EJS690","article-title":"Efficient distribution estimation for data with unobserved sub-population identifiers","volume":"6","author":"Ma","year":"2012","journal-title":"Electron. J. Stat."},{"issue":"2","key":"10.1016\/j.inffus.2026.104241_bib0140","doi-asserted-by":"crossref","first-page":"374","DOI":"10.3390\/electronics14020374","article-title":"A novel data sanitization method based on dynamic dataset partition and inspection against data poisoning attacks","volume":"14","author":"Lee","year":"2025","journal-title":"Electronics"},{"key":"10.1016\/j.inffus.2026.104241_bib0141","unstructured":"J. Xu, M.D. Ma, F. Wang, C. Xiao, M. Chen, Instructions as backdoors: backdoor vulnerabilities of instruction tuning for large language models, https:\/\/arxiv.org\/abs\/2305.14710. 
(2023)."},{"issue":"2","key":"10.1016\/j.inffus.2026.104241_bib0142","doi-asserted-by":"crossref","first-page":"1563","DOI":"10.1109\/TPAMI.2022.3162397","article-title":"Dataset security for machine learning: data poisoning, backdoor attacks, and defenses","volume":"45","author":"Goldblum","year":"2022","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104241_bib0143","series-title":"Proceedings of the 10th ACM Workshop on Artificial Intelligence and Security","first-page":"103","article-title":"Mitigating poisoning attacks on machine learning models: a data provenance based approach","author":"Baracaldo","year":"2017"},{"key":"10.1016\/j.inffus.2026.104241_bib0144","unstructured":"E. Wallace, T.Z. Zhao, S. Feng, S. Singh, Concealed data poisoning attacks on NLP models, https:\/\/arxiv.org\/abs\/2010.12563. (2020)."},{"key":"10.1016\/j.inffus.2026.104241_bib0145","unstructured":"Y. Zhang, Y. Li, L. Cui, D. Cai, L. Liu, T. Fu, X. Huang, E. Zhao, Y. Zhang, Y. Chen, et al., Siren\u2019s song in the AI ocean: a survey on hallucination in large language models, https:\/\/arxiv.org\/abs\/2309.01219. (2023)."},{"issue":"3","key":"10.1016\/j.inffus.2026.104241_bib0146","first-page":"413","article-title":"Why the problem with learning is unlearning","volume":"85","author":"Bonchek","year":"2016","journal-title":"Harv. Bus. Rev."},{"issue":"2","key":"10.1016\/j.inffus.2026.104241_bib0147","first-page":"6","article-title":"Assessment methods and protection strategies for data leakage risks in large language models","volume":"3","author":"Xiao","year":"2025","journal-title":"J. Ind. Eng. Appl. Sci."},{"key":"10.1016\/j.inffus.2026.104241_bib0148","unstructured":"D. Venditti, E.S. Ruzzetti, G.A. Xompero, C. Giannone, A. Favalli, R. Romagnoli, F.M. Zanzotto, Enhancing data privacy in large language models through private association editing, https:\/\/arxiv.org\/abs\/2406.18221. 
(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0149","unstructured":"K. Zhou, Y. Zhu, Z. Chen, W. Chen, W.X. Zhao, X. Chen, Y. Lin, J.-R. Wen, J. Han, Don\u2019t make your LLM an evaluation benchmark cheater, https:\/\/arxiv.org\/abs\/2311.01964. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0150","series-title":"International Conference on Machine Learning","first-page":"17061","article-title":"A watermark for large language models","author":"Kirchenbauer","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0151","unstructured":"G. Hinton, O. Vinyals, J. Dean, Distilling the knowledge in a neural network, https:\/\/arxiv.org\/abs\/1503.02531. (2015)."},{"key":"10.1016\/j.inffus.2026.104241_bib0152","series-title":"12th USENIX Symposium on Operating Systems Design and Implementation (OSDI 16)","first-page":"689","article-title":"{SCONE}: secure linux containers with intel {SGX}","author":"Arnautov","year":"2016"},{"key":"10.1016\/j.inffus.2026.104241_bib0153","unstructured":"Z. Sha, X. He, P. Berrang, M. Humbert, Y. Zhang, Fine-tuning is all you need to mitigate backdoor attacks, https:\/\/arxiv.org\/abs\/2212.09067. (2022)."},{"key":"10.1016\/j.inffus.2026.104241_bib0154","unstructured":"Y. Li, X. Lyu, N. Koren, L. Lyu, B. Li, X. Ma, Neural attention distillation: erasing backdoor triggers from deep neural networks, https:\/\/arxiv.org\/abs\/2101.05930. (2021)."},{"key":"10.1016\/j.inffus.2026.104241_bib0155","doi-asserted-by":"crossref","unstructured":"C. Wei, W. Meng, Z. Zhang, M. Chen, M. Zhao, W. Fang, L. Wang, Z. Zhang, W. Chen, LMSanitator: defending prompt-tuning against task-agnostic backdoors, https:\/\/arxiv.org\/abs\/2308.13904. (2023).","DOI":"10.14722\/ndss.2024.23238"},{"key":"10.1016\/j.inffus.2026.104241_bib0156","unstructured":"D. Garcia-soto, H. Chen, F. Koushanfar, PerD: perturbation sensitivity-based neural trojan detection framework on NLP applications, https:\/\/arxiv.org\/abs\/2208.04943. 
(2022)."},{"key":"10.1016\/j.inffus.2026.104241_bib0157","series-title":"International Conference on Machine Learning","first-page":"19879","article-title":"Constrained optimization with dynamic bound-scaling for effective NLP backdoor defense","author":"Shen","year":"2022"},{"key":"10.1016\/j.inffus.2026.104241_bib0158","series-title":"International Symposium on Research in Attacks, Intrusions, and Defenses","first-page":"273","article-title":"Fine-pruning: defending against backdooring attacks on deep neural networks","author":"Liu","year":"2018"},{"key":"10.1016\/j.inffus.2026.104241_bib0159","unstructured":"N. Jain, A. Schwarzschild, Y. Wen, G. Somepalli, J. Kirchenbauer, P.-y. Chiang, M. Goldblum, A. Saha, J. Geiping, T. Goldstein, Baseline defenses for adversarial attacks against aligned language models, https:\/\/arxiv.org\/abs\/2309.00614. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0160","unstructured":"S. Schulhoff, Instruction defense, strengthen AI prompts against hacking, 2024, (https:\/\/learnprompting.org\/docs\/prompt_hacking\/defensive_measures\/instruction). Accessed: 2025-06-20."},{"key":"10.1016\/j.inffus.2026.104241_bib0161","first-page":"80079","article-title":"Jailbroken: how does LLM safety training fail?","volume":"36","author":"Wei","year":"2023","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0162","unstructured":"A. Kumar, C. Agarwal, S. Srinivas, A.J. Li, S. Feizi, H. Lakkaraju, Certifying LLM safety against adversarial prompting, https:\/\/arxiv.org\/abs\/2309.02705. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0163","unstructured":"R. Pedro, D. Castro, P. Carreira, N. Santos, From prompt injections to SQL injection attacks: how protected is your LLM-integrated web application?, https:\/\/arxiv.org\/abs\/2308.01990. 
(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0164","series-title":"Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing","first-page":"1631","article-title":"Recursive deep models for semantic compositionality over a sentiment treebank","author":"Socher","year":"2013"},{"key":"10.1016\/j.inffus.2026.104241_bib0165","unstructured":"Y. Wang, J. Deng, A. Sun, X. Meng, Perplexity from PLM is unreliable for evaluating text quality, https:\/\/arxiv.org\/abs\/2210.05892. (2022)."},{"key":"10.1016\/j.inffus.2026.104241_bib0166","doi-asserted-by":"crossref","unstructured":"H. Gonen, S. Iyer, T. Blevins, N.A. Smith, L. Zettlemoyer, Demystifying prompts in language models via perplexity estimation, https:\/\/arxiv.org\/abs\/2212.04037. (2022).","DOI":"10.18653\/v1\/2023.findings-emnlp.679"},{"key":"10.1016\/j.inffus.2026.104241_bib0167","unstructured":"J. Selvi, Exploring prompt injection attacks, 2023, (https:\/\/www.nccgroup.com\/us\/research-blog\/exploring-prompt-injection-attacks\/). Accessed: 2025-06-26."},{"issue":"12","key":"10.1016\/j.inffus.2026.104241_bib0168","doi-asserted-by":"crossref","first-page":"1486","DOI":"10.1038\/s42256-023-00765-8","article-title":"Defending chatGPT against jailbreak attack via self-reminders","volume":"5","author":"Xie","year":"2023","journal-title":"Nat. Mach. Intell."},{"key":"10.1016\/j.inffus.2026.104241_bib0169","unstructured":"Z. Zhang, J. Yang, P. Ke, F. Mi, H. Wang, M. Huang, Defending large language models against jailbreaking attacks through goal prioritization, https:\/\/arxiv.org\/abs\/2311.09096. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0170","unstructured":"A. Zou, Z. Wang, N. Carlini, M. Nasr, J.Z. Kolter, M. Fredrikson, Universal and transferable adversarial attacks on aligned language models, https:\/\/arxiv.org\/abs\/2307.15043. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0171","unstructured":"P. Chao, A. Robey, E. Dobriban, H. Hassani, G.J. Pappas, E. 
Wong, Jailbreaking black box large language models in twenty queries, https:\/\/arxiv.org\/abs\/2310.08419. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0172","unstructured":"H. Inan, K. Upasani, J. Chi, R. Rungta, K. Iyer, Y. Mao, M. Tontchev, Q. Hu, B. Fuller, D. Testuggine, et al., LLaMA guard: LLM-based input-output safeguard for human-ai conversations, https:\/\/arxiv.org\/abs\/2312.06674. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0173","unstructured":"W. Nie, B. Guo, Y. Huang, C. Xiao, A. Vahdat, A. Anandkumar, Diffusion models for adversarial purification, https:\/\/arxiv.org\/abs\/2205.07460. (2022)."},{"key":"10.1016\/j.inffus.2026.104241_bib0174","series-title":"33rd USENIX security symposium (USENIX security 24)","first-page":"1831","article-title":"Formalizing and benchmarking prompt injection attacks and defenses","author":"Liu","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0175","series-title":"Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security","first-page":"660","article-title":"Optimization-based prompt injection attack to LLM-as-a-judge","author":"Shi","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0176","series-title":"2023 IEEE Symposium on Security and Privacy (SP)","first-page":"1289","article-title":"Sok: certified robustness for deep neural networks","author":"Li","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0177","unstructured":"Y. Wu, Y. Huang, Y. Liu, X. Li, P. Zhou, L. Sun, Can large language models automatically jailbreak GPT-4V?, https:\/\/arxiv.org\/abs\/2407.16686. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0178","unstructured":"X. Li, Z. Zhou, J. Zhu, J. Yao, T. Liu, B. Han, Deepinception: hypnotize large language model to be jailbreaker, https:\/\/arxiv.org\/abs\/2311.03191. 
(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0179","unstructured":"OpenAI, OpenAI Platform \u2014 platform.openai.com, 2025, https:\/\/platform.openai.com\/docs\/guides\/rate-limits. [Accessed 26-06-2025]."},{"key":"10.1016\/j.inffus.2026.104241_bib0180","unstructured":"H. Face, Inference endpoints - hugging face \u2014 huggingface.co, 2025, (https:\/\/huggingface.co\/inference-endpoints\/dedicated). [Accessed 26-06-2025]."},{"key":"10.1016\/j.inffus.2026.104241_bib0181","first-page":"606","article-title":"Efficiently scaling transformer inference","volume":"5","author":"Pope","year":"2023","journal-title":"Proc. Mach. Learn. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0182","unstructured":"dlepow, API management documentation \u2014 learn.microsoft.com, 2025, https:\/\/learn.microsoft.com\/en-us\/azure\/api-management\/. [Accessed 26-06-2025]."},{"key":"10.1016\/j.inffus.2026.104241_bib0183","series-title":"2024 IEEE\/ACM 46th International Conference on Software Engineering (ICSE)","first-page":"944","article-title":"Traces of memorisation in large language models for code","author":"Al-Kaswan","year":"2023"},{"issue":"2","key":"10.1016\/j.inffus.2026.104241_bib0184","doi-asserted-by":"crossref","first-page":"33","DOI":"10.70393\/616a736d.323732","article-title":"A differential privacy-based mechanism for preventing data leakage in large language model training","volume":"3","author":"Xiao","year":"2025","journal-title":"Acad. J. Sociol. Manage."},{"key":"10.1016\/j.inffus.2026.104241_bib0185","unstructured":"T. Fu, M. Sharma, P. Torr, S.B. Cohen, D. Krueger, F. Barez, Poisonbench: assessing large language model vulnerability to data poisoning, https:\/\/arxiv.org\/abs\/2410.08811. 
(2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0186","series-title":"30th USENIX Security Symposium (USENIX Security 21)","first-page":"1937","article-title":"Entangled watermarks as a defense against model extraction","author":"Jia","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0187","doi-asserted-by":"crossref","first-page":"4469","DOI":"10.1109\/TIFS.2025.3560557","article-title":"TrapNet: model inversion defense via trapdoor","volume":"20","author":"Ma","year":"2025","journal-title":"IEEE Trans. Inf. Forensics Secur."},{"key":"10.1016\/j.inffus.2026.104241_bib0188","unstructured":"Y. Chen, S. Shao, E. Huang, Y. Li, P.-Y. Chen, Z. Qin, K. Ren, Refine: inversion-free backdoor defense via model reprogramming, https:\/\/arxiv.org\/abs\/2502.18508. (2025a)."},{"key":"10.1016\/j.inffus.2026.104241_bib0189","series-title":"Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"18331","article-title":"Defense against prompt injection attack by leveraging attack techniques","author":"Chen","year":"2025"},{"key":"10.1016\/j.inffus.2026.104241_bib0190","unstructured":"T.-C. Liu, C.-Y. Hsu, K.-Y. Lee, C.-A. Fu, H.-y. Lee, AEGIS: automated co-evolutionary framework for guarding prompt injections schema, https:\/\/arxiv.org\/abs\/2509.00088. (2025)."},{"key":"10.1016\/j.inffus.2026.104241_bib0191","unstructured":"W. Li, W. Wu, M. Chen, J. Liu, X. Xiao, H. Wu, Faithfulness in natural language generation: a systematic survey of analysis, evaluation and optimization methods, https:\/\/arxiv.org\/abs\/2203.05227. (2022)."},{"key":"10.1016\/j.inffus.2026.104241_bib0192","series-title":"Proceedings of the 2021 ACM Conference on Fairness, Accountability, and Transparency","first-page":"610","article-title":"On the dangers of stochastic parrots: can language models be too big?","author":"Bender","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0193","unstructured":"A.K. 
Nandibhatla, OWASP LLM Top 10: addressing supply chain risks in AI systems, 2025, (https:\/\/scrumgit.com\/supply-chain-risks-securing-ai-components-from-external-threats-dc6c04c3fc5f). Accessed: 2025-07-20."},{"key":"10.1016\/j.inffus.2026.104241_bib0194","unstructured":"OWaSP, LLM03:2025 supply chain, 2025, https:\/\/genai.owasp.org\/llmrisk\/llm032025-supply-chain\/. Accessed: 2025-07-20."},{"key":"10.1016\/j.inffus.2026.104241_bib0195","series-title":"2023 International Symposium on Networks, Computers and Communications (ISNCC)","first-page":"1","article-title":"ChatGPT, let us chat sign language: experiments, architectural elements, challenges and research directions","author":"Shahin","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0196","unstructured":"A. Radford, K. Narasimhan, T. Salimans, I. Sutskever, et al., Improving language understanding by generative pre-training, 2018https:\/\/www.bibsonomy.org\/bibtex\/273ced32c0d4588eb95b6986dc2c8147c\/jonaskaiser."},{"key":"10.1016\/j.inffus.2026.104241_bib0197","unstructured":"M. Lewis, Y. Liu, N. Goyal, M. Ghazvininejad, A. Mohamed, O. Levy, V. Stoyanov, L. Zettlemoyer, BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension, 2019, arXiv: 1910.13461. https:\/\/arxiv.org\/abs\/1910.13461."},{"key":"10.1016\/j.inffus.2026.104241_bib0198","unstructured":"openAI, Introducing GPT-5 \u2014 openai.com, 2025https:\/\/openai.com\/index\/introducing-gpt-5\/,[Accessed 10-09-2025]."},{"key":"10.1016\/j.inffus.2026.104241_bib0199","doi-asserted-by":"crossref","unstructured":"M.A. Ferrag, F. Alwahedi, A. Battah, B. Cherif, A. Mechri, N. Tihanyi, T. Bisztray, M. Debbah, Generative AI in cybersecurity: A comprehensive review of LLM applications and vulnerabilities, Internet Things Cyber-Phys. Syst. 
5 (2025) 1\u201346https:\/\/www.sciencedirect.com\/science\/article\/pii\/S266734522500008210.1016\/j.iotcps.2025.01.001.","DOI":"10.1016\/j.iotcps.2025.01.001"},{"issue":"3","key":"10.1016\/j.inffus.2026.104241_bib0200","doi-asserted-by":"crossref","first-page":"65","DOI":"10.1007\/s10664-025-10614-4","article-title":"Bugs in large language models generated code: an empirical study","volume":"30","author":"Tambon","year":"2025","journal-title":"Empirical Softw. Eng."},{"key":"10.1016\/j.inffus.2026.104241_bib0201","doi-asserted-by":"crossref","unstructured":"Z. Chu, S. Wang, J. Xie, T. Zhu, Y. Yan, J. Ye, A. Zhong, X. Hu, J. Liang, P.S. Yu, et al., LLM agents for education: advances and applications, https:\/\/arxiv.org\/abs\/2503.11733. (2025).","DOI":"10.18653\/v1\/2025.findings-emnlp.743"},{"issue":"2","key":"10.1016\/j.inffus.2026.104241_bib0202","article-title":"Utilizing large language models for advanced service management: potential applications and operational challenges","volume":"4","author":"Peddinti","year":"2023","journal-title":"J. Sci. Technol."},{"key":"10.1016\/j.inffus.2026.104241_bib0203","series-title":"2024 IEEE 32nd International Requirements Engineering Conference (RE)","first-page":"507","article-title":"Enhancing legal compliance and regulation analysis with large language models","author":"Hassani","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0204","doi-asserted-by":"crossref","unstructured":"F.V. Jedrzejewski, D. Fucci, O. Adamov, ThreMoLIA: threat modeling of large language model-integrated applications, ArXiv abs\/2504.18369(2025). 10.48550\/arXiv.2504.18369.","DOI":"10.1109\/ESEM64174.2025.00068"},{"key":"10.1016\/j.inffus.2026.104241_bib0205","doi-asserted-by":"crossref","unstructured":"N. Nagaraja, H. Bahsi, Cyber threat modeling of an LLM-based healthcare system, arXiv preprint (2024).","DOI":"10.5220\/0013289700003899"},{"key":"10.1016\/j.inffus.2026.104241_bib0206","unstructured":"S.B. 
Tete, Threat modelling and risk analysis for large language model (llm)-powered applications, https:\/\/arxiv.org\/abs\/2406.11007. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0207","series-title":"Prompt injection attacks on large language models: a systematic literature review","author":"Bayhan","year":"2025"},{"key":"10.1016\/j.inffus.2026.104241_bib0208","unstructured":"A. Verma, S. Krishna, S. Gehrmann, M. Seshadri, A. Pradhan, T. Ault, L. Barrett, D. Rabinowitz, J. Doucette, N. Phan, Operationalizing a threat model for red-teaming large language models (LLMs), https:\/\/arxiv.org\/abs\/2407.14937. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0209","series-title":"Workshop on AI Systems with Confidential Computing","first-page":"1","article-title":"Facilitating threat modeling by leveraging large language models","author":"Elsharef","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0210","doi-asserted-by":"crossref","unstructured":"F. Qi, Y. Chen, X. Zhang, M. Li, Z. Liu, M. Sun, Mind the style of text! adversarial and backdoor attacks based on text style transfer, https:\/\/arxiv.org\/abs\/2110.07139. (2021).","DOI":"10.18653\/v1\/2021.emnlp-main.374"},{"key":"10.1016\/j.inffus.2026.104241_bib0211","unstructured":"X. Chen, et al., BadChain: backdoor chain-of-thought prompting for large language models, https:\/\/arxiv.org\/abs\/2308.XXXXX. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0212","unstructured":"J. Wei, X. Wang, D. Schuurmans, et al., Chain-of-thought prompting elicits reasoning in large language models, https:\/\/arxiv.org\/abs\/2201.11903. 
(2022)."},{"key":"10.1016\/j.inffus.2026.104241_bib0213","series-title":"International Conference on Ubiquitous Security","first-page":"76","article-title":"A comprehensive survey of attack techniques, implementation, and mitigation strategies in large language models","author":"Esmradi","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0214","series-title":"International Conference on Machine Learning","first-page":"12278","article-title":"Grey-box extraction of natural language models","author":"Zanella-Beguelin","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0215","series-title":"Proceedings of the 2023 ACM SIGSAC Conference on Computer and Communications Security","first-page":"2665","article-title":"DP-forward: fine-tuning and inference on language models with differential privacy in forward pass","author":"Du","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0216","unstructured":"L. Gao, S. Biderman, S. Black, L. Golding, T. Hoppe, C. Foster, J. Phang, H. He, A. Thite, N. Nabeshima, et al., The pile: an 800GB dataset of diverse text for language modeling, https:\/\/arxiv.org\/abs\/2101.00027. (2020)."},{"key":"10.1016\/j.inffus.2026.104241_bib0217","unstructured":"Y. Li, S. Bubeck, R. Eldan, A. Del Giorno, S. Gunasekar, Y.T. Lee, Textbooks are all you need II: phi-1.5 technical report, https:\/\/arxiv.org\/abs\/2309.05463. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0218","unstructured":"S. Gunasekar, Y. Zhang, J. Aneja, C.C.T. Mendes, A. Del Giorno, S. Gopi, M. Javaheripi, P. Kauffmann, G. de Rosa, O. Saarikivi, et al., Textbooks are all you need, https:\/\/arxiv.org\/abs\/2306.11644. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0219","unstructured":"G. Penedo, Q. Malartic, D. Hesslow, R. Cojocaru, A. Cappelli, H. Alobeidli, B. Pannier, E. Almazrouei, J. Launay, The RefinedWeb dataset for Falcon LLM: outperforming curated corpora with web data, and web data only, https:\/\/arxiv.org\/abs\/2306.01116. 
(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0220","unstructured":"A. Abbas, K. Tirumala, D. Simig, S. Ganguli, A.S. Morcos, SemDeDup: data-efficient learning at web-scale through semantic deduplication, https:\/\/arxiv.org\/abs\/2303.09540. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0221","first-page":"55006","article-title":"Lima: less is more for alignment","volume":"36","author":"Zhou","year":"2023","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0222","unstructured":"J. Wei, D. Huang, Y. Lu, D. Zhou, Q.V. Le, Simple synthetic data reduces sycophancy in large language models, https:\/\/arxiv.org\/abs\/2308.03958. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0223","unstructured":"S. Lin, J. Hilton, O. Evans, TruthfulQA: measuring how models mimic human falsehoods, https:\/\/arxiv.org\/abs\/2109.07958. (2021)."},{"key":"10.1016\/j.inffus.2026.104241_bib0224","unstructured":"L. Ouyang, J. Wu, X. Jiang, D. Almeida, C.L. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray, J. Schulman, J. Hilton, F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. Christiano, J. Leike, R. Lowe, Training language models to follow instructions with human feedback, 2022, arXiv: 2203.02155. https:\/\/arxiv.org\/abs\/2203.02155."},{"key":"10.1016\/j.inffus.2026.104241_bib0225","unstructured":"O. Blog, Introducing chatGPT, Internet: https:\/\/openai. com\/blog\/chatgpt 5 (2022)."},{"key":"10.1016\/j.inffus.2026.104241_bib0226","unstructured":"Z. Sun, S. Shen, S. Cao, H. Liu, C. Li, Y. Shen, C. Gan, L.-Y. Gui, Y.-X. Wang, Y. Yang, et al., Aligning large multimodal models with factually augmented RLHF, https:\/\/arxiv.org\/abs\/2309.14525. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0227","unstructured":"T. Shen, R. Jin, Y. Huang, C. Liu, W. Dong, Z. Guo, X. Wu, Y. Liu, D. Xiong, Large language model alignment: a survey, https:\/\/arxiv.org\/abs\/2309.15025. 
(2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0228","first-page":"53728","article-title":"Direct preference optimization: your language model is secretly a reward model","volume":"36","author":"Rafailov","year":"2023","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0229","first-page":"18990","article-title":"Preference ranking optimization for human alignment","volume":"38","author":"Song","year":"2024","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.inffus.2026.104241_bib0230","unstructured":"Z. Yuan, H. Yuan, C. Tan, W. Wang, S. Huang, F. Huang, RRHF: rank responses to align language models with human feedback without tears, https:\/\/arxiv.org\/abs\/2304.05302. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0231","unstructured":"H. Liu, C. Sferrazza, P. Abbeel, Chain of hindsight aligns language models with feedback, https:\/\/arxiv.org\/abs\/2302.02676. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0232","first-page":"181","article-title":"Second thoughts are best: learning to re-align with human values from text edits","volume":"35","author":"Liu","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0233","unstructured":"R. Liu, R. Yang, C. Jia, G. Zhang, D. Zhou, A.M. Dai, D. Yang, S. Vosoughi, Training socially aligned language models on simulated social interactions, 2023, arXiv: 2305.16960. https:\/\/arxiv.org\/abs\/2305.16960."},{"key":"10.1016\/j.inffus.2026.104241_bib0234","series-title":"Proceedings of the 10th Hellenic Conference on Artificial Intelligence","first-page":"1","article-title":"Convolutional neural networks for toxic comment classification","author":"Georgakopoulos","year":"2018"},{"key":"10.1016\/j.inffus.2026.104241_bib0235","unstructured":"S. Schulhoff, Perspective API documentation, 2021, (https:\/\/github.com\/conversationai\/perspectiveapi). 
Accessed: 2025-07-20."},{"key":"10.1016\/j.inffus.2026.104241_bib0236","unstructured":"A.I. Azure, Azure AI content safety, 2023, (https:\/\/azure.microsoft.com\/en-us\/products\/ai-services\/ai-content-safety). Accessed: 2025-07-20."},{"key":"10.1016\/j.inffus.2026.104241_bib0237","doi-asserted-by":"crossref","unstructured":"J. Zhao, T. Wang, M. Yatskar, R. Cotterell, V. Ordonez, K.-W. Chang, Gender bias in contextualized word embeddings, https:\/\/arxiv.org\/abs\/1904.03310. (2019).","DOI":"10.18653\/v1\/N19-1064"},{"key":"10.1016\/j.inffus.2026.104241_bib0238","unstructured":"R.H. Maudslay, H. Gonen, R. Cotterell, S. Teufel, It\u2019s all in the name: mitigating gender bias with name-based counterfactual data substitution, https:\/\/arxiv.org\/abs\/1909.00871. (2019)."},{"key":"10.1016\/j.inffus.2026.104241_bib0239","unstructured":"C. Wald, L. Pfahler, Exposing bias in online communities through large-scale language models, https:\/\/arxiv.org\/abs\/2306.02294. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0240","doi-asserted-by":"crossref","DOI":"10.1016\/j.cose.2024.104151","article-title":"SecureQwen: leveraging LLMs for vulnerability detection in python codebases","volume":"148","author":"Mechri","year":"2025","journal-title":"Comput. Secur."},{"key":"10.1016\/j.inffus.2026.104241_bib0241","unstructured":"A. Linskens, The OWASP LLM top 10 and sonatype: supply chain security, 2025, https:\/\/www.sonatype.com\/blog\/the-owasp-llm-top-10-and-sonatype-supply-chain-security. Accessed: 2025-07-20."},{"key":"10.1016\/j.inffus.2026.104241_bib0242","unstructured":"S. 
Moore, LLM security: top 10 risks and 7 security best practices, 2025, https:\/\/www.exabeam.com\/explainers\/ai-cyber-security\/llm-security-top-10-risks-and-7-security-best-practices\/. Accessed: 2025-07-20."},{"key":"10.1016\/j.inffus.2026.104241_bib0243","series-title":"The EU General Data Protection Regulation (GDPR)","author":"Paul","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0244","unstructured":"X. Ma, Y. Gao, Y. Wang, R. Wang, X. Wang, Y. Sun, Y. Ding, H. Xu, Y. Chen, Y. Zhao, H. Huang, Y. Li, J. Zhang, X. Zheng, Y. Bai, Z. Wu, X. Qiu, J. Zhang, Y. Li, X. Han, H. Li, et al., Safety at scale: a comprehensive survey of large model safety, https:\/\/arxiv.org\/abs\/2502.05206. (2025)."},{"issue":"1","key":"10.1016\/j.inffus.2026.104241_bib0245","doi-asserted-by":"crossref","first-page":"33","DOI":"10.1007\/s10462-024-11024-6","article-title":"Generative AI model privacy: a survey","volume":"58","author":"Liu","year":"2024","journal-title":"Artif. Intell. Rev."},{"key":"10.1016\/j.inffus.2026.104241_bib0246","unstructured":"Y. Xu, Machine unlearning for traditional models and large language models: a short survey, https:\/\/arxiv.org\/abs\/2404.01206. (2024)."},{"issue":"8","key":"10.1016\/j.inffus.2026.104241_bib0247","doi-asserted-by":"crossref","first-page":"14581","DOI":"10.1109\/TNNLS.2024.3514607","article-title":"Toward efficient target-level machine unlearning based on essential graph","volume":"36","author":"Xu","year":"2025","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"10.1016\/j.inffus.2026.104241_bib0248","series-title":"European Conference on Computer Vision","first-page":"165","article-title":"Multidelete for multimodal machine unlearning","author":"Cheng","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0249","unstructured":"OWASP Foundation, OWASP top 10 for large language model applications 2025, Technical Report, Open worldwide application security project, 2025. 
Version 2.0, https:\/\/owasp.org\/www-project-top-10-for-large-language-model-applications\/."},{"key":"10.1016\/j.inffus.2026.104241_bib0250","unstructured":"H. Li, Y. Chen, J. Luo, J. Wang, H. Peng, Y. Kang, X. Zhang, Q. Hu, C. Chan, Z. Xu, et al., Privacy in large language models: attacks, defenses and future directions, https:\/\/arxiv.org\/abs\/2310.10383. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0251","unstructured":"Z. Chen, Y. Deng, H. Yuan, K. Ji, Q. Gu, Self-play fine-tuning converts weak language models to strong language models, https:\/\/arxiv.org\/abs\/2401.01335. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0252","doi-asserted-by":"crossref","unstructured":"J. Hong, N. Lee, J. Thorne, ORPO: monolithic preference optimization without reference model, https:\/\/arxiv.org\/abs\/2403.07691. (2024).","DOI":"10.18653\/v1\/2024.emnlp-main.626"},{"key":"10.1016\/j.inffus.2026.104241_bib0253","series-title":"Proceedings of the Computer Vision and Pattern Recognition Conference","first-page":"19836","article-title":"ODE: open-set evaluation of hallucinations in multimodal large language models","author":"Tu","year":"2025"},{"key":"10.1016\/j.inffus.2026.104241_bib0254","unstructured":"C. Zhu, N. Chen, Y. Gao, Y. Zhang, P. Tiwari, B. Wang, Is your LLM outdated? A deep look at temporal generalization, https:\/\/arxiv.org\/abs\/2405.08460. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0255","unstructured":"Y. Dubois, B. Galambosi, P. Liang, T.B. Hashimoto, Length-controlled alpacaeval: a simple way to debias automatic evaluators, https:\/\/arxiv.org\/abs\/2404.04475. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0256","unstructured":"T. Li, W.-L. Chiang, E. Frick, L. Dunlap, T. Wu, B. Zhu, J.E. Gonzalez, I. Stoica, From crowdsourced data to high-quality benchmarks: arena-hard and benchbuilder pipeline, https:\/\/arxiv.org\/abs\/2406.11939. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0257","unstructured":"R. Raju, S. Jain, B. 
Li, J. Li, U. Thakker, Constructing domain-specific evaluation sets for LLM-as-a-judge, https:\/\/arxiv.org\/abs\/2408.08808. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0258","unstructured":"Y. Huang, L. Sun, H. Wang, S. Wu, Q. Zhang, Y. Li, C. Gao, Y. Huang, W. Lyu, Y. Zhang, et al., TrustLLM: trustworthiness in large language models, https:\/\/arxiv.org\/abs\/2401.05561. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0259","unstructured":"T. Ali, P. Kostakos, HuntGPT: integrating machine learning-based anomaly detection and explainable ai with large language models (LLMs), https:\/\/arxiv.org\/abs\/2309.16021. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0260","unstructured":"Q.V. Liao, J.W. Vaughan, Ai transparency in the age of LLMs: a human-centered research roadmap, https:\/\/arxiv.org\/abs\/2306.01941. 10 (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0261","first-page":"21619","article-title":"Sparsity-guided holistic explanation for llms with interpretable inference-time intervention","volume":"38","author":"Tan","year":"2024","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.inffus.2026.104241_bib0262","doi-asserted-by":"crossref","first-page":"82","DOI":"10.1016\/j.inffus.2019.12.012","article-title":"Explainable artificial intelligence (XAI): concepts, taxonomies, opportunities and challenges toward responsible AI","volume":"58","author":"Arrieta","year":"2020","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.inffus.2026.104241_bib0263","series-title":"Technical Report","article-title":"Explainable AI in Cybersecurity Operations: Lessons Learned from xAI Tool Deployment","author":"Nyre-Yu","year":"2022"},{"key":"10.1016\/j.inffus.2026.104241_bib0264","unstructured":"K. Wang, G. Zhang, Z. Zhou, J. Wu, M. Yu, S. Zhao, C. Yin, J. Fu, Y. Yan, H. Luo, L. Lin, Z. Xu, H. 
Lu, et al., A comprehensive survey in LLM(-agent) full stack safety: data, training and deployment, 2025, https:\/\/arxiv.org\/abs\/2504.15585."},{"key":"10.1016\/j.inffus.2026.104241_bib0265","doi-asserted-by":"crossref","unstructured":"Z. Zhang, M. Fang, L. Chen, M.-R. Namazi-Rad, J. Wang, How do large language models capture the ever-changing world knowledge? A review of recent advances, https:\/\/arxiv.org\/abs\/2310.07343. (2023).","DOI":"10.18653\/v1\/2023.emnlp-main.516"},{"issue":"8","key":"10.1016\/j.inffus.2026.104241_bib0266","first-page":"1","article-title":"Towards lifelong learning of large language models: a survey","volume":"57","author":"Zheng","year":"2025","journal-title":"ACM Comput. Surv."},{"key":"10.1016\/j.inffus.2026.104241_bib0267","series-title":"Proceedings of the Third Workshop on Privacy in Natural Language Processing","first-page":"1","article-title":"Understanding unintended memorization in language models under federated learning","author":"Thakkar","year":"2021"},{"key":"10.1016\/j.inffus.2026.104241_bib0268","series-title":"Artificial Intelligence and Statistics","first-page":"1273","article-title":"Communication-efficient learning of deep networks from decentralized data","author":"McMahan","year":"2017"},{"key":"10.1016\/j.inffus.2026.104241_bib0269","unstructured":"T. Fan, Y. Kang, G. Ma, W. Chen, W. Wei, L. Fan, Q. Yang, Fate-LLM: a industrial grade federated learning framework for large language models, https:\/\/arxiv.org\/abs\/2310.10049. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0270","series-title":"Proceedings of the Eighth Workshop on Data Management for End-to-End Machine Learning","first-page":"39","article-title":"Federated fine-tuning of llms on the very edge: the good, the bad, the ugly","author":"Woisetschl\u00e4ger","year":"2024"},{"key":"10.1016\/j.inffus.2026.104241_bib0271","unstructured":"M. Miranda, E.S. Ruzzetti, A. Santilli, F.M. Zanzotto, S. Brati\u00e8res, E. 
Rodol\u00e0, Preserving privacy in large language models: a survey on current threats and solutions, https:\/\/arxiv.org\/abs\/2408.05212. (2024)."},{"key":"10.1016\/j.inffus.2026.104241_bib0272","unstructured":"M. Sun, Z. Liu, A. Bair, J.Z. Kolter, A simple and effective pruning approach for large language models, https:\/\/arxiv.org\/abs\/2306.11695. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0273","unstructured":"E. Frantar, S. Ashkboos, T. Hoefler, D. Alistarh, GPTQ: accurate post-training quantization for generative pre-trained transformers, https:\/\/arxiv.org\/abs\/2210.17323. (2022)."},{"key":"10.1016\/j.inffus.2026.104241_bib0274","unstructured":"Y. Youn, Z. Hu, J. Ziani, J. Abernethy, Randomized quantization is all you need for differential privacy in federated learning, https:\/\/arxiv.org\/abs\/2306.11913. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0275","series-title":"Proceedings of the 2023 ACM on Internet Measurement Conference","first-page":"478","article-title":"An LLM-based framework for fingerprinting internet-connected devices","author":"Sarabi","year":"2023"},{"key":"10.1016\/j.inffus.2026.104241_bib0276","unstructured":"L. Tang, G. Uberti, T. Shlomi, Baselines for identifying watermarked large language models, https:\/\/arxiv.org\/abs\/2305.18456. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0277","unstructured":"V.S. Sadasivan, A. Kumar, S. Balasubramanian, W. Wang, S. Feizi, Can AI-generated text be reliably detected?, https:\/\/arxiv.org\/abs\/2303.11156. (2023)."},{"key":"10.1016\/j.inffus.2026.104241_bib0278","unstructured":"Y.-Y. Tsai, C. Guo, J. Yang, L. van der Maaten, RoFL: robust fingerprinting of language models, https:\/\/arxiv.org\/abs\/2505.12682. 
(2025)."},{"issue":"4","key":"10.1016\/j.inffus.2026.104241_bib0279","doi-asserted-by":"crossref","first-page":"5115","DOI":"10.1109\/TNSM.2023.3282740","article-title":"A survey on explainable artificial intelligence for cybersecurity","volume":"20","author":"Rjoub","year":"2023","journal-title":"IEEE Trans. Netw. Serv. Manage."},{"issue":"13s","key":"10.1016\/j.inffus.2026.104241_bib0280","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3583558","article-title":"From anecdotal evidence to quantitative evaluation methods: a systematic review on evaluating explainable ai","volume":"55","author":"Nauta","year":"2023","journal-title":"ACM Comput. Surv."},{"key":"10.1016\/j.inffus.2026.104241_bib0281","unstructured":"G. Srivastava, R.H. Jhaveri, S. Bhattacharya, S. Pandya, P.K.R. Maddikunta, G. Yenduri, J.G. Hall, M. Alazab, T.R. Gadekallu, et al., XAI for cybersecurity: state of the art, challenges, open issues and future directions, (2022) arxiv: 2206.03585."},{"issue":"1","key":"10.1016\/j.inffus.2026.104241_bib0282","doi-asserted-by":"crossref","first-page":"86","DOI":"10.1145\/3468507.3468519","article-title":"Adversarial attacks and defenses: an interpretation perspective","volume":"23","author":"Liu","year":"2021","journal-title":"ACM SIGKDD Explorations Newsl."}],"container-title":["Information 
Fusion"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S156625352600120X?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S156625352600120X?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T06:02:04Z","timestamp":1774591324000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S156625352600120X"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,8]]},"references-count":282,"alternative-id":["S156625352600120X"],"URL":"https:\/\/doi.org\/10.1016\/j.inffus.2026.104241","relation":{},"ISSN":["1566-2535"],"issn-type":[{"value":"1566-2535","type":"print"}],"subject":[],"published":{"date-parts":[[2026,8]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Security and privacy in LLMs: A comprehensive survey of threats and mitigation strategies","name":"articletitle","label":"Article Title"},{"value":"Information Fusion","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.inffus.2026.104241","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 The Author(s). Published by Elsevier B.V.","name":"copyright","label":"Copyright"}],"article-number":"104241"}}