{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,20]],"date-time":"2026-02-20T07:10:14Z","timestamp":1771571414108,"version":"3.50.1"},"reference-count":33,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T00:00:00Z","timestamp":1764028800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T00:00:00Z","timestamp":1764028800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,11,25]]},"DOI":"10.1109\/fllm67465.2025.11391111","type":"proceedings-article","created":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T20:55:46Z","timestamp":1771534546000},"page":"172-179","source":"Crossref","is-referenced-by-count":0,"title":["Towards Safe and Secure LLM Systems: Defense Strategies and Assessment Against Inference-Time Security Risks"],"prefix":"10.1109","author":[{"given":"Vladyslava","family":"Tyshchenko","sequence":"first","affiliation":[{"name":"SoftServe Inc.,Wroclaw,Poland"}]},{"given":"Nazarii","family":"Drushchak","sequence":"additional","affiliation":[{"name":"SoftServe Inc. and Ukrainian Catholic University,Lviv,Ukraine"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Challenges and applications of large language models","author":"Kaddour","year":"2023"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3461001.3471147"},{"issue":"2","key":"ref3","first-page":"100211","article-title":"A survey on large language model (llm) security and privacy: The good, the bad, and the ugly","volume-title":"High-Confidence Computing","volume":"4","author":"Yao","year":"2024"},{"key":"ref4","article-title":"Unique security and privacy threats of large language model: A comprehensive survey","author":"Wang","year":"2024"},{"key":"ref5","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/2024.findings-acl.267","article-title":"The good and the bad: Exploring privacy issues in retrieval-augmented generation (rag)","author":"Zeng","year":"2024"},{"key":"ref6","article-title":"Trojanrag: Retrieval-augmented generation can be backdoor driver in large language models","author":"Cheng","year":"2024"},{"key":"ref7","article-title":"Rag and roll: An end-to-end evaluation of indirect prompt manipulations in llm-based application frameworks","author":"Stefano","year":"2024"},{"key":"ref8","article-title":"Confusedpilot: Confused deputy risks in rag-based llms","author":"RoyChowdhury","year":"2024"},{"key":"ref9","article-title":"Navigating the risks: A survey of security, privacy, and ethics threats in llm-based agents","author":"Gan","year":"2024"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3716628"},{"key":"ref11","article-title":"Commercial llm agents are already vulnerable to simple yet dangerous attacks","author":"Li","year":"2025"},{"key":"ref12","article-title":"Securing large language models: Threats, vulnerabilities and responsible practices","author":"Abdali","year":"2024"},{"key":"ref13","doi-asserted-by":"crossref","DOI":"10.1609\/aies.v7i1.31647","article-title":"Red-teaming for generative ai: Silver bullet or security theater?","author":"Feffer","year":"2024"},{"key":"ref14","article-title":"Against the achilles\u2019 heel: A survey on red teaming for generative models","author":"Lin","year":"2024"},{"key":"ref15","article-title":"Use of llms for illicit purposes: Threats, prevention measures, and vulnerabilities","author":"Mozes","year":"2023"},{"key":"ref16","article-title":"Building guardrails for large language models","author":"Dong","year":"2024"},{"key":"ref17","article-title":"Privacy-aware rag: Secure and isolated knowledge retrieval","author":"Zhou","year":"2025"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.17148\/ijarcce.2025.14114"},{"key":"ref19","article-title":"A new era in llm security: Exploring security concerns in real-world llm-based systems","author":"Wu","year":"2024"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.5220\/0013289700003899"},{"key":"ref21","article-title":"No free lunch with guardrails","author":"Kumar","year":"2025"},{"key":"ref22","article-title":"Adversarial prompt evaluation: Systematic benchmarking of guardrails against prompt input attacks on llms","author":"Zizzo","year":"2025"},{"key":"ref23","doi-asserted-by":"crossref","DOI":"10.3390\/make3020020","article-title":"Towards crisp-ml(q): A machine learning process model with quality assurance methodology","author":"Studer","year":"2021"},{"key":"ref24","article-title":"Sok: Evaluating jailbreak guardrails for large language models","author":"Wang","year":"2025"},{"key":"ref25","article-title":"Bypassing prompt injection and jailbreak detection in llm guardrails","author":"Hackett","year":"2025"},{"key":"ref26","article-title":"Claude 3.5 sonnet model card addendum","year":"2024"},{"key":"ref27","article-title":"garak: A Framework for Security Probing Large Language Models","author":"Derczynski","year":"2024"},{"key":"ref28","article-title":"Openai gpt-4.5 system card","year":"2025"},{"key":"ref29","article-title":"Gemini: A family of highly capable multimodal models","author":"Team","year":"2024"},{"key":"ref30","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/2023.emnlp-demo.40","article-title":"Nemo guardrails: A toolkit for controllable and safe llm applications with programmable rails","author":"Rebedea","year":"2023"},{"key":"ref31","article-title":"Llama guard: Llm-based input-output safeguard for human-ai conversations","author":"Inan","year":"2023"},{"key":"ref32","doi-asserted-by":"crossref","DOI":"10.1145\/3660799","article-title":"Glitch tokens in large language models: Categorization taxonomy and effective detection","author":"Li","year":"2024"},{"key":"ref33","article-title":"Lessons from red teaming 100 generative ai products","author":"Bullwinkel","year":"2025"}],"event":{"name":"2025 3rd International Conference on Foundation and Large Language Models (FLLM)","location":"Vienna, Austria","start":{"date-parts":[[2025,11,25]]},"end":{"date-parts":[[2025,11,28]]}},"container-title":["2025 3rd International Conference on Foundation and Large Language Models (FLLM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11390736\/11390873\/11391111.pdf?arnumber=11391111","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,20]],"date-time":"2026-02-20T06:42:35Z","timestamp":1771569755000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11391111\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,25]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/fllm67465.2025.11391111","relation":{},"subject":[],"published":{"date-parts":[[2025,11,25]]}}}