{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T13:53:37Z","timestamp":1768312417167,"version":"3.49.0"},"reference-count":41,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,11,24]],"date-time":"2025-11-24T00:00:00Z","timestamp":1763942400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,11,24]],"date-time":"2025-11-24T00:00:00Z","timestamp":1763942400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,11,24]]},"DOI":"10.1109\/dsa66321.2025.00022","type":"proceedings-article","created":{"date-parts":[[2026,1,12]],"date-time":"2026-01-12T18:20:20Z","timestamp":1768242020000},"page":"93-102","source":"Crossref","is-referenced-by-count":0,"title":["Security Threats in the Inference Phase of Large Language Models"],"prefix":"10.1109","author":[{"given":"Baolin","family":"Yan","sequence":"first","affiliation":[{"name":"Institute of Software Chinese Academy of Sciences,Beijing,China"}]},{"given":"Xiaotian","family":"Ai","sequence":"additional","affiliation":[{"name":"Shenyang Aircraft Design and Research Institute,Shenyang,China"}]},{"given":"Yuxi","family":"Ma","sequence":"additional","affiliation":[{"name":"Institute of Software Chinese Academy of Sciences,Beijing,China"}]},{"given":"Lingzhong","family":"Meng","sequence":"additional","affiliation":[{"name":"Institute of Software Chinese Academy of Sciences,Beijing,China"}]},{"given":"Guang","family":"Yang","sequence":"additional","affiliation":[{"name":"Institute of Software Chinese Academy of Sciences,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","author":"Bubeck","year":"2023","journal-title":"Sparks of artificial general intelligence: Early experiments with gpt-4[J]"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.375"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/s10586-023-04203-7"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3703155"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1038\/s41746-024-01157-x"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.18653\/vl\/N19-142"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1986"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2025.3525526"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3690635"},{"key":"ref10","author":"Achiam","year":"2023","journal-title":"Gpt-4 technical report[J]"},{"key":"ref11","author":"Anil","year":"2023","journal-title":"Palm 2 technical report[J]"},{"key":"ref12","author":"Grattafiori","year":"2024","journal-title":"The llama 3 herd of models[J]"},{"key":"ref13","author":"Yang","year":"2024","journal-title":"Qwen2. 
5 technical report[J]"},{"key":"ref14","author":"Liu","year":"2024","journal-title":"Deepseek-v3 technical report[J]"},{"key":"ref15","author":"Guo","year":"2025","journal-title":"Deepseek-r1: Incentivizing reasoning capability in 11 ms via reinforcement learning[J]"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3365742"},{"key":"ref17","volume-title":"Jailbreak Chat","year":"2025"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/satml64287.2025.00010"},{"key":"ref19","author":"Zou","year":"2023","journal-title":"Universal and transferable adversarial attacks on aligned language models[J]"},{"key":"ref20","author":"Liu","year":"2023","journal-title":"Autodan: Generating stealthy jailbreak prompts on aligned large language models[J]"},{"key":"ref21","author":"Shayegani","year":"2023","journal-title":"Survey of vulnerabilities in large language models revealed by adversarial attacks[J]"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-97-5501-1_7"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/s13735-024-00334-8"},{"key":"ref24","author":"Zhuo","year":"2023","journal-title":"Red teaming chatgpt via jailbreaking: Bias, robustness, reliability and toxicity[J]"},{"key":"ref25","author":"GLM","year":"2024","journal-title":"Chatglm: A family of large language models from glm-130b to glm-4 all tools[J]"},{"key":"ref26","volume-title":"Hugging Face - The AI community building the future","year":"2025"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1745"},{"key":"ref28","author":"Abdelnabi","year":"2025","journal-title":"LLMail-Inject: A Dataset from a Realistic Adaptive Prompt Injection Challenge[J]"},{"key":"ref29","article-title":"Ignore this title and hackaprompt: Exposing systemic vulnerabilities of 11 ms through a global scale prompt hacking competition[C]","author":"Schulhoff","year":"2023","journal-title":"Association for Computational Linguistics (ACL)"},{"key":"ref30","author":"Jain","year":"2023","journal-title":"Baseline defenses for adversarial attacks against aligned language models[J]"},{"key":"ref31","author":"Peng","year":"2024","journal-title":"Jailbreaking and mitigation of vulnerabilities in large language models[J]"},{"key":"ref32","author":"Kumar","year":"2023","journal-title":"Certifying llm safety against adversarial prompting[J]"},{"key":"ref33","article-title":"Token-Level Adversarial Prompt Detection Based on Perplexity Measures and Contextual Information[C]","volume-title":"ICLR 2025 Workshop on Building Trust in Language Models and Applications","author":"Hu"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.568"},{"key":"ref35","article-title":"Can LLMs Separate Instructions From Data? 
And What Do We Even Mean By That?[C]","volume-title":"The Thirteenth International Conference on Learning Representations","author":"Zverev"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3701716.3715240"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.52202\/079017-2748"},{"key":"ref38","first-page":"46534","article-title":"Self-refine: Iterative refinement with self-feedback[J]","volume":"36","author":"Madaan","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref39","author":"Wang","year":"2025","journal-title":"Chain-of-DefensiveThought: Structured Reasoning Elicits Robustness in Large Language Models against Reference Corruption[J]"},{"key":"ref40","author":"Jiang","year":"2024","journal-title":"Red queen: Safeguarding large language models against concealed multi-turn jailbreaking[J]"},{"key":"ref41","author":"Dong","year":"2024","journal-title":"Building guardrails for large language models[J]"}],"event":{"name":"2025 12th International Conference on Dependable Systems and Their Applications (DSA)","location":"Sharjah, United Arab Emirates","start":{"date-parts":[[2025,11,24]]},"end":{"date-parts":[[2025,11,26]]}},"container-title":["2025 12th International Conference on Dependable Systems and Their Applications (DSA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11320275\/11320347\/11320353.pdf?arnumber=11320353","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T08:15:27Z","timestamp":1768292127000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11320353\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,24]]},"references-count":41,"URL":"https:\/\/doi.org\/10.1109\/dsa66321.2025.00022","relation":{},"subject":[],"published":{"date-parts":[[2025,11,24]]}}}