{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,4]],"date-time":"2026-02-04T07:18:20Z","timestamp":1770189500906,"version":"3.49.0"},"reference-count":77,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62302441"],"award-info":[{"award-number":["62302441"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key Research and Development Program Project of Ningbo","award":["2025Z029"],"award-info":[{"award-number":["2025Z029"]}]},{"name":"Open Fund of the Anhui Province Key Laboratory of Cyberspace Security Situation Awareness and Evaluation"},{"name":"Zhejiang University Education Foundation Qizhen Scholar Foundation"},{"name":"Information Technology Center of Zhejiang University and the Supercomputing Center of Hangzhou City University"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans.Inform.Forensic Secur."],"published-print":{"date-parts":[[2026]]},"DOI":"10.1109\/tifs.2026.3657898","type":"journal-article","created":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T05:42:47Z","timestamp":1769492567000},"page":"1577-1592","source":"Crossref","is-referenced-by-count":0,"title":["Dialogue Injection Attack: Jailbreaking LLMs Through Context Manipulation"],"prefix":"10.1109","volume":"21","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-2613-0676","authenticated-orcid":false,"given":"Wenlong","family":"Meng","sequence":"first","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"given":"Fan","family":"Zhang","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"given":"Wendao","family":"Yao","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"given":"Zhenyuan","family":"Guo","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8878-510X","authenticated-orcid":false,"given":"Yuwei","family":"Li","sequence":"additional","affiliation":[{"name":"College of Electronic Engineering, National University of Defense Technology, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8849-8808","authenticated-orcid":false,"given":"Chengkun","family":"Wei","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1674-4701","authenticated-orcid":false,"given":"Wenzhi","family":"Chen","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"GPT-4 technical report","volume-title":"arXiv:2303.08774","author":"Achiam","year":"2023"},{"issue":"240","key":"ref2","first-page":"1","article-title":"PaLM: Scaling language modeling with pathways","volume":"24","author":"Chowdhery","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3611643.3617850"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.27"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3626772.3657807"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671458"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.826"},{"key":"ref8","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Ouyang"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2022.3219342"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/OJCOMS.2024.3371871"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/Allerton63246.2024.10735295"},{"key":"ref12","article-title":"\u2018Do anything now\u2019: Characterizing and evaluating in-the-wild jailbreak prompts on large language models","author":"Shen","year":"2023","journal-title":"arXiv:2308.03825"},{"key":"ref13","article-title":"Don\u2019t listen to me: Understanding and exploring jailbreak prompts of large language models","author":"Yu","year":"2024","journal-title":"arXiv:2403.17336"},{"key":"ref14","article-title":"DeepInception: Hypnotize large language model to be jailbreaker","author":"Li","year":"2023","journal-title":"arXiv:2311.03191"},{"key":"ref15","first-page":"4711","article-title":"Making them ask and answer: Jailbreaking large language models in few queries via disguise and reconstruction","volume-title":"Proc. 33rd USENIX Secur. Symp.","author":"Liu"},{"key":"ref16","article-title":"Does refusal training in LLMs generalize to the past tense?","author":"Andriushchenko","year":"2024","journal-title":"arXiv:2407.11969"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.272"},{"key":"ref18","article-title":"Malla: Demystifying real-world large language model integrated malicious services","author":"Lin","year":"2024","journal-title":"arXiv:2401.03315"},{"key":"ref19","first-page":"2421","article-title":"Great, now write an article about that: The crescendo multi-turn LLM jailbreak attack","volume-title":"Proc. 34th USENIX Secur. Symp. (USENIX Secur.)","author":"Russinovich"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i22.34553"},{"key":"ref21","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv:2302.13971"},{"key":"ref22","article-title":"Gemma 2: Improving open language models at a practical size","author":"Team","year":"2024","journal-title":"arXiv:2408.00118"},{"key":"ref23","first-page":"53728","article-title":"Direct preference optimization: Your language model is secretly a reward model","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Rafailov"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.626"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3600006.3613165"},{"key":"ref26","volume-title":"Ollama: A Toolkit for Large Language Models","year":"2024"},{"key":"ref27","article-title":"Universal and transferable adversarial attacks on aligned language models","author":"Zou","year":"2023","journal-title":"arXiv:2307.15043"},{"key":"ref28","article-title":"Jailbreaking leading safety-aligned LLMs with simple adaptive attacks","author":"Andriushchenko","year":"2024","journal-title":"arXiv:2404.02151"},{"key":"ref29","article-title":"AdaPPA: Adaptive position pre-fill jailbreak attack approach targeting LLMs","author":"Lv","year":"2024","journal-title":"arXiv:2409.07503"},{"key":"ref30","article-title":"Ignore previous prompt: Attack techniques for language models","author":"Perez","year":"2022","journal-title":"arXiv:2211.09527"},{"key":"ref31","article-title":"YaRN: Efficient context window extension of large language models","author":"Peng","year":"2023","journal-title":"arXiv:2309.00071"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.260"},{"key":"ref33","article-title":"RazorAttention: Efficient KV cache compression through retrieval heads","author":"Tang","year":"2024","journal-title":"arXiv:2407.15891"},{"key":"ref34","article-title":"Open WebUI: An open, extensible, and usable interface for AI interaction","author":"Baek","year":"2025","journal-title":"arXiv:2510.02546"},{"key":"ref35","volume-title":"Lobechat: An Open-Source, Modern Design CHATGPT\/LLMS UI\/framework","year":"2025"},{"key":"ref36","volume-title":"Text Generation WEBUI: The Definitive Web UI For Local AI, With Powerful Features and Easy Setup","year":"2025"},{"key":"ref37","volume-title":"Streamlit: A Faster Way To Build and Share Data Apps","year":"2025"},{"key":"ref38","volume-title":"Gradio: Build and Share Delightful Machine Learning Apps, All in Python","year":"2025"},{"key":"ref39","volume-title":"Jan is an Open Source Alternative To Chatgpt That Runs 100% Offline on Your Computer","year":"2025"},{"key":"ref40","volume-title":"Chatbox: User-Friendly Desktop Client App for Ai Models\/llms","year":"2025"},{"key":"ref41","volume-title":"Librechat","year":"2025"},{"key":"ref42","volume-title":"Sillytavern: Llm Frontend for Power Users","year":"2025"},{"key":"ref43","volume-title":"Chat-UI: Open Source Codebase Powering the Huggingchat App","author":"Face","year":"2025"},{"key":"ref44","volume-title":"Bionic-GPT: Bionic is an On-Premise Replacement for Chatgpt, Offering the Advantages of Generative Ai While Maintaining Strict Data Confidentiality","year":"2025"},{"key":"ref45","article-title":"Safety alignment should be made more than just a few tokens deep","author":"Qi","year":"2024","journal-title":"arXiv:2406.05946"},{"key":"ref46","volume-title":"No Robots","author":"Rajani","year":"2023"},{"key":"ref47","article-title":"Length-controlled AlpacaEval: A simple way to debias automatic evaluators","author":"Dubois","year":"2024","journal-title":"arXiv:2404.04475"},{"key":"ref48","volume-title":"Open LLM Leaderboard","year":"2025"},{"key":"ref49","first-page":"34661","article-title":"H2O: heavy-hitter Oracle for efficient generative inference of large language models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Zhang"},{"key":"ref50","article-title":"JailbreakBench: An open robustness benchmark for jailbreaking large language models","author":"Chao","year":"2024","journal-title":"arXiv:2404.01318"},{"key":"ref51","volume-title":"Natural Language Processing With Python: Analyzing Text With the Natural Language Toolkit","author":"Bird","year":"2009"},{"key":"ref52","article-title":"Defending against alignment-breaking attacks via robustly aligned LLM","author":"Cao","year":"2023","journal-title":"arXiv:2309.14348"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/d19-1410"},{"key":"ref54","article-title":"Fine-tuning aligned language models compromises safety, even when users do not intend to!","author":"Qi","year":"2023","journal-title":"arXiv:2310.03693"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.118"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/satml64287.2025.00010"},{"key":"ref57","article-title":"Defensive prompt patch: A robust and interpretable defense of LLMs against jailbreak attacks","author":"Xiong","year":"2024","journal-title":"arXiv:2405.20099"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2024.24188"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.304"},{"key":"ref60","article-title":"AutoDefense: Multi-agent LLM defense against jailbreak attacks","author":"Zeng","year":"2024","journal-title":"arXiv:2403.04783"},{"key":"ref61","volume-title":"Meta Llama Guard 2","author":"Team","year":"2024"},{"key":"ref62","article-title":"The llama 3 herd of models","author":"Grattafiori","year":"2024","journal-title":"arXiv:2407.21783"},{"key":"ref63","first-page":"2947","article-title":"Intention analysis makes LLMs a good jailbreak defender","volume-title":"Proc. 31st Int. Conf. Comput. Linguistics","author":"Zhang"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1561\/3300000041"},{"key":"ref65","first-page":"8483","article-title":"Safety alignment in NLP tasks: Weakly aligned summarization as an in-context attack","volume-title":"Proc. 62nd Annu. Meeting Assoc. Comput. Linguistics","author":"Fu"},{"key":"ref66","first-page":"24678","article-title":"BeaverTails: Towards improved safety alignment of LLM via a human-preference dataset","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ji"},{"key":"ref67","volume-title":"Llama 3 Model Card","year":"2024"},{"key":"ref68","volume-title":"Openai Moderation Api","year":"2024"},{"key":"ref69","article-title":"Baseline defenses for adversarial attacks against aligned language models","author":"Jain","year":"2023","journal-title":"arXiv:2309.00614"},{"key":"ref70","first-page":"61593","article-title":"On prompt-driven safeguarding for large language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zheng"},{"key":"ref71","article-title":"Bergeron: Combating adversarial attacks through a conscience-based alignment framework","author":"Pisano","year":"2023","journal-title":"arXiv:2312.00029"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.443"},{"key":"ref73","article-title":"AutoDAN: Interpretable gradient-based adversarial attacks on large language models","author":"Zhu","year":"2023","journal-title":"arXiv:2310.15140"},{"key":"ref74","first-page":"4657","article-title":"LLM-Fuzzer: Scaling assessment of large language model jailbreaks","volume-title":"Proc. 33rd USENIX Secur. Symp.","author":"Yu"},{"key":"ref75","article-title":"MART: Improving LLM safety with multi-round automatic red-teaming","author":"Ge","year":"2023","journal-title":"arXiv:2311.07689"},{"key":"ref76","article-title":"LLM self defense: By self examination, LLMs know they are being tricked","author":"Phute","year":"2023","journal-title":"arXiv:2308.07308"},{"key":"ref77","article-title":"RAIN: Your language models can align themselves without finetuning","author":"Li","year":"2023","journal-title":"arXiv:2309.07124"}],"container-title":["IEEE Transactions on Information Forensics and Security"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10206\/11313711\/11363579.pdf?arnumber=11363579","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,3]],"date-time":"2026-02-03T20:54:25Z","timestamp":1770152065000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11363579\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":77,"URL":"https:\/\/doi.org\/10.1109\/tifs.2026.3657898","relation":{},"ISSN":["1556-6013","1556-6021"],"issn-type":[{"value":"1556-6013","type":"print"},{"value":"1556-6021","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]}}}