{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,25]],"date-time":"2026-02-25T08:05:38Z","timestamp":1772006738194,"version":"3.50.1"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,8]]},"DOI":"10.1109\/acsac67867.2025.00095","type":"proceedings-article","created":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T20:54:58Z","timestamp":1771966498000},"page":"1209-1220","source":"Crossref","is-referenced-by-count":0,"title":["Siren: A Learning-Based Multi-Turn Attack Framework for Simulating Real-World Human Jailbreak Behaviors"],"prefix":"10.1109","author":[{"given":"Yi","family":"Zhao","sequence":"first","affiliation":[{"name":"The Hong Kong Polytechnic University,Department of Computing,Hong Kong SAR,China"}]},{"given":"Youzhi","family":"Zhang","sequence":"additional","affiliation":[{"name":"Hong Kong Institute of Science & Innovation Chinese Academy of Sciences,Centre for Artificial Intelligence and Robotics,Hong Kong SAR,China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Universal and transferable adversarial attacks on aligned language models","author":"Zou","year":"2023","journal-title":"arXiv preprint"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/SaTML64287.2025.00010"},{"key":"ref3","article-title":"Derail yourself: Multi-turn LLM jailbreak attack through self-discovered clues","author":"Ren","year":"2024","journal-title":"arXiv preprint"},{"key":"ref4","article-title":"Great, now write an article about that: The crescendo multi-turn LLM jailbreak attack","author":"Russinovich","year":"2024","journal-title":"arXiv preprint"},{"key":"ref5","article-title":"Speak out of turn: Safety vulnerability of large language models in multi-turn dialogue","author":"Zhou","year":"2024","journal-title":"arXiv preprint"},{"key":"ref6","article-title":"Safe RLHF: safe reinforcement learning from human feedback","author":"Dai","year":"2024","journal-title":"ICLR 2024. OpenReview.net"},{"key":"ref7","article-title":"Training socially aligned language models on simulated social interactions","author":"Liu","year":"2024","journal-title":"ICLR 2024. OpenReview.net"},{"key":"ref8","article-title":"Jailbreak attacks and defenses against large language models: A survey","author":"Yi","year":"2024","journal-title":"arXiv preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73464-9_11"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i19.30150"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.773"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.299"},{"key":"ref13","article-title":"LLM self defense: By self examination, LLMs know they are being tricked","author":"Phute","year":"2024","journal-title":"Tiny Papers @ ICLR 2024. OpenReview. net"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.92"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.948"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.303"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.118"},{"key":"ref18","article-title":"LLM defenses are not robust to multi-turn human jailbreaks yet","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref19","article-title":"Finetuned language models are zero-shot learners","volume-title":"The Tenth International Conference on Learning Representations, ICLR 2022","author":"Wei","year":"2022"},{"key":"ref20","article-title":"Direct preference optimization: Your language model is secretly a reward model","author":"Rafailov","year":"2023","journal-title":"NeurIPS 2023"},{"key":"ref21","article-title":"Usage policies","volume-title":"OpenAI","year":"2025"},{"key":"ref22","article-title":"Beavertails: Towards improved safety alignment of LLM via a human-preference dataset","author":"Ji","year":"2023","journal-title":"NeurIPS 2023"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671444"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.107"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-industry.37"},{"key":"ref26","article-title":"Mortar: Metamorphic multi-turn testing for LLM-based dialogue systems","author":"Guo","year":"2024","journal-title":"arXiv preprint"},{"key":"ref27","article-title":"Detecting language model attacks with perplexity","author":"Alon","year":"2023","journal-title":"arXiv preprint"},{"key":"ref28","article-title":"Autodan: Generating stealthy jailbreak prompts on aligned large language models","author":"Liu","year":"2024","journal-title":"ICLR 2024. OpenReview.net"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2026.3660147"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.908"},{"key":"ref31","article-title":"Baichuan 2: Open large-scale language models","author":"Yang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/3658644.3670388"},{"key":"ref33","article-title":"The Llama 3 herd of models","author":"Dubey","year":"2024","journal-title":"arXiv preprint"},{"key":"ref34","article-title":"A strongreject for empty jailbreaks","author":"Souly","year":"2024","journal-title":"NeurIPS 2024"},{"key":"ref35","volume-title":"Gemma: Open models based on gemini research and technology","author":"Team","year":"2024"},{"key":"ref36","article-title":"Autodefense: Multi-agent LLM defense against jailbreak attacks","author":"Zeng","year":"2024","journal-title":"arXiv preprint"},{"key":"ref37","article-title":"Llms get lost in multi-turn conversation","author":"Laban","year":"2025","journal-title":"arXiv preprint"}],"event":{"name":"2025 IEEE Annual Computer Security Applications Conference (ACSAC)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,8]]},"end":{"date-parts":[[2025,12,12]]}},"container-title":["2025 IEEE Annual Computer Security Applications Conference (ACSAC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11391636\/11391706\/11391742.pdf?arnumber=11391742","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,25]],"date-time":"2026-02-25T07:11:09Z","timestamp":1772003469000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11391742\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,8]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/acsac67867.2025.00095","relation":{},"subject":[],"published":{"date-parts":[[2025,12,8]]}}}