{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T07:11:22Z","timestamp":1761894682386,"version":"build-2065373602"},"reference-count":45,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/icme59968.2025.11210045","type":"proceedings-article","created":{"date-parts":[[2025,10,30]],"date-time":"2025-10-30T17:57:42Z","timestamp":1761847062000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["PiCo: Jailbreaking Multimodal Large Language Models via Pictorial Code Contextualization"],"prefix":"10.1109","author":[{"given":"Aofan","family":"Liu","sequence":"first","affiliation":[{"name":"Wuhan University,School of Artificial Intelligence"}]},{"given":"Lulu","family":"Tang","sequence":"additional","affiliation":[{"name":"Wuhan University,School of Artificial Intelligence"}]},{"given":"Ting","family":"Pan","sequence":"additional","affiliation":[{"name":"Beijing Academy of Artificial Intelligence"}]},{"given":"Yuguo","family":"Yin","sequence":"additional","affiliation":[{"name":"Peking University,School of Electronic and Computer Engineering"}]},{"given":"Bin","family":"Wang","sequence":"additional","affiliation":[{"name":"Peking University,School of Electronic and Computer Engineering"}]},{"given":"Ao","family":"Yang","sequence":"additional","affiliation":[{"name":"Peking University,School of Electronic and Computer Engineering"}]}],"member":"263","reference":[{"issue":"5","key":"ref1","article-title":"Gpt-4 technical report. arxiv 2303.08774","volume":"2","year":"2023","journal-title":"View in Article"},{"journal-title":"Gemini: a family of highly capable multimodal models","year":"2023","author":"Team","key":"ref2"},{"key":"ref3","article-title":"Visual instruction tuning","volume":"36","author":"Liu","year":"2024","journal-title":"Advances in neural information processing systems"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72643-9_22"},{"journal-title":"Ai risk management should incorporate both safety and security","year":"2024","author":"Qi","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.17654"},{"journal-title":"Breaking down the defenses: A comparative survey of attacks on large language models","year":"2024","author":"Chowdhury","key":"ref7"},{"journal-title":"Fine-tuning aligned language models compromises safety, even when users do not intend to!","year":"2023","author":"Qi","key":"ref8"},{"journal-title":"Codechameleon: Personalized encryption framework for jailbreaking large language models","year":"2024","author":"Lv","key":"ref9"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-023-00765-8"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.895"},{"journal-title":"Defensive prompt patch: A robust and interpretable defense of llms against jailbreak attacks","year":"2024","author":"Xiong","key":"ref12"},{"article-title":"Llama guard: Llm-based input-output safeguard for human-ai conversations","year":"2023","author":"Inan","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73202-7_11"},{"journal-title":"A general language assistant as a laboratory for alignment","year":"2021","author":"Askell","key":"ref15"},{"key":"ref16","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang","year":"2022","journal-title":"Advances in neural information processing systems"},{"journal-title":"Training a helpful and harmless assistant with reinforcement learning from human feedback","year":"2022","author":"Bai","key":"ref17"},{"journal-title":"Safety-tuned llamas: Lessons from improving the safety of large language models that follow instructions","year":"2023","author":"Bianchi","key":"ref18"},{"journal-title":"anthropic","article-title":"Anthropic","year":"2023","key":"ref19"},{"journal-title":"Low-resource languages jailbreak gpt-4","year":"2023","author":"Yong","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/satml64287.2025.00010"},{"journal-title":"Easyjailbreak: A unified framework for jailbreaking large language models","year":"2024","author":"Zhou","key":"ref22"},{"key":"ref23","article-title":"Jailbroken: How does llm safety training fail?","volume":"36","author":"Wei","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.272"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3658644.3670388"},{"journal-title":"Deepinception: Hypnotize large language model to be jailbreaker","year":"2023","author":"Li","key":"ref26"},{"article-title":"Universal and transferable adversarial attacks on aligned language models","year":"2023","author":"Zou","key":"ref27"},{"journal-title":"Amplegcg: Learning a universal and transferable generative model of adversarial suffixes for jailbreaking both open and closed llms","year":"2024","author":"Liao","key":"ref28"},{"journal-title":"Autodan: Generating stealthy jailbreak prompts on aligned large language models","year":"2023","author":"Liu","key":"ref29"},{"journal-title":"Gptfuzzer: Red teaming large language models with auto-generated jailbreak prompts","year":"2023","author":"Yu","key":"ref30"},{"key":"ref31","first-page":"23716","article-title":"Flamingo: a visual language model for few-shot learning","volume":"35","author":"Alayrac","year":"2022","journal-title":"Advances in neural information processing systems"},{"key":"ref32","article-title":"Instructblip: Towards general-purpose vision-language models with instruction tuning","volume":"36","author":"Dai","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"journal-title":"Minigpt-4: Enhancing vision-language understanding with advanced large language models","year":"2023","author":"Zhu","key":"ref33"},{"year":"2023","key":"ref34","article-title":"Gpt-4v(ision) technical work and authors"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i19.30150"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73464-9_11"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i22.34568"},{"article-title":"Jailbreak in pieces: Compositional adversarial attacks on multi-modal language models","volume-title":"The Twelfth International Conference on Learning Representations","author":"Shayegani","key":"ref38"},{"journal-title":"Attackeval: How to evaluate the effectiveness of jailbreak attacking on large language models","year":"2024","author":"Jin","key":"ref39"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.776"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.92"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.198"},{"article-title":"Smoothllm: Defending large language models against jailbreaking attacks","year":"2023","author":"Robey","key":"ref43"},{"article-title":"Detecting language model attacks with perplexity","year":"2023","author":"Alon","key":"ref44"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.3390\/app14093558"}],"event":{"name":"2025 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2025,6,30]]},"location":"Nantes, France","end":{"date-parts":[[2025,7,4]]}},"container-title":["2025 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11208895\/11208897\/11210045.pdf?arnumber=11210045","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T05:33:22Z","timestamp":1761888802000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11210045\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":45,"URL":"https:\/\/doi.org\/10.1109\/icme59968.2025.11210045","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}