{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:33:30Z","timestamp":1763192010890,"version":"3.45.0"},"reference-count":35,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11229281","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["DEO: Jailbreak a Black-box Multimodal Large Language Model with Dual-Embedding Alignment"],"prefix":"10.1109","author":[{"given":"Lijie","family":"Zhang","sequence":"first","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,China"}]},{"given":"Mingsi","family":"Wang","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,China"}]},{"given":"Yue","family":"Zhao","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,China"}]},{"given":"Zijin","family":"Lin","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,China"}]},{"given":"Kai","family":"Chen","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Information Engineering,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1093\/nsr\/nwae403"},{"key":"ref2","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref3","article-title":"Visual instruction tuning","volume":"36","author":"Liu","year":"2024","journal-title":"Advances in neural information processing systems"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01750"},{"article-title":"Visual question answering instruction: Unlocking multimodal large language model to domain-specific visual multitasks","year":"2024","author":"Lee","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.hcc.2024.100211"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.346"},{"key":"ref8","article-title":"Hard prompts made easy: Gradient-based discrete optimization for prompt tuning and discovery","volume":"36","author":"Wen","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/satml64287.2025.00010"},{"article-title":"Jailbreaking attack against multimodal large language model","year":"2024","author":"Niu","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2025.3592935"},{"article-title":"Image hijacks: Adversarial images can control generative models at runtime","year":"2023","author":"Bailey","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/tifs.2025.3583249"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i22.34568"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72992-8_22"},{"article-title":"Jailbreak in pieces: Compositional adversarial attacks on multi-modal language models","volume-title":"The Twelfth International Conference on Learning Representations","author":"Shayegani","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref18","first-page":"12 888","article-title":"Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"International conference on machine learning","author":"Li"},{"key":"ref19","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International conference on machine learning","author":"Radford"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2025.3592935"},{"key":"ref21","first-page":"27 730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Advances in Neural Information Processing Systems","volume":"35","author":"Ouyang","year":"2022"},{"journal-title":"\u00a0\u00a0\u00a0","key":"ref22"},{"key":"ref23","article-title":"Lima: Less is more for alignment","volume":"36","author":"Zhou","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Universal and transferable adversarial attacks on aligned language models","year":"2023","author":"Zou","key":"ref24"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.464"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73464-9_11"},{"article-title":"Vision-llms can fool themselves with self-generated typographic attacks","year":"2024","author":"Qraitem","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i19.30150"},{"key":"ref29","article-title":"Are aligned neural networks adversarially aligned?","volume":"36","author":"Carlini","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Towards deep learning models resistant to adversarial attacks","year":"2017","author":"Madry","key":"ref30"},{"key":"ref31","article-title":"On evaluating adversarial robustness of large vision-language models","volume":"36","author":"Zhao","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/s10208-015-9296-2"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02484"},{"article-title":"Minigpt-4: Enhancing vision-language understanding with advanced large language models","year":"2023","author":"Zhu","key":"ref34"},{"article-title":"Gpt-4 technical report","year":"2023","author":"Achiam","key":"ref35"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11229281.pdf?arnumber=11229281","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:30:24Z","timestamp":1763191824000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11229281\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":35,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11229281","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}