{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T15:44:29Z","timestamp":1773330269035,"version":"3.50.1"},"reference-count":74,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61972312"],"award-info":[{"award-number":["61972312"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62376212"],"award-info":[{"award-number":["62376212"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"National Research Foundation, Singapore, and the Cyber Security Agency through its National Cybersecurity Research and Development Programme","award":["NCRP25-P04-TAICeN"],"award-info":[{"award-number":["NCRP25-P04-TAICeN"]}]},{"name":"National Research Foundation, Singapore, and Infocomm Media Development Authority through its Trust Tech Funding Initiative"},{"name":"National Research Foundation, Singapore, and Defence Science Organization (DSO) National Laboratories through the Artificial Intelligence (AI) Singapore Programme","award":["AISG2-GC-2023-008"],"award-info":[{"award-number":["AISG2-GC-2023-008"]}]},{"name":"Career Development Fund (CDF) of Agency for Science, Technology and Research","award":["C233312028"],"award-info":[{"award-number":["C233312028"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans.Inform.Forensic Secur."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tifs.2024.3518072","type":"journal-article","created":{"date-parts":[[2024,12,23]],"date-time":"2024-12-23T19:13:10Z","timestamp":1734981190000},"page":"1333-1348","source":"Crossref","is-referenced-by-count":11,"title":["Efficient Generation of Targeted and Transferable Adversarial Examples for Vision-Language Models via Diffusion Models"],"prefix":"10.1109","volume":"20","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1239-7677","authenticated-orcid":false,"given":"Qi","family":"Guo","sequence":"first","affiliation":[{"name":"School of Software Engineering, Xi&#x2019;an Jiaotong University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7217-864X","authenticated-orcid":false,"given":"Shanmin","family":"Pang","sequence":"additional","affiliation":[{"name":"School of Software Engineering, Xi&#x2019;an Jiaotong University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2018-9344","authenticated-orcid":false,"given":"Xiaojun","family":"Jia","sequence":"additional","affiliation":[{"name":"College of Computing and Data Science, Nanyang Technological University, Jurong West, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7300-9215","authenticated-orcid":false,"given":"Yang","family":"Liu","sequence":"additional","affiliation":[{"name":"College of Computing and Data Science, Nanyang Technological University, Jurong West, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0974-9299","authenticated-orcid":false,"given":"Qing","family":"Guo","sequence":"additional","affiliation":[{"name":"Institute of High Performance Computing (IHPC) and the Centre for Frontier AI Research (CFAR), Agency for Science, Technology and Research (A*STAR), Connexis, Singapore"}]}],"member":"263","reference":[{"key":"ref1","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref2","first-page":"1","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref3","first-page":"34892","article-title":"Visual instruction tuning","volume-title":"Proc. 37th Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref5","first-page":"1692","article-title":"One transformer fits all distributions in multi-modal diffusion at scale","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Bao"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3387941"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i4.28121"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/WACVW60836.2024.00106"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1063\/5.0111827"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ASE56229.2023.00189"},{"key":"ref11","article-title":"OT-attack: Enhancing adversarial transferability of vision-language models via optimal transport optimization","author":"Han","year":"2023","journal-title":"arXiv:2312.04403"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72998-0_25"},{"key":"ref13","article-title":"Agent smith: A single image can jailbreak one million multimodal LLM agents exponentially fast","author":"Gu","year":"2024","journal-title":"arXiv:2402.08567"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02308"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-023-00661-1"},{"key":"ref16","first-page":"54111","article-title":"On evaluating adversarial robustness of large vision-language models","volume-title":"Proc. 37th Conf. Neural Inf. Process. Syst.","author":"Zhao"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2024.3420128"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3381180"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2022.3226905"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00426"},{"key":"ref21","first-page":"1","article-title":"I see dead people: Gray-box adversarial attack on image-to-text models","volume-title":"Proc. Eur. Conf. Mach. Learn. Princ. Pract. Knowl. Discovery Databases","author":"Lapid"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW60793.2023.00396"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3559758"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01183"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.120248"},{"key":"ref26","first-page":"1","article-title":"Rethinking model ensemble in transfer-based adversarial attacks","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Chen"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01456"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00425"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2024.3411921"},{"key":"ref30","first-page":"8312","article-title":"Constructing unrestricted adversarial examples with generative models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Song"},{"key":"ref31","first-page":"1","article-title":"Generating natural adversarial examples","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhao"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00421"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00123"},{"key":"ref34","first-page":"1","article-title":"Towards deep learning models resistant to adversarial attacks","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"M\u0105dry"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01046"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00957"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19772-7_32"},{"issue":"24","key":"ref38","first-page":"695","article-title":"Estimation of non-normalized statistical models by score matching","volume":"6","author":"Hyv\u00e4rinen","year":"2005","journal-title":"J. Mach. Learn. Res."},{"key":"ref39","first-page":"11918","article-title":"Generative modeling by estimating gradients of the data distribution","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Song"},{"key":"ref40","article-title":"Score-based generative modeling through stochastic differential equations","author":"Song","year":"2020","journal-title":"arXiv:2011.13456"},{"key":"ref41","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. NIPS","author":"Brown"},{"issue":"1","key":"ref42","first-page":"5485","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref43","volume-title":"Vicuna: An Open-Source Chatbot Impressing GPT-4 With 90% ChatGPT Quality","author":"Chiang","year":"2023"},{"key":"ref44","article-title":"A survey on multimodal large language models","author":"Yin","year":"2023","journal-title":"arXiv:2306.13549"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/BigData59044.2023.10386743"},{"key":"ref46","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Alayrac"},{"key":"ref47","article-title":"MiniGPT-4: Enhancing vision-language understanding with advanced large language models","author":"Zhu","year":"2023","journal-title":"arXiv:2304.10592"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3395118"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1145\/3594869"},{"key":"ref50","article-title":"A survey on transferability of adversarial examples across deep neural networks","author":"Gu","year":"2023","journal-title":"arXiv:2310.17626"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00520"},{"key":"ref52","article-title":"How robust is Google\u2019s bard to adversarial image attacks?","author":"Dong","year":"2023","journal-title":"arXiv:2309.11751"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2024.3409945"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2024.3402385"},{"key":"ref55","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Ho"},{"key":"ref56","first-page":"1","article-title":"Denoising diffusion implicit models","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Song"},{"key":"ref57","article-title":"Understanding diffusion models: A unified perspective","author":"Luo","year":"2022","journal-title":"arXiv:2208.11970"},{"key":"ref58","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00394"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2003.819861"},{"key":"ref62","first-page":"6629","article-title":"Gans trained by a two time-scale update rule converge to a local Nash equilibrium","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Heusel"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00068"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/ACSSC.2011.6190099"},{"key":"ref65","article-title":"Feature squeezing: Detecting adversarial examples in deep neural networks","author":"Xu","year":"2017","journal-title":"arXiv:1704.01155"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01171"},{"key":"ref67","article-title":"A study of the effect of JPG compression on adversarial images","author":"Karolina Dziugaite","year":"2016","journal-title":"arXiv:1608.00853"},{"key":"ref68","first-page":"23818","article-title":"DISCO: Adversarial defense with local implicit functions","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Ho"},{"key":"ref69","first-page":"1","article-title":"Diffusion models for adversarial purification","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Nie"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-023-08921-2"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3415356"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.319"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/WACV.2018.00097"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3089943"}],"container-title":["IEEE Transactions on Information Forensics and Security"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10206\/10810755\/10812818.pdf?arnumber=10812818","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,27]],"date-time":"2025-01-27T18:39:49Z","timestamp":1738003189000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10812818\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":74,"URL":"https:\/\/doi.org\/10.1109\/tifs.2024.3518072","relation":{},"ISSN":["1556-6013","1556-6021"],"issn-type":[{"value":"1556-6013","type":"print"},{"value":"1556-6021","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}