{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T16:44:36Z","timestamp":1774025076878,"version":"3.50.1"},"reference-count":65,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2022YFB3103500"],"award-info":[{"award-number":["2022YFB3103500"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62106026"],"award-info":[{"award-number":["62106026"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005230","name":"Natural Science Foundation of Chongqing","doi-asserted-by":"publisher","award":["cstc2021jcyj-msxmX0273"],"award-info":[{"award-number":["cstc2021jcyj-msxmX0273"]}],"id":[{"id":"10.13039\/501100005230","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Postdoctoral Fellowship Program of CPSF","award":["GZC20233323"],"award-info":[{"award-number":["GZC20233323"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Multimedia"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tmm.2025.3581781","type":"journal-article","created":{"date-parts":[[2025,6,20]],"date-time":"2025-06-20T13:29:00Z","timestamp":1750426140000},"page":"6262-6272","source":"Crossref","is-referenced-by-count":2,"title":["SFCM-AEG: Source-Free Cross-Modal Adversarial Example Generation"],"prefix":"10.1109","volume":"27","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6716-0039","authenticated-orcid":false,"given":"Yan","family":"Gan","sequence":"first","affiliation":[{"name":"College of Computer Science, Chongqing University, Chongqing, China"}]},{"given":"Xinyao","family":"Xiao","sequence":"additional","affiliation":[{"name":"College of Computer Science, Chongqing University, Chongqing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9439-4623","authenticated-orcid":false,"given":"Tao","family":"Xiang","sequence":"additional","affiliation":[{"name":"College of Computer Science, Chongqing University, Chongqing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-2408-5850","authenticated-orcid":false,"given":"Chengqian","family":"Wu","sequence":"additional","affiliation":[{"name":"College of Computer Science, Chongqing University, Chongqing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2259-886X","authenticated-orcid":false,"given":"Deqiang","family":"Ouyang","sequence":"additional","affiliation":[{"name":"College of Computer Science, Chongqing University, Chongqing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612285"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3534929"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2024.102701"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01501"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3165024"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612454"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.109286"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02359"},{"key":"ref9","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Brown","year":"2020"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref11","first-page":"1","article-title":"Explaining and harnessing adversarial examples","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Goodfellow","year":"2015"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.06083"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2023.111353"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3559540"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3653021"},{"key":"ref17","first-page":"1060","article-title":"Generative adversarial text to image synthesis","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Reed","year":"2016"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2856256"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00143"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.01060"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00976"},{"key":"ref22","first-page":"30105","article-title":"StyleGAN-T: Unlocking the power of GANs for fast large-scale text-to-image synthesis","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Sauer","year":"2023"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3238554"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01366"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3278992"},{"key":"ref26","first-page":"8780","article-title":"Diffusion models beat GANs on image synthesis","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Dhariwal","year":"2021"},{"key":"ref27","first-page":"22675","article-title":"Reflected diffusion models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Lou","year":"2023"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01374"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3261988"},{"key":"ref31","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref32","article-title":"Hierarchical text-conditional image generation with CLIP latents","author":"Ramesh","year":"2022"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i6.28364"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00714"},{"key":"ref35","first-page":"1","article-title":"Controlling text-to-image diffusion by orthogonal finetuning","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Qiu","year":"2024"},{"key":"ref36","first-page":"1","article-title":"Reinforcement learning for fine-tuning text-to-image diffusion models","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Fan","year":"2024"},{"key":"ref37","first-page":"1","article-title":"Uni-ControlNet: All-in-one control to text-to-image diffusion models","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Zhao","year":"2024"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2023.3242990"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2022.08.010"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW59228.2023.00403"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICME55011.2023.00037"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3244957"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2018.2882908"},{"key":"ref44","first-page":"1","article-title":"Cross-modal learning with adversarial samples","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Li","year":"2019"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612364"},{"key":"ref46","first-page":"1","article-title":"Intriguing properties of neural networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Szegedy","year":"2014"},{"key":"ref47","article-title":"Adversarial transformation networks: Learning to generate adversarial examples","author":"Baluja","year":"2017"},{"key":"ref48","first-page":"1","article-title":"Semantic adversarial attacks via diffusion models","volume-title":"Proc. Brit. Mach. Vis. Conf.","author":"Wang","year":"2023"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72952-2_6"},{"key":"ref50","article-title":"DiffProtect: Generate adversarial examples with diffusion models for facial privacy protection","author":"Liu","year":"2023"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00421"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3480519"},{"key":"ref53","article-title":"Improving adversarial transferability by stable diffusion","author":"Liu","year":"2023"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447402"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2023.08.048"},{"key":"ref56","first-page":"1","article-title":"Content-based unrestricted adversarial attack","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Chen","year":"2024"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW59228.2023.00236"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW59228.2023.00080"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00995"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2008.128"},{"key":"ref61","first-page":"1","article-title":"Adversarial examples in the physical world","volume-title":"Proc. Int. Conf. Learn. Representations Workshop","author":"Kurakin","year":"2017"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00957"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.5555\/3015812.3015979"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1016\/j.cose.2022.102943"}],"container-title":["IEEE Transactions on Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6046\/10844992\/11045526.pdf?arnumber=11045526","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,30]],"date-time":"2025-09-30T13:00:05Z","timestamp":1759237205000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11045526\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":65,"URL":"https:\/\/doi.org\/10.1109\/tmm.2025.3581781","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"value":"1520-9210","type":"print"},{"value":"1941-0077","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}