{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T19:01:58Z","timestamp":1772910118887,"version":"3.50.1"},"reference-count":75,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key R&#x0026;D Program of China","award":["2022ZD0115502"],"award-info":[{"award-number":["2022ZD0115502"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62461160308"],"award-info":[{"award-number":["62461160308"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62576024"],"award-info":[{"award-number":["62576024"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U23B2010"],"award-info":[{"award-number":["U23B2010"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"The Fundamental Research Funds for the Central Universities","award":["501RCQD2025141003"],"award-info":[{"award-number":["501RCQD2025141003"]}]},{"name":"Pioneer"},{"name":"Leading Goose"},{"name":"R&#x0026;D Program of Zhejiang","award":["2024C01161"],"award-info":[{"award-number":["2024C01161"]}]},{"DOI":"10.13039\/501100004826","name":"Beijing Natural Science Foundation","doi-asserted-by":"publisher","award":["QY25227"],"award-info":[{"award-number":["QY25227"]}],"id":[{"id":"10.13039\/501100004826","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Ningbo Science and Technology Innovation 2025 Major Project","award":["2025Z034"],"award-info":[{"award-number":["2025Z034"]}]},{"name":"NSFCRGC Project","award":["N CUHK498\/24"],"award-info":[{"award-number":["N CUHK498\/24"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. 
Intell."],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1109\/tpami.2025.3636582","type":"journal-article","created":{"date-parts":[[2025,11,24]],"date-time":"2025-11-24T18:59:47Z","timestamp":1764010787000},"page":"3319-3334","source":"Crossref","is-referenced-by-count":2,"title":["FreeEdit: Mask-Free Reference-Based Image Editing With Multi-Modal Instruction"],"prefix":"10.1109","volume":"48","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-7917-7223","authenticated-orcid":false,"given":"Runze","family":"He","sequence":"first","affiliation":[{"name":"Chinese Academy of Sciences Institute of Information Engineering, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-4683-2726","authenticated-orcid":false,"given":"Kai","family":"Ma","sequence":"additional","affiliation":[{"name":"Meituan, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9701-6487","authenticated-orcid":false,"given":"Linjiang","family":"Huang","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8996-9907","authenticated-orcid":false,"given":"Shaofei","family":"Huang","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences Institute of Information Engineering, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8554-7827","authenticated-orcid":false,"given":"Jialin","family":"Gao","sequence":"additional","affiliation":[{"name":"Meituan, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7471-8344","authenticated-orcid":false,"given":"Xiaoming","family":"Wei","sequence":"additional","affiliation":[{"name":"Meituan, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3559-8009","authenticated-orcid":false,"given":"Jiao","family":"Dai","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences Institute of Information Engineering, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1107-3873","authenticated-orcid":false,"given":"Jizhong","family":"Han","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences Institute of Information Engineering, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9180-2935","authenticated-orcid":false,"given":"Si","family":"Liu","sequence":"additional","affiliation":[{"name":"Institute of Artificial Intelligence, Beihang University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ho"},{"key":"ref2","article-title":"Denoising diffusion implicit models","author":"Song"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3422622"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01102"},{"key":"ref6","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Ramesh"},{"key":"ref7","article-title":"Hierarchical text-conditional image generation with CLIP latents","author":"Ramesh","year":"2022"},{"key":"ref8","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","author":"Saharia"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52688.2022.01042"},{"key":"ref10","article-title":"GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models","author":"Nichol","year":"2021"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"ref12","first-page":"11127","article-title":"Uni-ControlNet: All-in-one control to text-to-image diffusion models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Zhao"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.52202\/079017-2669"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73636-0_12"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01117"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02148"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00978"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3342645"},{"key":"ref19","article-title":"Instantstyle-plus: Style transfer with content-preserving in text-to-image generation","author":"Wang","year":"2024"},{"key":"ref20","article-title":"InstantStyle: Free lunch towards style-preserving in text-to-image generation","author":"Wang","year":"2024"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"ref22","first-page":"31428","article-title":"MagicBrush: A manually annotated dataset for instruction-guided image editing","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Zhang"},{"key":"ref23","article-title":"EMU Edit: Precise image editing via recognition and generation tasks","author":"Sheynin","year":"2023"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.01763"},{"key":"ref25","article-title":"An image is worth one word: Personalizing text-to-image generation using textual inversion","author":"Gal","year":"2022"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.00630"},{"key":"ref27","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref28","article-title":"DINOv2: Learning robust visual features without supervision","author":"Oquab","year":"2023"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref30","first-page":"8153","article-title":"Animate Anyone: Consistent and controllable image-to-video synthesis for character animation","volume-title":"Proc. IEEE\/CVF Conf. Comput. Vis. 
Pattern Recognit.","author":"Hu"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680691"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.02155"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00192"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01461"},{"key":"ref35","article-title":"BLIP-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing","author":"Li","year":"2023"},{"key":"ref36","article-title":"IP-Adapter: Text compatible image prompt adapter for text-to-image diffusion models","author":"Ye","year":"2023"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01049"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72661-3_9"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73116-7_14"},{"key":"ref40","article-title":"Affordance-aware object insertion via mask-aware dual diffusion","author":"He","year":"2024"},{"key":"ref41","article-title":"Prompt-to-prompt image editing with cross attention control","author":"Hertz","year":"2022"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00585"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00191"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02062"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.00582"},{"key":"ref46","article-title":"Guiding instruction-based image editing via multimodal large language models","author":"Fu","year":"2023"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.00799"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01208"},{"key":"ref49","article-title":"GPT-4 technical report","author":"Achiam","year":"2023"},{"key":"ref50","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref51","article-title":"ChatGPT","year":"2022"},{"key":"ref52","article-title":"Emu: Enhancing image generation models using photogenic needles in a Haystack","author":"Dai","year":"2023"},{"key":"ref53","article-title":"InstructEdit: Improving automatic masks for diffusion-based image editing with user instructions","author":"Wang","year":"2023"},{"key":"ref54","first-page":"3058","article-title":"UltraEdit: Instruction-based fine-grained image editing at scale","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Zhao"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.01241"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1145\/3746027.3755811"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.02433"},{"key":"ref58","article-title":"MiniGPT-4: Enhancing vision-language understanding with advanced large language models","author":"Zhu","year":"2023"},{"key":"ref59","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Li"},{"key":"ref60","article-title":"PixArt-$\\alpha$\u03b1: Fast training of diffusion transformer for photorealistic text-to-image synthesis","author":"Chen","year":"2023"},{"key":"ref61","article-title":"Kolors: Effective training of diffusion model for photorealistic text-to-image synthesis","author":"Team","year":"2024"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-020-01316-z"},{"key":"ref63","article-title":"DesignEdit: Multi-layered latent decomposition and fusion for unified & accurate image editing","author":"Jia","year":"2024"},{"key":"ref64","article-title":"CogVLM: Visual expert for pretrained language models","author":"Wang","year":"2023"},{"key":"ref65","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Alayrac"},{"key":"ref66","article-title":"MiniCPM: Unveiling the potential of small language models with scalable training strategies","author":"Hu","year":"2024"},{"key":"ref67","first-page":"34892","article-title":"Visual instruction tuning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01237"},{"key":"ref69","article-title":"Diffusers: State-of-the-art diffusion models","author":"von Platen","year":"2022"},{"key":"ref70","article-title":"Kosmos-G: Generating images in context with multimodal large language models","author":"Pan","year":"2023"},{"key":"ref71","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Brown"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25353"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01391"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/11372200\/11267093.pdf?arnumber=11267093","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,9]],"date-time":"2026-02-09T21:05:17Z","timestamp":1770671117000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11267093\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":75,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2025.3636582","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3]]}}}