{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T17:57:57Z","timestamp":1772906277222,"version":"3.50.1"},"reference-count":82,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100010251","name":"Youth Science Foundation of Jiangsu Province","doi-asserted-by":"publisher","award":["BK20230924"],"award-info":[{"award-number":["BK20230924"]}],"id":[{"id":"10.13039\/501100010251","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Science Foundation of China","doi-asserted-by":"publisher","award":["U24A20330"],"award-info":[{"award-number":["U24A20330"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Science Foundation of China","doi-asserted-by":"publisher","award":["62361166670"],"award-info":[{"award-number":["62361166670"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Image Process."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tip.2025.3530822","type":"journal-article","created":{"date-parts":[[2025,1,23]],"date-time":"2025-01-23T18:51:38Z","timestamp":1737658298000},"page":"759-771","source":"Crossref","is-referenced-by-count":6,"title":["TRTST: Arbitrary High-Quality Text-Guided Style Transfer With Transformers"],"prefix":"10.1109","volume":"34","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2850-5222","authenticated-orcid":false,"given":"Haibo","family":"Chen","sequence":"first","affiliation":[{"name":"PCA Laboratory, the Key Laboratory of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, and the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-7254-034X","authenticated-orcid":false,"given":"Zhoujie","family":"Wang","sequence":"additional","affiliation":[{"name":"PCA Laboratory, the Key Laboratory of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, and the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4791-454X","authenticated-orcid":false,"given":"Lei","family":"Zhao","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5845-8602","authenticated-orcid":false,"given":"Jun","family":"Li","sequence":"additional","affiliation":[{"name":"PCA Laboratory, the Key Laboratory of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, and the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4800-832X","authenticated-orcid":false,"given":"Jian","family":"Yang","sequence":"additional","affiliation":[{"name":"PCA Laboratory, the Key Laboratory of Intelligent Perception and Systems for High-Dimensional Information of Ministry of Education, and the School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01140"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00092"},{"key":"ref3","article-title":"ITstyler: Image-optimized text-based style transfer","author":"Bai","year":"2023","journal-title":"arXiv:2301.10916"},{"key":"ref4","volume-title":"Clip Retrieval: Easily Compute Clip Embeddings and Build a Clip Retrieval System With Them","author":"Beaumont","year":"2022"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"ref6","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. NIPS","author":"Brown"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref8","first-page":"1","article-title":"Artistic style transfer with internal-external learning and contrastive learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Chen"},{"key":"ref9","article-title":"Fast patch-based style transfer of arbitrary style","author":"Qi Chen","year":"2016","journal-title":"arXiv:1612.04337"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2936746"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00840"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.461"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00165"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01104"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3589002"},{"key":"ref16","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref17","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv:2010.11929"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_41"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530164"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.265"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.5555\/2969033.2969125"},{"key":"ref22","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. NIPS","volume":"33","author":"Ho"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00969"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.167"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01459"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5862"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1603.08155"},{"key":"ref28","article-title":"Recognizing image style","author":"Karayev","year":"2013","journal-title":"arXiv:1311.3715"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2106.12423"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00582"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00246"},{"key":"ref34","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01029"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01753"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00790"},{"key":"ref38","first-page":"22020","article-title":"Lightweight generative adversarial networks for text-guided image manipulation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Li"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.272"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46487-9_43"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00393"},{"key":"ref42","first-page":"386","article-title":"Universal style transfer via feature transforms","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref43","first-page":"17612","article-title":"Mind the gap: Understanding the modality gap in multi-modal contrastive representation learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Liang"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3149237"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00510"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00658"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1907.11692"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref50","article-title":"Name your style: An arbitrary artist-aware image style transfer","author":"Liu","year":"2022","journal-title":"arXiv:2202.13562"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00605"},{"key":"ref52","article-title":"DeltaEdit: Exploring text-free training for text-driven image manipulation","author":"Lyu","year":"2023","journal-title":"arXiv:2303.06285"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00585"},{"key":"ref54","first-page":"1","article-title":"Text-adaptive generative adversarial networks: Manipulating images with natural language","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Nam"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00603"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00209"},{"key":"ref57","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"issue":"8","key":"ref58","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01237-3_43"},{"key":"ref61","first-page":"25278","article-title":"LAION-5B: An open large-scale dataset for training next generation image-text models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Schuhmann"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00860"},{"key":"ref63","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014","journal-title":"arXiv:1409.1556"},{"key":"ref64","article-title":"Denoising diffusion implicit models","author":"Song","year":"2020","journal-title":"arXiv:2010.02502"},{"key":"ref65","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Touvron"},{"key":"ref66","first-page":"1349","article-title":"Texture networks: Feed-forward synthesis of textures and stylized images","volume-title":"Proc. 33rd Int. Conf. Mach. Learn.","author":"Ulyanov"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.437"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3215899"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00863"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2003.819861"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01754"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00229"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00972"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20202"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00156"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00604"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00978"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1145\/3528233.3530736"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"ref81","first-page":"1","article-title":"Learning deep features for scene recognition using places database","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Zhou"},{"key":"ref82","article-title":"Deformable DETR: Deformable transformers for end-to-end object detection","author":"Zhu","year":"2020","journal-title":"arXiv:2010.04159"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/83\/10795784\/10851799.pdf?arnumber=10851799","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,31]],"date-time":"2025-01-31T05:29:53Z","timestamp":1738301393000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10851799\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":82,"URL":"https:\/\/doi.org\/10.1109\/tip.2025.3530822","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}