{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,11]],"date-time":"2025-10-11T00:24:09Z","timestamp":1760142249053,"version":"build-2065373602"},"reference-count":64,"publisher":"Springer Science and Business Media LLC","issue":"14","license":[{"start":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T00:00:00Z","timestamp":1756857600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T00:00:00Z","timestamp":1756857600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["Nos. 62402026, 62176018"],"award-info":[{"award-number":["Nos. 62402026, 62176018"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"R and D Program of Beijing Municipal Education CommissionChina","award":["KM202410016010"],"award-info":[{"award-number":["KM202410016010"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2025,11]]},"DOI":"10.1007\/s00371-025-04129-8","type":"journal-article","created":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T12:50:26Z","timestamp":1756903826000},"page":"11815-11831","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Instance-level cross-attention learning for fine-grained customizable face generation"],"prefix":"10.1007","volume":"41","author":[{"given":"Xueping","family":"Wang","sequence":"first","affiliation":[]},{"given":"Yixuan","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Hang","family":"Li","sequence":"additional","affiliation":[]},{"given":"Feihu","family":"Yan","sequence":"additional","affiliation":[]},{"given":"Guangzhe","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,3]]},"reference":[{"key":"4129_CR1","doi-asserted-by":"crossref","unstructured":"Li, Z., et\u00a0al.: Photomaker: customizing realistic human photos via stacked ID embedding. In: CVPR 16(22), pp. 8640\u20138650 (2024)","DOI":"10.1109\/CVPR52733.2024.00825"},{"key":"4129_CR2","doi-asserted-by":"crossref","unstructured":"Wu, J.Z., et\u00a0al.: Tune-a-video: one-shot tuning of image diffusion models for text-to-video generation. In: ICCV, October 1\u20136, pp. 7589\u20137599 (2023)","DOI":"10.1109\/ICCV51070.2023.00701"},{"key":"4129_CR3","doi-asserted-by":"crossref","unstructured":"Wei, F., et\u00a0al.: Powerful and flexible: personalized text-to-image generation via reinforcement learning. In Leonardis, A. et\u00a0al. (eds). ECCV, September 29-October 4, Vol. 15085, pp. 394\u2013410 (2024)","DOI":"10.1007\/978-3-031-73383-3_23"},{"key":"4129_CR4","unstructured":"Gal, R., et\u00a0al.: An image is worth one word: Personalizing text-to-image generation using textual inversion. In: ICLR, May 1\u20135 (2023)"},{"key":"4129_CR5","unstructured":"Dong, Z., Wei, P. & Lin, L. Dreamartist: towards controllable one-shot text-to-image generation via positive-negative prompt-tuning (2022). 
arXiv:2211.11337"},{"key":"4129_CR6","doi-asserted-by":"crossref","unstructured":"Han, L., et\u00a0al.: Svdiff: compact parameter space for diffusion fine-tuning. In: ICCV, October 1\u20136 , pp. 7289\u20137300 (2023)","DOI":"10.1109\/ICCV51070.2023.00673"},{"key":"4129_CR7","unstructured":"Mirza, M., Osindero, S.: Conditional generative adversarial nets (2014). arXiv:1411.1784"},{"key":"4129_CR8","unstructured":"Reed, S.E., et\u00a0al.: Generative adversarial text to image synthesis. In: Balcan, M. & Weinberger, K.Q. (eds) ICML, June 19\u201324, Vol. 48, pp. 1060\u20131069 (2016)"},{"key":"4129_CR9","doi-asserted-by":"crossref","unstructured":"Qiao, T., Zhang, J., Xu, D. & Tao, D.: Mirrorgan: learning text-to-image generation by redescription. In: CVPR, June 16\u201320, pp. 1505\u20131514 (2019)","DOI":"10.1109\/CVPR.2019.00160"},{"key":"4129_CR10","unstructured":"Li, B., Qi, X., Lukasiewicz, T., Torr, P.H.S.: Controllable text-to-image generation. In: Wallach, H.M. (eds) NeurIPS, December 8\u201314, pp. 2063\u20132073 (2019)"},{"key":"4129_CR11","unstructured":"Radford, A. et\u00a0al.: Learning transferable visual models from natural language supervision. In: Meila, M. & Zhang, T. (eds) ICML, July 18\u201324, Vol. 139, pp. 8748\u20138763 (2021)"},{"key":"4129_CR12","doi-asserted-by":"crossref","unstructured":"Zhou, Y., et\u00a0al.: Towards language-free training for text-to-image generation. In: CVPR, June 18\u201324, pp. 17886\u201317896 (2022)","DOI":"10.1109\/CVPR52688.2022.01738"},{"key":"4129_CR13","doi-asserted-by":"crossref","unstructured":"Galatolo, F.A., Cimino, M.G.C.A. & Vaglini, G.: Generating images from caption and vice versa via clip-guided generative latent space search. In: Imai, F.H., Distante, C. & Battiato, S. (eds). IMPROVE, April 28\u201330, pp. 166\u2013174 (2021)","DOI":"10.5220\/0010503701660174"},{"key":"4129_CR14","doi-asserted-by":"crossref","unstructured":"Patashnik, O., Wu, Z., Shechtman, E., Cohen-Or, D., Lischinski, D.: Styleclip: text-driven manipulation of stylegan imagery. In: ICCV, October 10\u201317, pp. 2065\u20132074 (2021)","DOI":"10.1109\/ICCV48922.2021.00209"},{"key":"4129_CR15","unstructured":"Sohl-Dickstein, J., Weiss, E.A., Maheswaranathan, N., Ganguli, S.: Deep unsupervised learning using nonequilibrium thermodynamics. In: Bach, F.R. & Blei, D.M. (eds) ICML, July 6\u201311, Vol. 37, pp. 2256\u20132265 (2015)"},{"key":"4129_CR16","unstructured":"Ho, J., Jain, A., Abbeel, P. Denoising diffusion probabilistic models. In: Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., Lin, H. (eds) NeurIPS, December 6\u201312 ( 2020)"},{"key":"4129_CR17","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P. & Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR, June 18\u201324, pp. 10674\u201310685 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"4129_CR18","unstructured":"Saharia, C., et\u00a0al.: Photorealistic text-to-image diffusion models with deep language understanding. In: Koyejo, S. et\u00a0al. (eds) NeurIPS, November 28\u2013December 9 (2022)"},{"key":"4129_CR19","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: ICCV, October 1\u20136, pp. 3813\u20133824 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"4129_CR20","doi-asserted-by":"crossref","unstructured":"Zheng, W., et\u00a0al.: Cogview3: finer and faster text-to-image generation via relay diffusion. In: Leonardis, A. 
et\u00a0al. (eds) ECCV, September 29\u2013October 4, Vol. 15135, pp. 1\u201322 (2024)","DOI":"10.1007\/978-3-031-72980-5_1"},{"key":"4129_CR21","doi-asserted-by":"crossref","unstructured":"Parihar, R., VS, S., Mani, S., Karmali, T., Babu, R.V.: Precisecontrol: enhancing text-to-image diffusion models with fine-grained attribute control. In: Leonardis, A., et\u00a0al. (eds) ECCV, September 29\u2013October 4, Vol. 15140, pp. 469\u2013487 (2024)","DOI":"10.1007\/978-3-031-73007-8_27"},{"key":"4129_CR22","doi-asserted-by":"crossref","unstructured":"Shiohara, K., Yamasaki, T.: Face2diffusion for fast and editable face personalization. In: CVPR, June 16\u201322, pp. 6850\u20136859 (2024)","DOI":"10.1109\/CVPR52733.2024.00654"},{"key":"4129_CR23","unstructured":"Yuan, G., et\u00a0al.: Inserting anybody in diffusion models via celeb basis. In: Oh, A. et\u00a0al. (eds) NeurIPS, December 10\u201316 (2023)"},{"key":"4129_CR24","doi-asserted-by":"crossref","unstructured":"Cui, S., et\u00a0al.: Idadapter: learning mixed features for tuning-free personalization of text-to-image models. In: CVPR, June 17\u201318, pp. 950\u2013959 (2024)","DOI":"10.1109\/CVPRW63382.2024.00100"},{"key":"4129_CR25","unstructured":"Ye, H., Zhang, J., Liu, S., Han, X., Yang, W.: Ip-adapter: text compatible image prompt adapter for text-to-image diffusion models (2023). arXiv:2308.06721"},{"key":"4129_CR26","doi-asserted-by":"crossref","unstructured":"Shi, J., Xiong, W., Lin, Z., Jung, H.J.: Instantbooth: personalized text-to-image generation without test-time finetuning. In: CVPR, June 16\u201322, pp. 8543\u20138552 (2024)","DOI":"10.1109\/CVPR52733.2024.00816"},{"key":"4129_CR27","doi-asserted-by":"crossref","unstructured":"Li, X., Hou, X., & Loy, C.C.: When stylegan meets stable diffusion: a $${\\cal{W}}_{+}$$ adapter for personalized image generation. In: CVPR, June 16\u201322, pp. 2187\u20132196 (2024)","DOI":"10.1109\/CVPR52733.2024.00213"},{"key":"4129_CR28","unstructured":"Dai, D., Jia, M., Zhou, Y., Xing, H. & Li, C.: Face-makeup: multimodal facial prompts for text-to-image generation (2025). arXiv:2501.02523"},{"key":"4129_CR29","unstructured":"Wang, Q., Bai, X., Wang, H., Qin, Z. & Chen, A. Instantid: zero-shot identity-preserving generation in seconds (2024). arXiv:2401.07519"},{"key":"4129_CR30","unstructured":"Mohamed, S., Han, D. & Li, Y.: Fusion is all you need: face fusion for customized identity-preserving image synthesis (2024). arXiv:2409.19111"},{"key":"4129_CR31","unstructured":"He, Z., et\u00a0al.: Imagine yourself: tuning-free personalized image generation (2024). arXiv:2409.13346"},{"key":"4129_CR32","doi-asserted-by":"crossref","unstructured":"Ruiz, N., et\u00a0al.: Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In: CVPR, June 17\u201324, pp. 22500\u201322510 (2023)","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"4129_CR33","doi-asserted-by":"crossref","unstructured":"Gal, R., et\u00a0al.: Encoder-based domain tuning for fast personalization of text-to-image models. ACM Trans. Graph. 42, 150:1\u2013150:13 (2023)","DOI":"10.1145\/3592133"},{"key":"4129_CR34","doi-asserted-by":"crossref","unstructured":"Wei, Y., et\u00a0al.: Masterweaver: taming editability and face identity for personalized text-to-image generation. In: Leonardis, A. et\u00a0al. (eds) ECCV, September 29\u2013October 4, Vol. 15109, pp. 252\u2013271 (2024)","DOI":"10.1007\/978-3-031-72983-6_15"},{"key":"4129_CR35","unstructured":"Yu, C. 
et\u00a0al.: Facechain-fact: face adapter with decoupled training for identity-preserved personalization (2024). arXiv:2410.12312"},{"key":"4129_CR36","unstructured":"Nichol, A.Q., et\u00a0al.: GLIDE: towards photorealistic image generation and editing with text-guided diffusion models. In: Chaudhuri, K. et\u00a0al. (eds) ICML, July 17\u201323, Vol. 162, pp. 16784\u201316804 (2022)"},{"key":"4129_CR37","unstructured":"Ho, J., Salimans, T.: Classifier-free diffusion guidance (2022). arXiv:2207.12598"},{"key":"4129_CR38","doi-asserted-by":"crossref","unstructured":"Gu, S., et\u00a0al.: Vector quantized diffusion model for text-to-image synthesis. In: CVPR, June 18\u201324, pp. 10686\u201310696 (2022)","DOI":"10.1109\/CVPR52688.2022.01043"},{"key":"4129_CR39","unstructured":"Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with CLIP latents (2022). arXiv:2204.06125"},{"key":"4129_CR40","unstructured":"Podell, D., et\u00a0al.: SDXL: improving latent diffusion models for high-resolution image synthesis. In: ICLR, May 7\u201311 (2024)"},{"key":"4129_CR41","doi-asserted-by":"crossref","unstructured":"Sauer, A., Lorenz, D., Blattmann, A., Rombach, R.: Adversarial diffusion distillation. In: ECCV, September 29\u2013October 4, pp. 87\u2013103 (2024)","DOI":"10.1007\/978-3-031-73016-0_6"},{"key":"4129_CR42","unstructured":"Esser, P., et\u00a0al.: Scaling rectified flow transformers for high-resolution image synthesis. In: ICML, July 21\u201327 (2024)"},{"key":"4129_CR43","unstructured":"Guo, J., Deng, J.: Insightface: an open source 2d and 3d deep face analysis library (2021). https:\/\/github.com\/deepinsight\/insightface"},{"key":"4129_CR44","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: Meila, M., Zhang, T. (eds) ICML, July 18\u201324, Vol. 139, pp. 8748\u20138763 (2021)"},{"key":"4129_CR45","unstructured":"Liu, W., Rabinovich, A., Berg, A.C.: Parsenet: Looking wider to see better (2015). arXiv:1506.04579"},{"key":"4129_CR46","doi-asserted-by":"crossref","unstructured":"Schroff, F., Kalenichenko, D., Philbin, J.: Facenet: a unified embedding for face recognition and clustering. In: CVPR, June 7\u201312, pp. 815\u2013823 (2015)","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"4129_CR47","doi-asserted-by":"publisher","first-page":"1499","DOI":"10.1109\/LSP.2016.2603342","volume":"23","author":"K Zhang","year":"2016","unstructured":"Zhang, K., Zhang, Z., Li, Z., Qiao, Y.: Joint face detection and alignment using multitask cascaded convolutional networks. IEEE Signal Process. Lett. 23, 1499\u20131503 (2016)","journal-title":"IEEE Signal Process. Lett."},{"key":"4129_CR48","doi-asserted-by":"crossref","unstructured":"Deng, J., Guo, J., Zafeiriou, S.: Arcface: additive angular margin loss for deep face recognition. arXiv:1801.07698 (2018)","DOI":"10.1109\/CVPR.2019.00482"},{"key":"4129_CR49","unstructured":"Hu, E.J., et\u00a0al.: Lora: low-rank adaptation of large language models. In: ICLR, April 25\u201329 (2022)"},{"key":"4129_CR50","unstructured":"Vaswani, A., et\u00a0al.: Attention is all you need. In: Guyon, I., et\u00a0al. (eds) NeurIPS, December 4\u20139, pp. 5998\u20136008 (2017)"},{"key":"4129_CR51","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning. In: CVPR, June 16\u201322, pp. 
26286\u201326296 (2024)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"4129_CR52","unstructured":"Karras, T., Aila, T., Laine, S., & Lehtinen, J.: Progressive growing of gans for improved quality, stability, and variation. In: ICLR, April 30\u2013May 3 (2018)"},{"key":"4129_CR53","doi-asserted-by":"crossref","unstructured":"Hessel, J., Holtzman, A., Forbes, M., Bras, R.L., Choi, Y.: Clipscore: a reference-free evaluation metric for image captioning. Moens, M., Huang, X., Specia, L. & Yih, S.W. (eds) EMNLP, 7\u201311 November, pp. 7514\u20137528 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"4129_CR54","unstructured":"Bai, S., et\u00a0al.: Qwen2.5-vl technical report (2025). arXiv:2502.13923"},{"key":"4129_CR55","unstructured":"Chen, Z., et\u00a0al.: Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling (2024). arXiv:2412.05271"},{"key":"4129_CR56","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. In: Guyon, I., et\u00a0al. (eds) NeurIPS 2017, December 4\u20139, pp. 6626\u20136637 (2017)"},{"key":"4129_CR57","unstructured":"Schuhmann, C., et\u00a0al.: LAION-5B: an open large-scale dataset for training next generation image-text models. In: Koyejo, S., et\u00a0al. (eds) NeurIPS, November 28\u2013December 9 (2022)"},{"key":"4129_CR58","unstructured":"Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. In: ICLR, May 3\u20137 (2021)"},{"key":"4129_CR59","doi-asserted-by":"publisher","first-page":"1175","DOI":"10.1007\/s11263-024-02227-z","volume":"133","author":"G Xiao","year":"2025","unstructured":"Xiao, G., Yin, T., Freeman, W.T., Durand, F., Han, S.: Fastcomposer: tuning-free multi-subject image generation with localized attention. Int. J. Comput. Vis. 133, 1175\u20131194 (2025)","journal-title":"Int. J. Comput. Vis."},{"key":"4129_CR60","doi-asserted-by":"publisher","first-page":"6033","DOI":"10.1007\/s00371-023-03151-y","volume":"40","author":"Y Endo","year":"2024","unstructured":"Endo, Y.: Masked-attention diffusion guidance for spatially controlling text-to-image generation. Vis. Comput. 40, 6033\u20136045 (2024)","journal-title":"Vis. Comput."},{"key":"4129_CR61","doi-asserted-by":"crossref","unstructured":"Qian, K., Pan, Y., Xu, H., Tian, L.: Transformer model incorporating local graph semantic attention for image caption. Vis. Comput. 40, 6533\u20136544 (2024)","DOI":"10.1007\/s00371-023-03180-7"},{"key":"4129_CR62","unstructured":"Pan, D., et\u00a0al.: Renderme-360: a large digital asset library and benchmarks towards high-fidelity head avatars. In: Oh, A. et\u00a0al. (eds) NeurIPS, December 10\u201316 (2023)"},{"key":"4129_CR63","doi-asserted-by":"publisher","first-page":"5605","DOI":"10.1007\/s00371-023-03125-0","volume":"40","author":"Z Ye","year":"2024","unstructured":"Ye, Z., Zhang, H., Li, X., Zhang, Q.: Demaskgan: a de-masking generative adversarial network guided by semantic segmentation. Vis. Comput. 40, 5605\u20135618 (2024)","journal-title":"Vis. Comput."},{"key":"4129_CR64","unstructured":"Guo, Z. et\u00a0al.: Pulid: pure and lightning ID customization via contrastive alignment. In: Globersons, A. et\u00a0al. 
(eds) NeurIPS, December 10\u201315 (2024)"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-04129-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-025-04129-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-04129-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T08:46:15Z","timestamp":1760085975000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-025-04129-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,3]]},"references-count":64,"journal-issue":{"issue":"14","published-print":{"date-parts":[[2025,11]]}},"alternative-id":["4129"],"URL":"https:\/\/doi.org\/10.1007\/s00371-025-04129-8","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"type":"print","value":"0178-2789"},{"type":"electronic","value":"1432-2315"}],"subject":[],"published":{"date-parts":[[2025,9,3]]},"assertion":[{"value":"2 August 2025","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 September 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}
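The object above is a Crossref REST API "work" record for DOI 10.1007/s00371-025-04129-8, i.e. the payload returned by GET https://api.crossref.org/works/{doi}. As a minimal sketch of how such a record can be retrieved and read programmatically: the snippet below assumes the third-party Python `requests` package, and the mailto address in the User-Agent is a placeholder following Crossref's "polite pool" convention; it is not part of the record itself.

# Minimal sketch: fetch this work's Crossref metadata and read a few fields.
# Assumes `requests` is installed; replace the mailto placeholder with your own.
import requests

DOI = "10.1007/s00371-025-04129-8"
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "metadata-demo/0.1 (mailto:you@example.org)"},
    timeout=30,
)
resp.raise_for_status()

msg = resp.json()["message"]          # same shape as the record above
print(msg["title"][0])                # article title (arrays hold one entry here)
print(msg["container-title"][0])      # journal: "The Visual Computer"
print(msg["volume"], msg["issue"], msg["page"])
print(", ".join(f"{a['given']} {a['family']}" for a in msg["author"]))
print("references:", msg["references-count"])

Note that "title", "container-title", and "ISSN" are arrays even when they hold a single value, and each entry of the "reference" array may carry only an "unstructured" citation string (with optional "DOI" and "key" fields), so consumers should treat those fields as optional.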