{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,23]],"date-time":"2026-01-23T15:36:20Z","timestamp":1769182580723,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":41,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819555666","type":"print"},{"value":"9789819555673","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-5567-3_36","type":"book-chapter","created":{"date-parts":[[2026,1,22]],"date-time":"2026-01-22T21:14:34Z","timestamp":1769116474000},"page":"523-537","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Towards Better Image-Text Matching: Concept-Guided Alignment for\u00a0Vision-Language 
Models"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-5382-2423","authenticated-orcid":false,"given":"Xue","family":"Wang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8006-4845","authenticated-orcid":false,"given":"Huijie","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0120-8335","authenticated-orcid":false,"given":"Jialu","family":"Dong","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0004-4131-2147","authenticated-orcid":false,"given":"Yiming","family":"Lin","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-1857-7357","authenticated-orcid":false,"given":"Xin","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,23]]},"reference":[{"key":"36_CR1","unstructured":"Li, J., Li, D., et al.: BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: Proceedings of the 39th International Conference on Machine Learning, vol. 162, pp. 12888\u201312900 (2022)"},{"key":"36_CR2","unstructured":"Radford, A., Kim, J.W., Hallacy, C., Ramesh A., Goh, G.: Learning transferable visual models from natural language supervision. In: Proceedings of the 38th International Conference on Machine Learning, vol. 139, pp. 8748\u20138763 (2021)"},{"key":"36_CR3","unstructured":"Yoon, M., Koh, J.Y., Hooi, B., Salakhutdinov, R.: Multimodal graph learning for generative tasks. arXiv preprint arXiv:2310.07478 (2023)"},{"key":"36_CR4","doi-asserted-by":"crossref","unstructured":"Wortsman, M., Ilharco, G., Kim, J.W., Li, M., et al.: Robust fine-tuning of zero-shot models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7959\u20137971. 
IEEE (2022)","DOI":"10.1109\/CVPR52688.2022.00780"},{"key":"36_CR5","unstructured":"Gao, Y., et al.: Pyramidclip: hierarchical feature alignment for vision-language model pretraining. In: Advances in Neural Information Processing Systems, vol. 35, pp. 35959\u201335970 (2022)"},{"key":"36_CR6","unstructured":"Eslami, S., Demelo, G.: Mitigate the gap: investigating approaches for improving cross-modal alignment in clip. arXiv preprint arXiv:2406.17639 (2024)"},{"key":"36_CR7","unstructured":"Menon, S., Vondrick, C.: Visual classification via description from large language models. arXiv preprint arXiv:2210.07183 (2022)"},{"key":"36_CR8","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Saligrama, V.: Zero-shot learning via semantic similarity embedding. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4166\u20134174. IEEE (2015)","DOI":"10.1109\/ICCV.2015.474"},{"issue":"9","key":"36_CR9","doi-asserted-by":"publisher","first-page":"3241","DOI":"10.3390\/s21093241","volume":"21","author":"J Liu","year":"2021","unstructured":"Liu, J., Shi, C., Tu, D., Shi, Z., Liu, Y.: Zero-shot image classification based on a learnable deep metric. Sensors 21(9), 3241 (2021)","journal-title":"Sensors"},{"key":"36_CR10","unstructured":"Li, J., Li, H., Erfani, S., et al.: Visual-text cross alignment: refining the similarity score in vision-language models. arXiv preprint arXiv:2406.02915 (2024)"},{"key":"36_CR11","unstructured":"Nam, G., Heo, B., Lee, J.: LipSum-FT: robust fine-tuning of zero-shot models using random text guidance. arXiv preprint arXiv:2404.00860 (2024)"},{"issue":"2","key":"36_CR12","doi-asserted-by":"publisher","first-page":"581","DOI":"10.1007\/s11263-023-01891-x","volume":"132","author":"P Gao","year":"2024","unstructured":"Gao, P., et al.: Clip-adapter: better vision-language models with feature adapters. Int. J. Comput. Vision 132(2), 581\u2013595 (2024)","journal-title":"Int. J. Comput. 
Vision"},{"key":"36_CR13","unstructured":"Zhang, R., Fang, R., Zhang, W., et al.: Tip-adapter: training-free clip-adapter for better vision-language modeling. arXiv preprint arXiv:2111.03930 (2021)"},{"key":"36_CR14","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778. IEEE (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"7","key":"36_CR15","doi-asserted-by":"publisher","first-page":"4612","DOI":"10.1109\/TPAMI.2024.3357717","volume":"46","author":"S Kim","year":"2024","unstructured":"Kim, S., Chae, D.K.: What does a model really look at?: Extracting model-oriented concepts for explaining deep neural networks. IEEE Trans. Pattern Anal. Mach. Intell. 46(7), 4612\u20134624 (2024)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"36_CR16","unstructured":"Grattafiori, A., Dubey, A., Jauhri, A., Pandey, A., et al.: The llama 3 herd of models. arXiv preprint arXiv:2407.21783 (2024)"},{"key":"36_CR17","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W. J.: BLEU: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"36_CR18","unstructured":"Lin, C. Y.: ROUGE: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 74\u201381 (2004)"},{"key":"36_CR19","doi-asserted-by":"publisher","first-page":"103","DOI":"10.1613\/jair.1.13715","volume":"77","author":"S Gehrmann","year":"2023","unstructured":"Gehrmann, S., Clark, E., Sellam, T.: Repairing the cracked foundation: a survey of obstacles in evaluation practices for generated text. J. Artif. Intell. Res. 77, 103\u2013166 (2023)","journal-title":"J. Artif. Intell. 
Res."},{"key":"36_CR20","doi-asserted-by":"crossref","unstructured":"Chiang, C.H., Lee, H.Y.: Can large language models be an alternative to human evaluations? In: Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics, pp. 15607\u201315631 (2023)","DOI":"10.18653\/v1\/2023.acl-long.870"},{"key":"36_CR21","unstructured":"Achiam, J., Adler, S., Agarwal, S., Ahmad, L., Akkaya, I., et al.: GPT-4 technical report. arXiv preprint arXiv:2303.08774 (2023)"},{"key":"36_CR22","unstructured":"Hu, E.J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., et al.: LoRA: low-rank adaptation of large language models. In: International Conference on Learning Representations, vol. 1, no. 2, p. 3 (2022)"},{"key":"36_CR23","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"36_CR24","unstructured":"Welinder, P., et al.: Caltech-UCSD birds, 200, 11 (2010)"},{"key":"36_CR25","doi-asserted-by":"crossref","unstructured":"Cimpoi, M., Maji, S., Kokkinos, I., Mohamed, S., Vedaldi, A.: Describing textures in the wild. In: 2014 IEEE Conference on Computer Vision and Pattern Recognition, pp. 3606\u20133613. IEEE (2014)","DOI":"10.1109\/CVPR.2014.461"},{"key":"36_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"446","DOI":"10.1007\/978-3-319-10599-4_29","volume-title":"Computer Vision \u2013 ECCV 2014","author":"L Bossard","year":"2014","unstructured":"Bossard, L., Guillaumin, M., Van Gool, L.: Food-101 \u2013 mining discriminative components with random forests. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8694, pp. 446\u2013461. Springer, Cham (2014). 
https:\/\/doi.org\/10.1007\/978-3-319-10599-4_29"},{"key":"36_CR27","doi-asserted-by":"crossref","unstructured":"Parkhi, O. M., Vedaldi, A., et al.: Cats and dogs. In: 2012 IEEE Conference on Computer Vision and Pattern Recognition, pp. 3498\u20133505. IEEE (2012)","DOI":"10.1109\/CVPR.2012.6248092"},{"key":"36_CR28","unstructured":"Shu, M., et al.: Test-time prompt tuning for zero-shot generalization in vision-language models. In: Advances in Neural Information Processing Systems, vol. 35, pp. 14274\u201314289 (2022)"},{"key":"36_CR29","unstructured":"Abdul Samadh, J., Gani, M.H., Hussein, N., et al.: Align your prompts: test-time prompting with distribution alignment for zero-shot generalization. In: Advances in Neural Information Processing Systems, vol. 36, pp. 80396\u201380413 (2023)"},{"key":"36_CR30","unstructured":"Ma, X., Zhang, J., Guo, S., Xu, W.: Swapprompt: test-time prompt adaptation for vision-language models. In: Advances in Neural Information Processing Systems, vol. 36, pp. 65252\u201365264 (2023)"},{"key":"36_CR31","doi-asserted-by":"crossref","unstructured":"Pratt, S., Covert, I., Liu, R., Farhadi, A.: What does a platypus look like? Generating customized prompts for zero-shot image classification. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 15691\u201315701 (2023)","DOI":"10.1109\/ICCV51070.2023.01438"},{"key":"36_CR32","doi-asserted-by":"crossref","unstructured":"Guo, Z., et al.: Calip: zero-shot enhancement of clip with parameter-free attention. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, pp. 746\u2013754 (2023)","DOI":"10.1609\/aaai.v37i1.25152"},{"key":"36_CR33","unstructured":"Qian, Q., Xu, Y., Hu, J.: Intra-modal proxy learning for zero-shot visual categorization with clip. In: Advances in Neural Information Processing Systems, vol. 36, pp. 
25461\u201325474 (2023)"},{"key":"36_CR34","unstructured":"Wang, Z., Liang, J., He, R., Xu, N., Wang, Z., Tan, T.: Improving zero-shot generalization for clip with synthesized prompts. arXiv preprint arXiv:2307.07397 (2023)"},{"key":"36_CR35","doi-asserted-by":"crossref","unstructured":"Roth, K., Kim, J.M., Koepke, A., Vinyals, O., Schmid, C., Akata, Z.: Waffling around for performance: visual classification with random words and broad concepts. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 15746\u201315757 (2023)","DOI":"10.1109\/ICCV51070.2023.01443"},{"issue":"9","key":"36_CR36","doi-asserted-by":"publisher","first-page":"2337","DOI":"10.1007\/s11263-022-01653-1","volume":"130","author":"K Zhou","year":"2022","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Learning to prompt for vision-language models. Int. J. Comput. Vision 130(9), 2337\u20132348 (2022)","journal-title":"Int. J. Comput. Vision"},{"key":"36_CR37","doi-asserted-by":"crossref","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Conditional prompt learning for vision-language models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16816\u201316825. IEEE (2022)","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"36_CR38","doi-asserted-by":"crossref","unstructured":"Khattak, M.U., Rasheed, H., Maaz, M., Khan, S., Khan, F.S.: Maple: multi-modal prompt learning. In: Proceedings of the IEEE\/CVF conference on Computer Vision and Pattern Recognition, pp. 19113\u201319122. IEEE (2023)","DOI":"10.1109\/CVPR52729.2023.01832"},{"key":"36_CR39","doi-asserted-by":"crossref","unstructured":"Khattak, M.U., Wasim, S.T., Naseer, M., Khan, S., Yang, M.H., Khan, F.S.: Self-regulating prompts: foundational model adaptation without forgetting. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 15190\u201315200. 
IEEE (2023)","DOI":"10.1109\/ICCV51070.2023.01394"},{"key":"36_CR40","doi-asserted-by":"crossref","unstructured":"Zheng, Z., Wei, J., Hu, X., Zhu, H., Nevatia, R.: Large language models are good prompt learners for low-shot image classification. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 28453\u201328462 (2024)","DOI":"10.1109\/CVPR52733.2024.02688"},{"key":"36_CR41","doi-asserted-by":"crossref","unstructured":"Chefer, H., Gur, S., Wolf, L.: Generic attention-model explainability for interpreting bi-modal and encoder-decoder transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 397\u2013406 (2021)","DOI":"10.1109\/ICCV48922.2021.00045"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-5567-3_36","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,22]],"date-time":"2026-01-22T21:14:41Z","timestamp":1769116481000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-5567-3_36"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9789819555666","9789819555673"],"references-count":41,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-5567-3_36","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"23 January 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition 
and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Shanghai","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 October 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 October 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/2025.prcv.cn\/index.asp","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}