{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:01:15Z","timestamp":1776884475307,"version":"3.51.2"},"reference-count":63,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1109\/tcsvt.2025.3557474","type":"journal-article","created":{"date-parts":[[2025,4,3]],"date-time":"2025-04-03T16:08:02Z","timestamp":1743696482000},"page":"9562-9574","source":"Crossref","is-referenced-by-count":4,"title":["Progressive Multi-Prompt Learning for Vision-Language Models"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0009-0007-4759-2276","authenticated-orcid":false,"given":"Jun","family":"Liu","sequence":"first","affiliation":[{"name":"School of Aeronautics and Astronautics, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-3579-9130","authenticated-orcid":false,"given":"Ziqian","family":"Lu","sequence":"additional","affiliation":[{"name":"School of Aeronautics and Astronautics, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3233-326X","authenticated-orcid":false,"given":"Hao","family":"Luo","sequence":"additional","affiliation":[{"name":"School of Aeronautics and Astronautics, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1785-7847","authenticated-orcid":false,"given":"Zheming","family":"Lu","sequence":"additional","affiliation":[{"name":"School of Aeronautics and Astronautics, Zhejiang University, Hangzhou, China"}]},{"given":"Yangming","family":"Zheng","sequence":"additional","affiliation":[{"name":"School of Aeronautics and Astronautics, Zhejiang University, Hangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref2","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","volume":"139","author":"Jia"},{"key":"ref3","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298685"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00514"},{"key":"ref8","article-title":"PLOT: Prompt learning with optimal transport for vision-language models","author":"Chen","year":"2022","journal-title":"arXiv:2210.01253"},{"key":"ref9","first-page":"12116","article-title":"Do vision transformers see like convolutional neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Raghu"},{"key":"ref10","article-title":"UNIMO: Towards unified-modal understanding and generation via cross-modal contrastive learning","author":"Li","year":"2020","journal-title":"arXiv:2012.15409"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01101"},{"key":"ref12","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Li"},{"key":"ref13","article-title":"Florence: A new foundation model for computer vision","author":"Yuan","year":"2021","journal-title":"arXiv:2111.11432"},{"key":"ref14","article-title":"SimVLM: Simple visual language model pretraining with weak supervision","author":"Wang","year":"2021","journal-title":"arXiv:2108.10904"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3235704"},{"key":"ref16","article-title":"Open-vocabulary object detection via vision and language knowledge distillation","author":"Gu","year":"2021","journal-title":"arXiv:2104.13921"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20077-9_7"},{"key":"ref18","article-title":"Tip-adapter: Training-free CLIP-adapter for better vision-language modeling","author":"Zhang","year":"2021","journal-title":"arXiv:2111.03930"},{"key":"ref19","article-title":"Unsupervised prompt learning for vision-language models","author":"Huang","year":"2022","journal-title":"arXiv:2204.03649"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2022.11.003"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01759"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3432753"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01129"},{"key":"ref24","article-title":"Language-driven semantic segmentation","author":"Li","year":"2022","journal-title":"arXiv:2201.03546"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00683"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3301933"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3395352"},{"issue":"8","key":"ref28","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1250"},{"key":"ref30","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. NIPS","volume":"33","author":"Brown"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"ref32","article-title":"Prefix-tuning: Optimizing continuous prompts for generation","author":"Lisa Li","year":"2021","journal-title":"arXiv:2101.00190"},{"key":"ref33","article-title":"P-tuning v2: Prompt tuning can be comparable to fine-tuning universally across scales and tasks","author":"Liu","year":"2021","journal-title":"arXiv:2110.07602"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3327605"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3245584"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3424566"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19827-4_41"},{"key":"ref39","article-title":"Exploring visual prompts for adapting large-scale models","author":"Bahng","year":"2022","journal-title":"arXiv:2203.17274"},{"key":"ref40","article-title":"CPT: Colorful prompt tuning for pre-trained vision-language models","author":"Yao","year":"2021","journal-title":"arXiv:2109.11797"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00274"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01832"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01394"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681485"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.537"},{"key":"ref46","first-page":"3301","article-title":"Shallow-deep networks: Understanding and mitigating network overthinking","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kaya"},{"key":"ref47","first-page":"18330","article-title":"BERT loses patience: Fast and robust inference with early exit","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Zhou"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.204"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2004.383"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1212.0402"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10599-4_29"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2010.5539970"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.461"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/JSTARS.2019.2918242"},{"key":"ref56","article-title":"Fine-grained visual classification of aircraft","author":"Maji","year":"2013","journal-title":"arXiv:1306.5151"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6248092"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2013.77"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref60","first-page":"5389","article-title":"Do ImageNet classifiers generalize to ImageNet","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Recht"},{"key":"ref61","first-page":"10506","article-title":"Learning robust global representations by penalizing local predictive power","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Wang"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01501"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00823"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/76\/11192799\/10948506.pdf?arnumber=10948506","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,6]],"date-time":"2025-10-06T17:39:46Z","timestamp":1759772386000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10948506\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10]]},"references-count":63,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2025.3557474","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,10]]}}}