{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,21]],"date-time":"2026-04-21T14:54:49Z","timestamp":1776783289127,"version":"3.51.2"},"reference-count":80,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62271143"],"award-info":[{"award-number":["62271143"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Frontier Technology Research and Development Program of Jiangsu Province","award":["BF2024060"],"award-info":[{"award-number":["BF2024060"]}]},{"DOI":"10.13039\/100017045","name":"Big Data Computing Center of Southeast University","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100017045","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1109\/tcsvt.2024.3495533","type":"journal-article","created":{"date-parts":[[2024,11,11]],"date-time":"2024-11-11T18:46:43Z","timestamp":1731350803000},"page":"2215-2229","source":"Crossref","is-referenced-by-count":2,"title":["Variational Feature Imitation Conditioned on Visual Descriptions for Few-Shot Fine-Grained Recognition"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-7669-1674","authenticated-orcid":false,"given":"Xin","family":"Lu","sequence":"first","affiliation":[{"name":"School of Automation and the Key Laboratory of Measurement and Control of Complex Systems of Engineering, Ministry of Education, Southeast University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-6517-9739","authenticated-orcid":false,"given":"Yixuan","family":"Pan","sequence":"additional","affiliation":[{"name":"School of Automation and the Key Laboratory of Measurement and Control of Complex Systems of Engineering, Ministry of Education, Southeast University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2997-4012","authenticated-orcid":false,"given":"Yichao","family":"Cao","sequence":"additional","affiliation":[{"name":"School of Automation and the Key Laboratory of Measurement and Control of Complex Systems of Engineering, Ministry of Education, Southeast University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-5525-435X","authenticated-orcid":false,"given":"Xin","family":"Zhou","sequence":"additional","affiliation":[{"name":"Nanjing Enbo Technology Company Ltd., Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7707-7538","authenticated-orcid":false,"given":"Xiaobo","family":"Lu","sequence":"additional","affiliation":[{"name":"School of Automation and the Key Laboratory of Measurement and Control of Complex Systems of Engineering, Ministry of Education, Southeast University, Nanjing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3083257"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20077-9_4"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3126648"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612043"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33019291"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612221"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3370731"},{"key":"ref8","first-page":"1","article-title":"Cross attention network for few-shot classification","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Hou"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICEIEC49280.2020.9152261"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2022.3233553"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3248041"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3240195"},{"key":"ref13","first-page":"1","article-title":"Probabilistic model-agnostic metalearning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Finn"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01091"},{"key":"ref15","first-page":"1126","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","volume-title":"Proc. 34th Int. Conf. Mach. Learn.","volume":"70","author":"Finn"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00131"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.109381"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413884"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.3001510"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/152"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.3043128"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2020.2977553"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3275382"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i3.25383"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2024.110485"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.108792"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3369870"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00869"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00526"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3236636"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i5.28208"},{"issue":"1","key":"ref32","first-page":"1","article-title":"Novel dataset for fine-grained image categorization: Stanford dogs","volume-title":"Proc. CVPR Workshop Fine-Grained Vis. Categorization (FGVC)","volume":"2","author":"Khosla"},{"key":"ref33","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref34","first-page":"23","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Alayrac"},{"key":"ref35","first-page":"19","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02147"},{"key":"ref37","first-page":"1","article-title":"Detecting any human-object interaction relationship: Universal hoi detector with spatial prompt learning on foundation models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Cao"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00507"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.123079"},{"key":"ref40","first-page":"1","article-title":"Learning structured output representation using deep conditional generative models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Sohn"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.2990606"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00017"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2924811"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1312.6114"},{"key":"ref45","first-page":"1","article-title":"Beta-vae: Learning basic visual concepts with a constrained variational framework","volume-title":"Proc. ICLR","volume":"3","author":"Higgins"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00177"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3052785"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00844"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2020.2968304"},{"issue":"1","key":"ref50","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1109\/TCSVT.2010.2087812","article-title":"Boosting variational inference with margin learning for few-shot scene-adaptive anomaly detection","volume":"1","author":"Huang","year":"2022","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"ref51","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","volume":"139","author":"Jia"},{"key":"ref52","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Ramesh"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00271"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-023-01891-x"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01653-1"},{"key":"ref56","article-title":"Exploring visual prompts for adapting large-scale models","author":"Bahng","year":"2022","journal-title":"arXiv:2203.17274"},{"key":"ref57","first-page":"1","article-title":"Fine-grained visual prompting","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Yang"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19827-4_41"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093476"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/ICME.2019.00024"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00676"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00743"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00855"},{"key":"ref64","first-page":"21981","article-title":"Crosstransformers: Spatially-aware few-shot transfer","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Doersch"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00792"},{"key":"ref66","article-title":"The Caltech-UCSD birds-200-2011 dataset","author":"Wah","year":"2011"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2013.77"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00914"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00672"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20196"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547997"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00745"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01155"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1807.06521"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00913"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref77","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Li"},{"key":"ref78","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-demo.3"},{"key":"ref80","article-title":"EVA-CLIP-18B: Scaling CLIP to 18 billion parameters","author":"Sun","year":"2024","journal-title":"arXiv:2402.04252"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/76\/10916540\/10750049.pdf?arnumber=10750049","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,7]],"date-time":"2025-03-07T18:51:57Z","timestamp":1741373517000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10750049\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3]]},"references-count":80,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2024.3495533","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3]]}}}