{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T06:46:38Z","timestamp":1773125198650,"version":"3.50.1"},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2020AAA0109700"],"award-info":[{"award-number":["2020AAA0109700"]}]},{"name":"Guangdong Outstanding Youth Fund","award":["2021B1515020061"],"award-info":[{"award-number":["2021B1515020061"]}]},{"name":"Shenzhen Science and Technology Program","award":["RCYX20200714114642083"],"award-info":[{"award-number":["RCYX20200714114642083"]}]},{"DOI":"10.13039\/501100017607","name":"Shenzhen Municipal Fundamental Research Program","doi-asserted-by":"publisher","award":["JCYJ20190807154211365"],"award-info":[{"award-number":["JCYJ20190807154211365"]}],"id":[{"id":"10.13039\/501100017607","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Nansha Key Research and Development Program","award":["2022ZD014"],"award-info":[{"award-number":["2022ZD014"]}]},{"DOI":"10.13039\/501100002402","name":"Sun Yat-sen University","doi-asserted-by":"publisher","award":["22lgqb38"],"award-info":[{"award-number":["22lgqb38"]}],"id":[{"id":"10.13039\/501100002402","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002402","name":"Sun Yat-sen University","doi-asserted-by":"publisher","award":["76160-12220011"],"award-info":[{"award-number":["76160-12220011"]}],"id":[{"id":"10.13039\/501100002402","id-type":"DOI","asserted-by":"publisher"}]},{"name":"MindSpore"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. 
Learning Syst."],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1109\/tnnls.2023.3293484","type":"journal-article","created":{"date-parts":[[2023,7,28]],"date-time":"2023-07-28T17:34:15Z","timestamp":1690565655000},"page":"16277-16287","source":"Crossref","is-referenced-by-count":17,"title":["Fine-Grained Visual\u2013Text Prompt-Driven Self-Training for Open-Vocabulary Object Detection"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8784-8343","authenticated-orcid":false,"given":"Yanxin","family":"Long","sequence":"first","affiliation":[{"name":"School of Intelligent Systems Engineering, Sun Yat-sen University at Shenzhen, Shenzhen, China"}]},{"given":"Jianhua","family":"Han","sequence":"additional","affiliation":[{"name":"Huawei Noah&#x2019;s Ark Lab, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8910-6300","authenticated-orcid":false,"given":"Runhui","family":"Huang","sequence":"additional","affiliation":[{"name":"School of Intelligent Systems Engineering, Sun Yat-sen University at Shenzhen, Shenzhen, China"}]},{"given":"Hang","family":"Xu","sequence":"additional","affiliation":[{"name":"Huawei Noah&#x2019;s Ark Lab, Shanghai, China"}]},{"given":"Yi","family":"Zhu","sequence":"additional","affiliation":[{"name":"Huawei Noah&#x2019;s Ark Lab, Shanghai, China"}]},{"given":"Chunjing","family":"Xu","sequence":"additional","affiliation":[{"name":"Huawei Noah&#x2019;s Ark Lab, Shanghai, China"}]},{"given":"Xiaodan","family":"Liang","sequence":"additional","affiliation":[{"name":"School of Intelligent Systems Engineering, Sun Yat-sen University at Shenzhen, Shenzhen, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-009-0275-4"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00550"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01246-5_24"},{"key":"ref5","article-title":"Zero-shot object detection by hybrid region embedding","author":"Demirel","year":"2018","journal-title":"arXiv:1805.06157"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00618"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-69535-4_10"},{"key":"ref8","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Jia"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"ref10","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021","journal-title":"arXiv:2103.00020"},{"key":"ref11","article-title":"Towards open vocabulary object detection without human-provided bounding boxes","author":"Gao","year":"2021","journal-title":"arXiv:2111.09452"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20077-9_21"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20077-9_41"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"ref15","article-title":"Learning to prompt for vision-language models","author":"Zhou","year":"2021","journal-title":"arXiv:2109.01134"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00852"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.3011807"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2021.3140070"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2022.3184821"},{"key":"ref20","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Li"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01416"},{"key":"ref22","article-title":"Open-vocabulary object detection via vision and language knowledge distillation","author":"Gu","year":"2021","journal-title":"arXiv:2104.13921"},{"key":"ref23","first-page":"33781","article-title":"Bridging the gap between object and image-level representations for open-vocabulary detection","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"35","author":"Bangalath"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2022.3145962"},{"key":"ref26","article-title":"VisualBERT: A simple and performant baseline for vision and language","author":"Li","year":"2019","journal-title":"arXiv:1908.03557"},{"key":"ref27","article-title":"M6: A Chinese multimodal pretrainer","author":"Lin","year":"2021","journal-title":"arXiv:2103.00823"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2102.12092"},{"key":"ref29","article-title":"ERNIE-ViLG: Unified generative pre-training for bidirectional vision-language generation","author":"Zhang","year":"2021","journal-title":"arXiv:2112.15283"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2813306"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2817340"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1250"},{"key":"ref33","article-title":"It\u2019s not just size that matters: Small language models are also few-shot learners","author":"Schick","year":"2020","journal-title":"arXiv:2009.07118"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/taffc.2022.3204972"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"ref36","article-title":"CPT: Colorful prompt tuning for pre-trained vision-language models","author":"Yao","year":"2021","journal-title":"arXiv:2109.11797"},{"key":"ref37","article-title":"Extract free dense labels from CLIP","author":"Zhou","year":"2021","journal-title":"arXiv:2112.01071"},{"key":"ref38","article-title":"CLIP-adapter: Better vision-language models with feature adapters","author":"Gao","year":"2021","journal-title":"arXiv:2110.04544"},{"key":"ref39","article-title":"GPT understands, too","author":"Liu","year":"2021","journal-title":"arXiv:2103.10385"},{"key":"ref40","article-title":"ActionCLIP: A new paradigm for video action recognition","author":"Wang","year":"2021","journal-title":"arXiv:2109.08472"},{"key":"ref41","first-page":"91","article-title":"Faster R-CNN: Towards real-time object detection with region proposal networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Ren"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00294"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01171"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6868"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1906.07155"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"issue":"11","key":"ref47","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"Van der Maaten","year":"2008","journal-title":"J. Mach. Learn. 
Res."}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10737991\/10197240.pdf?arnumber=10197240","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T19:27:32Z","timestamp":1732735652000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10197240\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11]]},"references-count":47,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2023.3293484","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11]]}}}