{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T12:28:47Z","timestamp":1775737727084,"version":"3.50.1"},"publisher-location":"Cham","reference-count":48,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031200793","type":"print"},{"value":"9783031200809","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20080-9_42","type":"book-chapter","created":{"date-parts":[[2022,11,2]],"date-time":"2022-11-02T19:59:12Z","timestamp":1667419152000},"page":"728-755","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":263,"title":["Simple Open-Vocabulary Object Detection"],"prefix":"10.1007","author":[{"given":"Matthias","family":"Minderer","sequence":"first","affiliation":[]},{"given":"Alexey","family":"Gritsenko","sequence":"additional","affiliation":[]},{"given":"Austin","family":"Stone","sequence":"additional","affiliation":[]},{"given":"Maxim","family":"Neumann","sequence":"additional","affiliation":[]},{"given":"Dirk","family":"Weissenborn","sequence":"additional","affiliation":[]},{"given":"Alexey","family":"Dosovitskiy","sequence":"additional","affiliation":[]},{"given":"Aravindh","family":"Mahendran","sequence":"additional","affiliation":[]},{"given":"Anurag","family":"Arnab","sequence":"additional","affiliation":[]},{"given":"Mostafa","family":"Dehghani","sequence":"additional","affiliation":[]},{"given":"Zhuoran","family":"Shen","sequence":"additional","affiliation":[]},{"given":"Xiao","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xiaohua","family":"Zhai","sequence":"additional","affiliation":[]},{"given":"Thomas","family":"Kipf","sequence":"additional","affiliation":[]},{"given":"Neil","family":"Houlsby","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,3]]},"reference":[{"key":"42_CR1","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lu\u010di\u0107, M., Schmid, C.: ViViT: a video vision transformer. In: ICCV, pp. 6836\u20136846 (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"42_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"397","DOI":"10.1007\/978-3-030-01246-5_24","volume-title":"Computer Vision \u2013 ECCV 2018","author":"A Bansal","year":"2018","unstructured":"Bansal, A., Sikka, K., Sharma, G., Chellappa, R., Divakaran, A.: Zero-shot object detection. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11205, pp. 397\u2013414. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01246-5_24"},{"key":"42_CR3","unstructured":"Bello, I., et al.: Revisiting ResNets: improved training and scaling strategies. In: NeurIPS, vol. 
34 (2021)"},{"issue":"3","key":"42_CR4","doi-asserted-by":"publisher","first-page":"546","DOI":"10.1109\/TPAMI.2015.2453950","volume":"38","author":"SK Biswas","year":"2016","unstructured":"Biswas, S.K., Milanfar, P.: One shot detection with laplacian object and fast matrix cosine similarity. IEEE Trans. Pattern Anal. Mach. Intell. 38(3), 546\u2013562 (2016)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"42_CR5","unstructured":"Bradbury, J., et al.: JAX: composable transformations of Python+NumPy programs (2018). http:\/\/github.com\/google\/jax"},{"key":"42_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"42_CR7","doi-asserted-by":"crossref","unstructured":"Chen, D.J., Hsieh, H.Y., Liu, T.L.: Adaptive image transformer for one-shot object detection. In: CVPR, pp. 12242\u201312251 (2021)","DOI":"10.1109\/CVPR46437.2021.01207"},{"key":"42_CR8","doi-asserted-by":"crossref","unstructured":"Dehghani, M., Gritsenko, A.A., Arnab, A., Minderer, M., Tay, Y.: SCENIC: a JAX library for computer vision research and beyond. arXiv preprint arXiv:2110.11403 (2021)","DOI":"10.1109\/CVPR52688.2022.02070"},{"key":"42_CR9","unstructured":"Fang, Y., et al.: You only look at one sequence: rethinking transformer in vision through object detection. In: NeurIPS, vol. 34 (2021)"},{"key":"42_CR10","unstructured":"Frome, A., et al.: Devise: a deep visual-semantic embedding model. In: NeurIPS. vol. 26 (2013)"},{"key":"42_CR11","doi-asserted-by":"crossref","unstructured":"Ghiasi, G., et al.: Simple copy-paste is a strong data augmentation method for instance segmentation. In: CVPR, pp. 2918\u20132928 (2021)","DOI":"10.1109\/CVPR46437.2021.00294"},{"key":"42_CR12","unstructured":"Gu, X., Lin, T.Y., Kuo, W., Cui, Y.: Open-vocabulary object detection via vision and language knowledge distillation. arXiv preprint arXiv:2104.13921 (2021)"},{"key":"42_CR13","doi-asserted-by":"crossref","unstructured":"Gupta, A., Dollar, P., Girshick, R.: LVIS: a dataset for large vocabulary instance segmentation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00550"},{"key":"42_CR14","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Dollar, P., Girshick, R.: Mask R-CNN. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"42_CR15","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"42_CR16","unstructured":"Hsieh, T.I., Lo, Y.C., Chen, H.T., Liu, T.L.: One-shot object detection with co-attention and co-excitation. In: NeurIPS, vol. 32. Curran Associates, Inc. (2019)"},{"key":"42_CR17","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"646","DOI":"10.1007\/978-3-319-46493-0_39","volume-title":"Computer Vision \u2013 ECCV 2016","author":"G Huang","year":"2016","unstructured":"Huang, G., Sun, Yu., Liu, Z., Sedra, D., Weinberger, K.Q.: Deep networks with stochastic depth. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) 
ECCV 2016. LNCS, vol. 9908, pp. 646\u2013661. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_39"},{"key":"42_CR18","unstructured":"Huang, Z., Zeng, Z., Liu, B., Fu, D., Fu, J.: Pixel-BERT: aligning image pixels with text by deep multi-modal transformers. arXiv preprint arXiv:2004.00849 (2020)"},{"key":"42_CR19","unstructured":"Jia, C., et al.: Scaling up visual and vision-language representation learning with noisy text supervision. In: ICML, vol. 139, pp. 4904\u20134916. PMLR (2021)"},{"key":"42_CR20","doi-asserted-by":"crossref","unstructured":"Kamath, A., Singh, M., LeCun, Y., Synnaeve, G., Misra, I., Carion, N.: MDETR - modulated detection for end-to-end multi-modal understanding. In: ICCV, pp. 1780\u20131790 (2021)","DOI":"10.1109\/ICCV48922.2021.00180"},{"key":"42_CR21","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"491","DOI":"10.1007\/978-3-030-58558-7_29","volume-title":"Computer Vision \u2013 ECCV 2020","author":"A Kolesnikov","year":"2020","unstructured":"Kolesnikov, A., et al.: Big Transfer (BiT): general visual representation learning. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12350, pp. 491\u2013507. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58558-7_29"},{"key":"42_CR22","unstructured":"Kolesnikov, A., et al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. In: ICLR (2021)"},{"issue":"1","key":"42_CR23","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1007\/s11263-016-0981-7","volume":"123","author":"R Krishna","year":"2017","unstructured":"Krishna, R., et al.: Visual genome: connecting language and vision using crowdsourced dense image annotations. Int. J. Comput. Vision 123(1), 32\u201373 (2017)","journal-title":"Int. J. Comput. Vision"},{"issue":"7","key":"42_CR24","doi-asserted-by":"publisher","first-page":"1956","DOI":"10.1007\/s11263-020-01316-z","volume":"128","author":"A Kuznetsova","year":"2020","unstructured":"Kuznetsova, A.: The open images dataset V4. Int. J. Comput. Vision 128(7), 1956\u20131981 (2020)","journal-title":"Int. J. Comput. Vision"},{"key":"42_CR25","unstructured":"Lee, J., Lee, Y., Kim, J., Kosiorek, A.R., Choi, S., Teh, Y.W.: Set transformer: a framework for attention-based permutation-invariant neural networks. In: ICML, Proceedings of Machine Learning Research, vol. 97, pp. 3744\u20133753. PMLR (2019)"},{"key":"42_CR26","unstructured":"Li, L.H., et al.: Grounded language-image pre-training. arXiv preprint arXiv:2112.03857 (2021)"},{"key":"42_CR27","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"42_CR28","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"21","DOI":"10.1007\/978-3-319-46448-0_2","volume-title":"Computer Vision \u2013 ECCV 2016","author":"W Liu","year":"2016","unstructured":"Liu, W., et al.: SSD: single shot multibox detector. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9905, pp. 21\u201337. Springer, Cham (2016). 
https:\/\/doi.org\/10.1007\/978-3-319-46448-0_2"},{"key":"42_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"185","DOI":"10.1007\/978-3-030-01216-8_12","volume-title":"Computer Vision \u2013 ECCV 2018","author":"D Mahajan","year":"2018","unstructured":"Mahajan, D.: Exploring the limits of weakly supervised pretraining. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11206, pp. 185\u2013201. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01216-8_12"},{"key":"42_CR30","unstructured":"Michaelis, C., Ustyuzhaninov, I., Bethge, M., Ecker, A.S.: One-shot instance segmentation. arXiv preprint arXiv:1811.11507 (2018)"},{"key":"42_CR31","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"635","DOI":"10.1007\/978-3-030-58555-6_38","volume-title":"Computer Vision \u2013 ECCV 2020","author":"A Osokin","year":"2020","unstructured":"Osokin, A., Sumin, D., Lomakin, V.: OS2D: one-stage one-shot object detection by matching anchor features. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12360, pp. 635\u2013652. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58555-6_38"},{"key":"42_CR32","unstructured":"Pham, H., et al.: Combined scaling for zero-shot transfer learning. arXiv preprint arXiv:2111.10050 (2021)"},{"key":"42_CR33","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: ICML, 18\u201324 July 2021, vol. 139, pp. 8748\u20138763. PMLR (2021)"},{"key":"42_CR34","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. In: NeurIPS, vol. 28. Curran Associates, Inc. (2015)"},{"key":"42_CR35","doi-asserted-by":"crossref","unstructured":"Shao, S., et al.: Objects365: a large-scale, high-quality dataset for object detection. In: ICCV, pp. 8429\u20138438 (2019)","DOI":"10.1109\/ICCV.2019.00852"},{"key":"42_CR36","unstructured":"Socher, R., Ganjoo, M., Manning, C.D., Ng, A.: Zero-shot learning through cross-modal transfer. In: NeurIPS, vol. 26 (2013)"},{"key":"42_CR37","unstructured":"Song, H., et al.: ViDT: an efficient and effective fully transformer-based object detector. In: ICLR (2022)"},{"key":"42_CR38","unstructured":"Steiner, A., Kolesnikov, A., Zhai, X., Wightman, R., Uszkoreit, J., Beyer, L.: How to train your ViT? data, augmentation, and regularization in vision transformers. arXiv preprint arXiv:2106.10270 (2021)"},{"key":"42_CR39","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., Jegou, H.: Training data-efficient image transformers and distillation through attention. In: ICML, vol. 139, pp. 10347\u201310357 (2021)"},{"issue":"9","key":"42_CR40","doi-asserted-by":"publisher","first-page":"2251","DOI":"10.1109\/TPAMI.2018.2857768","volume":"41","author":"Y Xian","year":"2018","unstructured":"Xian, Y., Lampert, C.H., Schiele, B., Akata, Z.: Zero-shot learning-a comprehensive evaluation of the good, the bad and the ugly. IEEE Trans. Pattern Anal. Mach. Intell. 41(9), 2251\u20132265 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"42_CR41","unstructured":"Yao, Z., Ai, J., Li, B., Zhang, C.: Efficient detr: improving end-to-end object detector with dense prior. 
arXiv preprint arXiv:2104.01318 (2021)"},{"key":"42_CR42","doi-asserted-by":"crossref","unstructured":"Zareian, A., Rosa, K.D., Hu, D.H., Chang, S.F.: Open-vocabulary object detection using captions. In: CVPR, pp. 14393\u201314402 (2021)","DOI":"10.1109\/CVPR46437.2021.01416"},{"key":"42_CR43","doi-asserted-by":"crossref","unstructured":"Zhai, X., Kolesnikov, A., Houlsby, N., Beyer, L.: Scaling vision transformers. arXiv preprint arXiv:2106.04560 (2021)","DOI":"10.1109\/CVPR52688.2022.01179"},{"key":"42_CR44","doi-asserted-by":"crossref","unstructured":"Zhai, X., et al.: LiT: zero-shot transfer with locked-image text tuning. arXiv preprint arXiv:2111.07991 (2021)","DOI":"10.1109\/CVPR52688.2022.01759"},{"key":"42_CR45","doi-asserted-by":"crossref","unstructured":"Zhong, Y., et al.: RegionCLIP: region-based language-image pretraining. arXiv preprint arXiv:2112.09106 (2021)","DOI":"10.1109\/CVPR52688.2022.01629"},{"key":"42_CR46","doi-asserted-by":"crossref","unstructured":"Zhou, X., Girdhar, R., Joulin, A., Kr\u00e4henb\u00fchl, P., Misra, I.: Detecting twenty-thousand classes using image-level supervision. In: arXiv preprint arXiv:2201.02605 (2021)","DOI":"10.1007\/978-3-031-20077-9_21"},{"key":"42_CR47","unstructured":"Zhou, X., Koltun, V., Kr\u00e4henb\u00fchl, P.: Probabilistic two-stage detection. arXiv preprint arXiv:2103.07461 (2021)"},{"key":"42_CR48","unstructured":"Zhu, X., Su, W., Lu, L., Li, B., Wang, X., Dai, J.: Deformable DETR: deformable transformers for end-to-end object detection. In: ICLR (2021)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20080-9_42","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,7]],"date-time":"2022-11-07T00:36:30Z","timestamp":1667781390000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20080-9_42"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031200793","9783031200809"],"references-count":48,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20080-9_42","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"3 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 
October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
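
The JSON above is a Crossref work record (message-type "work") for the chapter. As a minimal sketch, assuming the `requests` package is available and the standard Crossref REST API endpoint `https://api.crossref.org/works/{DOI}` (which returns records wrapped in a "status"/"message" envelope like the one shown), the record can be fetched and a few fields read as follows; the variable names are illustrative only:

import requests

DOI = "10.1007/978-3-031-20080-9_42"

# Crossref wraps the work record in {"status": ..., "message": {...}},
# matching the structure of the JSON above.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# "title" and "container-title" are lists of strings; "author" is a list of
# {"given": ..., "family": ...} dicts, as in the record above.
title = work["title"][0]
authors = [f'{a["given"]} {a["family"]}' for a in work.get("author", [])]
container = work.get("container-title", [])
n_refs = work.get("references-count")

print(title)
print(", ".join(authors))
print("; ".join(container))
print(f"{n_refs} references, cited {work.get('is-referenced-by-count')} times")

Run against this DOI, the sketch would print the chapter title, the fourteen authors, the "Lecture Notes in Computer Science" / "Computer Vision – ECCV 2022" container titles, and the reference and citation counts recorded in the fields above.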