{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,30]],"date-time":"2026-03-30T18:13:40Z","timestamp":1774894420512,"version":"3.50.1"},"publisher-location":"Cham","reference-count":76,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031200526","type":"print"},{"value":"9783031200533","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20053-3_29","type":"book-chapter","created":{"date-parts":[[2022,11,5]],"date-time":"2022-11-05T16:21:52Z","timestamp":1667665312000},"page":"497-515","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":92,"title":["Three Things Everyone Should Know About Vision Transformers"],"prefix":"10.1007","author":[{"given":"Hugo","family":"Touvron","sequence":"first","affiliation":[]},{"given":"Matthieu","family":"Cord","sequence":"additional","affiliation":[]},{"given":"Alaaeldin","family":"El-Nouby","sequence":"additional","affiliation":[]},{"given":"Jakob","family":"Verbeek","sequence":"additional","affiliation":[]},{"given":"Herv\u00e9","family":"J\u00e9gou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,6]]},"reference":[{"key":"29_CR1","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lu\u010di\u0107, M., Schmid, C.: ViVit: a video vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6836\u20136846 (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"29_CR2","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)"},{"key":"29_CR3","unstructured":"Bao, H., Dong, L., Wei, F.: BEiT: BERT pre-training of image transformers. arXiv preprint arXiv:2106.08254 (2021)"},{"key":"29_CR4","doi-asserted-by":"crossref","unstructured":"Bello, I., Zoph, B., Vaswani, A., Shlens, J., Le, Q.V.: Attention augmented convolutional networks. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3286\u20133295 (2019)","DOI":"10.1109\/ICCV.2019.00338"},{"key":"29_CR5","doi-asserted-by":"crossref","unstructured":"Berriel, R., et al.: Budget-aware adapters for multi-domain learning. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 382\u2013391 (2019)","DOI":"10.1109\/ICCV.2019.00047"},{"key":"29_CR6","unstructured":"Brown, T.B., et al.: Language models are few-shot learners. arXiv preprint arXiv:2005.14165 (2020)"},{"key":"29_CR7","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"29_CR8","doi-asserted-by":"crossref","unstructured":"Caron, M., et al.: Emerging properties in self-supervised vision transformers. arXiv preprint arXiv:2104.14294 (2021)","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"29_CR9","doi-asserted-by":"crossref","unstructured":"Chang, H., Zhang, H., Jiang, L., Liu, C., Freeman, W.T.: MaskGIT: masked generative image transformer. arXiv preprint arXiv:2202.04200 (2022)","DOI":"10.1109\/CVPR52688.2022.01103"},{"key":"29_CR10","doi-asserted-by":"crossref","unstructured":"Chen, X., Xie, S., He, K.: An empirical study of training self-supervised vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9640\u20139649 (2021)","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"29_CR11","doi-asserted-by":"crossref","unstructured":"d\u2019Ascoli, S., Touvron, H., Leavitt, M.L., Morcos, A.S., Biroli, G., Sagun, L.: ConViT: improving vision transformers with soft convolutional inductive biases. In: International Conference on Machine Learning, pp. 2286\u20132296. PMLR (2021)","DOI":"10.1088\/1742-5468\/ac9830"},{"key":"29_CR12","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: NAACL (2019)"},{"key":"29_CR13","doi-asserted-by":"crossref","unstructured":"Ding, X., Zhang, X., Ma, N., Han, J., Ding, G., Sun, J.: RepVGG: making VGG-style convnets great again. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13733\u201313742 (2021)","DOI":"10.1109\/CVPR46437.2021.01352"},{"key":"29_CR14","unstructured":"Ding, X., Zhang, X., Han, J., Ding, G.: RepMLP: re-parameterizing convolutions into fully-connected layers for image recognition. arXiv preprint arXiv:2105.01883 (2021)"},{"key":"29_CR15","unstructured":"Dong, X., et al.: PeCo: perceptual codebook for BERT pre-training of vision transformers. arXiv preprint arXiv:2111.12710 (2021)"},{"key":"29_CR16","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (2021)"},{"key":"29_CR17","unstructured":"El-Nouby, A., Izacard, G., Touvron, H., Laptev, I., Jegou, H., Grave, E.: Are large-scale datasets necessary for self-supervised pre-training? arXiv preprint arXiv:2112.10740 (2021)"},{"key":"29_CR18","unstructured":"El-Nouby, A., et al.: XCiT: cross-covariance image transformers. In: NeurIPS (2021)"},{"key":"29_CR19","doi-asserted-by":"crossref","unstructured":"Fan, H., et al.: Multiscale vision transformers. arXiv preprint arXiv:2104.11227 (2021)","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"29_CR20","unstructured":"Goyal, A., Bochkovskiy, A., Deng, J., Koltun, V.: Non-deep networks. arXiv preprint arXiv:2110.07641 (2021)"},{"key":"29_CR21","doi-asserted-by":"crossref","unstructured":"Graham, B., et al.: LeViT: a vision transformer in convnet\u2019s clothing for faster inference. arXiv preprint arXiv:2104.01136 (2021)","DOI":"10.1109\/ICCV48922.2021.01204"},{"key":"29_CR22","doi-asserted-by":"crossref","unstructured":"Guo, Y., Shi, H., Kumar, A., Grauman, K., Simunic, T., Feris, R.S.: SpotTune: transfer learning through adaptive fine-tuning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4805\u20134814 (2019)","DOI":"10.1109\/CVPR.2019.00494"},{"key":"29_CR23","unstructured":"Han, K., Xiao, A., Wu, E., Guo, J., Xu, C., Wang, Y.: Transformer in transformer. arXiv preprint arXiv:2103.00112 (2021)"},{"key":"29_CR24","doi-asserted-by":"crossref","unstructured":"He, K., Chen, X., Xie, S., Li, Y., Doll\u00e1r, P., Girshick, R.: Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377 (2021)","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"29_CR25","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"29_CR26","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Identity mappings in deep residual networks. arXiv preprint arXiv:1603.05027 (2016)","DOI":"10.1007\/978-3-319-46493-0_38"},{"key":"29_CR27","unstructured":"Hendrycks, D., Gimpel, K.: Gaussian error linear units (GELUs). arXiv preprint arXiv:1606.08415 (2016)"},{"key":"29_CR28","unstructured":"Horn, G.V., et al.: The inaturalist challenge 2017 dataset. arXiv preprint arXiv:1707.06642 (2017)"},{"key":"29_CR29","unstructured":"Houlsby, N., et al.: Parameter-efficient transfer learning for NLP. In: International Conference on Machine Learning, pp. 2790\u20132799. PMLR (2019)"},{"key":"29_CR30","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"646","DOI":"10.1007\/978-3-319-46493-0_39","volume-title":"Computer Vision \u2013 ECCV 2016","author":"G Huang","year":"2016","unstructured":"Huang, G., Sun, Yu., Liu, Z., Sedra, D., Weinberger, K.Q.: Deep networks with stochastic depth. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 646\u2013661. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_39"},{"key":"29_CR31","unstructured":"Hudson, D.A., Zitnick, C.L.: Generative adversarial transformers. In: International Conference on Machine Learning, pp. 4487\u20134499. PMLR (2021)"},{"key":"29_CR32","unstructured":"Ioffe, S., Szegedy, C.: Batch normalization: accelerating deep network training by reducing internal covariate shift. In: International Conference on Machine Learning, pp. 448\u2013456. PMLR (2015)"},{"key":"29_CR33","doi-asserted-by":"crossref","unstructured":"Karita, S., Chen, N., Hayashi, T., et al.: A comparative study on transformer vs RNN in speech applications. arXiv preprint arXiv:1909.06317 (2019)","DOI":"10.1109\/ASRU46091.2019.9003750"},{"key":"29_CR34","doi-asserted-by":"crossref","unstructured":"Krause, J., Stark, M., Deng, J., Fei-Fei, L.: 3D object representations for fine-grained categorization. In: IEEE Workshop on 3D Representation and Recognition (2013)","DOI":"10.1109\/ICCVW.2013.77"},{"issue":"6","key":"29_CR35","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1145\/3065386","volume":"60","author":"A Krizhevsky","year":"2012","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.: ImageNet classification with deep convolutional neural networks. Commun. ACM 60(6), 84\u201390 (2012)","journal-title":"Commun. ACM"},{"key":"29_CR36","unstructured":"Krizhevsky, A.: Learning multiple layers of features from tiny images. Tech. rep., CIFAR (2009)"},{"key":"29_CR37","unstructured":"Lample, G., Charton, F.: Deep learning for symbolic mathematics. arXiv preprint arXiv:1912.01412 (2019)"},{"key":"29_CR38","doi-asserted-by":"crossref","unstructured":"Li, X., Wang, W., Hu, X., Yang, J.: Selective kernel networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 510\u2013519 (2019)","DOI":"10.1109\/CVPR.2019.00060"},{"key":"29_CR39","unstructured":"Liu, H., Dai, Z., So, D.R., Le, Q.V.: Pay attention to MLPs. arXiv preprint arXiv:2105.08050 (2021)"},{"key":"29_CR40","unstructured":"Liu, Y., et al.: RoBERTa: a robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692 (2019)"},{"key":"29_CR41","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"29_CR42","doi-asserted-by":"crossref","unstructured":"Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s. arXiv preprint arXiv:2201.03545 (2022)","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"29_CR43","unstructured":"Loshchilov, I., Hutter, F.: Fixing weight decay regularization in Adam. arXiv preprint arXiv:1711.05101 (2017)"},{"key":"29_CR44","doi-asserted-by":"crossref","unstructured":"L\u00fcscher, C., Beck, E., Irie, K., et al.: RWTH ASR systems for LibriSpeech: hybrid vs attention. In: Interspeech (2019)","DOI":"10.21437\/Interspeech.2019-1780"},{"key":"29_CR45","unstructured":"Mahabadi, R.K., Ruder, S., Dehghani, M., Henderson, J.: Parameter-efficient multi-task fine-tuning for transformers via shared hypernetworks. In: ACL\/IJCNLP (2021)"},{"key":"29_CR46","doi-asserted-by":"crossref","unstructured":"Mancini, M., Ricci, E., Caputo, B., Bul\u00f2, S.R.: Adding new tasks to a single network with weight transformations using binary masks. In: European Conference on Computer Vision Workshops (2018)","DOI":"10.1007\/978-3-030-11012-3_14"},{"key":"29_CR47","unstructured":"Melas-Kyriazi, L.: Do you even need attention? A stack of feed-forward layers does surprisingly well on ImageNet. arXiv preprint arXiv:2105.02723 (2021)"},{"key":"29_CR48","doi-asserted-by":"crossref","unstructured":"Nilsback, M.E., Zisserman, A.: Automated flower classification over a large number of classes. In: Proceedings of the Indian Conference on Computer Vision, Graphics and Image Processing (2008)","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"29_CR49","unstructured":"Nouby, A.E., Izacard, G., Touvron, H., Laptev, I., J\u00e9gou, H., Grave, E.: Are large-scale datasets necessary for self-supervised pre-training? arXiv preprint arXiv:2112.10740 (2021)"},{"key":"29_CR50","doi-asserted-by":"crossref","unstructured":"Oquab, M., Bottou, L., Laptev, I., Sivic, J.: Learning and transferring mid-level image representations using convolutional neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1717\u20131724 (2014)","DOI":"10.1109\/CVPR.2014.222"},{"key":"29_CR51","doi-asserted-by":"crossref","unstructured":"Pfeiffer, J., R\u00fcckl\u00e9, A., Poth, C., Kamath, A., Vulic, I., Ruder, S., Cho, K., Gurevych, I.: AdapterHub: A framework for adapting transformers. In: EMNLP (2020)","DOI":"10.18653\/v1\/2020.emnlp-demos.7"},{"issue":"8","key":"29_CR52","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019)","journal-title":"OpenAI blog"},{"key":"29_CR53","unstructured":"Ramesh, A., et al.: Zero-shot text-to-image generation. arXiv preprint arXiv:2102.12092 (2021)"},{"key":"29_CR54","doi-asserted-by":"crossref","unstructured":"Rebuffi, S.A., Bilen, H., Vedaldi, A.: Efficient parametrization of multi-domain deep neural networks. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8119\u20138127 (2018)","DOI":"10.1109\/CVPR.2018.00847"},{"key":"29_CR55","unstructured":"Recht, B., Roelofs, R., Schmidt, L., Shankar, V.: Do ImageNet classifiers generalize to ImageNet? In: International Conference on Machine Learning, pp. 5389\u20135400. PMLR (2019)"},{"issue":"3","key":"29_CR56","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: ImageNet large scale visual recognition challenge. Int. J. Comput. Vis. 115(3), 211\u2013252 (2015). https:\/\/doi.org\/10.1007\/s11263-015-0816-y","journal-title":"Int. J. Comput. Vis."},{"key":"29_CR57","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: International Conference on Learning Representations (2015)"},{"key":"29_CR58","unstructured":"Steiner, A., Kolesnikov, A., Zhai, X., Wightman, R., Uszkoreit, J., Beyer, L.: How to train your ViT? Data, augmentation, and regularization in vision transformers. arXiv preprint arXiv:2106.10270 (2021)"},{"key":"29_CR59","doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Going deeper with convolutions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1\u20139 (2015)","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"29_CR60","unstructured":"Tolstikhin, I., et al.: MLP-Mixer: an all-MLP architecture for vision. arXiv preprint arXiv:2105.01601 (2021)"},{"key":"29_CR61","doi-asserted-by":"crossref","unstructured":"Touvron, H., et al.: ResMLP: feedforward networks for image classification with data-efficient training. arXiv preprint arXiv:2105.03404 (2021)","DOI":"10.1109\/TPAMI.2022.3206148"},{"key":"29_CR62","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: International Conference on Machine Learning, pp. 10347\u201310357. PMLR (2021)"},{"key":"29_CR63","unstructured":"Touvron, H., et al.: Augmenting convolutional networks with attention-based aggregation. arXiv preprint arXiv:2112.13692 (2021)"},{"key":"29_CR64","doi-asserted-by":"crossref","unstructured":"Touvron, H., Cord, M., Sablayrolles, A., Synnaeve, G., J\u00e9gou, H.: Going deeper with image transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 32\u201342(2021)","DOI":"10.1109\/ICCV48922.2021.00010"},{"key":"29_CR65","unstructured":"Touvron, H., Vedaldi, A., Douze, M., Jegou, H.: Fixing the train-test resolution discrepancy. Adv. Neural Inf. Process. Syst. 32 (2019)"},{"key":"29_CR66","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"29_CR67","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Pyramid vision transformer: a versatile backbone for dense prediction without convolutions. arXiv preprint arXiv:2102.12122 (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"29_CR68","doi-asserted-by":"crossref","unstructured":"Wei, C., Fan, H., Xie, S., Wu, C.Y., Yuille, A., Feichtenhofer, C.: Masked feature prediction for self-supervised visual pre-training. arXiv preprint arXiv:2112.09133 (2021)","DOI":"10.1109\/CVPR52688.2022.01426"},{"key":"29_CR69","unstructured":"Wightman, R., Touvron, H., J\u00e9gou, H.: ResNet strikes back: an improved training procedure in timm. arXiv preprint arXiv:2110.00476 (2021)"},{"key":"29_CR70","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: CvT: introducing convolutions to vision transformers. arXiv preprint arXiv:2103.15808 (2021)","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"29_CR71","unstructured":"Xiao, T., Singh, M., Mintun, E., Darrell, T., Doll\u00e1r, P., Girshick, R.B.: Early convolutions help transformers see better. arXiv preprint arXiv:2106.14881 (2021)"},{"key":"29_CR72","doi-asserted-by":"crossref","unstructured":"Xie, Z., et al.: SimMIM: a simple framework for masked image modeling. arXiv preprint arXiv:2111.09886 (2021)","DOI":"10.1109\/CVPR52688.2022.00943"},{"key":"29_CR73","unstructured":"Yosinski, J., Clune, J., Bengio, Y., Lipson, H.: How transferable are features in deep neural networks? arXiv preprint arXiv:1411.1792 (2014)"},{"key":"29_CR74","doi-asserted-by":"crossref","unstructured":"Yuan, L., et al.: Tokens-to-Token ViT: training vision transformers from scratch on ImageNet. arXiv preprint arXiv:2101.11986 (2021)","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"29_CR75","doi-asserted-by":"crossref","unstructured":"Zagoruyko, S., Komodakis, N.: Wide residual networks. arXiv preprint arXiv:1605.07146 (2016)","DOI":"10.5244\/C.30.87"},{"key":"29_CR76","unstructured":"Zhou, J., et al.: iBOT: image BERT pre-training with online tokenizer. International Conference on Learning Representations (2022)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20053-3_29","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,3,11]],"date-time":"2023-03-11T06:51:51Z","timestamp":1678517511000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20053-3_29"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031200526","9783031200533"],"references-count":76,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20053-3_29","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"6 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}