{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T16:06:09Z","timestamp":1776182769817,"version":"3.50.1"},"publisher-location":"Cham","reference-count":63,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031200526","type":"print"},{"value":"9783031200533","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20053-3_30","type":"book-chapter","created":{"date-parts":[[2022,11,5]],"date-time":"2022-11-05T16:21:52Z","timestamp":1667665312000},"page":"516-533","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":293,"title":["DeiT III: Revenge of\u00a0the\u00a0ViT"],"prefix":"10.1007","author":[{"given":"Hugo","family":"Touvron","sequence":"first","affiliation":[]},{"given":"Matthieu","family":"Cord","sequence":"additional","affiliation":[]},{"given":"Herv\u00e9","family":"J\u00e9gou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,6]]},"reference":[{"key":"30_CR1","unstructured":"Bao, H., Dong, L., Wei, F.: BEiT: BERT pre-training of image transformers. arXiv preprint arXiv:2106.08254 (2021)"},{"key":"30_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"30_CR3","doi-asserted-by":"crossref","unstructured":"Chu, P., Bian, X., Liu, S., Ling, H.: Feature space augmentation for long-tailed data. arXiv preprint arXiv:2008.03673 (2020)","DOI":"10.1007\/978-3-030-58526-6_41"},{"key":"30_CR4","unstructured":"Cordonnier, J.B., Loukas, A., Jaggi, M.: On the relationship between self-attention and convolutional layers. arXiv preprint arXiv:1911.03584 (2019)"},{"key":"30_CR5","doi-asserted-by":"crossref","unstructured":"Cubuk, E.D., Zoph, B., Shlens, J., Le, Q.V.: RandAugment: practical automated data augmentation with a reduced search space. arXiv preprint arXiv:1909.13719 (2019)","DOI":"10.1109\/CVPRW50498.2020.00359"},{"key":"30_CR6","doi-asserted-by":"crossref","unstructured":"Cubuk, E.D., Zoph, B., Man\u00e9, D., Vasudevan, V., Le, Q.V.: AutoAugment: learning augmentation policies from data. arXiv preprint arXiv:1805.09501 (2018)","DOI":"10.1109\/CVPR.2019.00020"},{"key":"30_CR7","doi-asserted-by":"crossref","unstructured":"d\u2019Ascoli, S., Touvron, H., Leavitt, M.L., Morcos, A.S., Biroli, G., Sagun, L.: ConViT: improving vision transformers with soft convolutional inductive biases. 
In: ICML (2021)","DOI":"10.1088\/1742-5468\/ac9830"},{"key":"30_CR8","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255 (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"30_CR9","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: NAACL (2019)"},{"key":"30_CR10","doi-asserted-by":"crossref","unstructured":"Dong, X., et al.: CSWin transformer: a general vision transformer backbone with cross-shaped windows. arXiv preprint arXiv:2107.00652 (2021)","DOI":"10.1109\/CVPR52688.2022.01181"},{"key":"30_CR11","unstructured":"Dong, X., et al.: PeCo: perceptual codebook for BERT pre-training of vision transformers. arXiv preprint arXiv:2111.12710 (2021)"},{"key":"30_CR12","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (2021)"},{"key":"30_CR13","unstructured":"El-Nouby, A., Izacard, G., Touvron, H., Laptev, I., Jegou, H., Grave, E.: Are large-scale datasets necessary for self-supervised pre-training? arXiv preprint arXiv:2112.10740 (2021)"},{"key":"30_CR14","unstructured":"El-Nouby, A., Neverova, N., Laptev, I., J\u00e9gou, H.: Training vision transformers for image retrieval. arXiv preprint arXiv:2102.05644 (2021)"},{"key":"30_CR15","unstructured":"El-Nouby, A., et al.: XCiT: cross-covariance image transformers. arXiv preprint arXiv:2106.09681 (2021)"},{"key":"30_CR16","doi-asserted-by":"crossref","unstructured":"Fan, H., et al.: Multiscale vision transformers. arXiv preprint arXiv:2104.11227 (2021)","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"30_CR17","doi-asserted-by":"crossref","unstructured":"Graham, B., et al.: LeViT: a vision transformer in convnet\u2019s clothing for faster inference. arXiv preprint arXiv:2104.01136 (2021)","DOI":"10.1109\/ICCV48922.2021.01204"},{"key":"30_CR18","unstructured":"Han, K., Xiao, A., Wu, E., Guo, J., Xu, C., Wang, Y.: Transformer in transformer. arXiv preprint arXiv:2103.00112 (2021)"},{"key":"30_CR19","doi-asserted-by":"crossref","unstructured":"He, K., Chen, X., Xie, S., Li, Y., Doll\u2019ar, P., Girshick, R.B.: Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377 (2021)","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"30_CR20","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Conference on Computer Vision and Pattern Recognition (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"30_CR21","doi-asserted-by":"crossref","unstructured":"Heo, B., Yun, S., Han, D., Chun, S., Choe, J., Oh, S.J.: Rethinking spatial dimensions of vision transformers. arXiv preprint arXiv:2103.16302 (2021)","DOI":"10.1109\/ICCV48922.2021.01172"},{"key":"30_CR22","unstructured":"Horn, G.V., et al: The iNaturalist species classification and detection dataset. arXiv preprint arXiv:1707.06642 (2017)"},{"key":"30_CR23","unstructured":"Horn, G.V., et al.: The inaturalist challenge 2018 dataset. 
arXiv preprint arXiv:1707.06642 (2018)"},{"key":"30_CR24","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"646","DOI":"10.1007\/978-3-319-46493-0_39","volume-title":"Computer Vision \u2013 ECCV 2016","author":"G Huang","year":"2016","unstructured":"Huang, G., Sun, Yu., Liu, Z., Sedra, D., Weinberger, K.Q.: Deep networks with stochastic depth. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 646\u2013661. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_39"},{"key":"30_CR25","unstructured":"Kolesnikov, A., et al.: Big transfer (bit): general visual representation learning. arXiv preprint arXiv:1912.113706, 3 (2019)"},{"key":"30_CR26","doi-asserted-by":"crossref","unstructured":"Krause, J., Stark, M., Deng, J., Fei-Fei, L.: 3D object representations for fine-grained categorization. In: IEEE Workshop on 3D Representation and Recognition (2013)","DOI":"10.1109\/ICCVW.2013.77"},{"key":"30_CR27","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.: ImageNet classification with deep convolutional neural networks. In: NeurIPS (2012)"},{"key":"30_CR28","unstructured":"Krizhevsky, A.: Learning multiple layers of features from tiny images. Technical report, CIFAR (2009)"},{"issue":"11","key":"30_CR29","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y LeCun","year":"1998","unstructured":"LeCun, Y., Bottou, L., Bengio, Y., Haffner, P.: Gradient-based learning applied to document recognition. Proc. IEEE 86(11), 2278\u20132324 (1998)","journal-title":"Proc. IEEE"},{"key":"30_CR30","unstructured":"LingChen, T.C., Khonsari, A., Lashkari, A., Nazari, M.R., Sambee, J.S., Nascimento, M.A.: UniformAugment: a search-free probabilistic data augmentation approach. arXiv preprint arXiv:2003.14348 (2020)"},{"key":"30_CR31","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"30_CR32","doi-asserted-by":"crossref","unstructured":"Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s. arXiv preprint arXiv:2201.03545 (2022)","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"30_CR33","doi-asserted-by":"crossref","unstructured":"M\u00fcller, S., Hutter, F.: TrivialAugment: tuning-free yet state-of-the-art data augmentation. arXiv preprint arXiv:2103.10158 (2021)","DOI":"10.1109\/ICCV48922.2021.00081"},{"key":"30_CR34","doi-asserted-by":"crossref","unstructured":"Neimark, D., Bar, O., Zohar, M., Asselmann, D.: Video transformer network. arXiv preprint arXiv:2102.00719 (2021)","DOI":"10.1109\/ICCVW54120.2021.00355"},{"key":"30_CR35","doi-asserted-by":"crossref","unstructured":"Nilsback, M.E., Zisserman, A.: Automated flower classification over a large number of classes. In: Proceedings of the Indian Conference on Computer Vision, Graphics and Image Processing (2008)","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"30_CR36","doi-asserted-by":"crossref","unstructured":"Oquab, M., Bottou, L., Laptev, I., Sivic, J.: Learning and transferring mid-level image representations using convolutional neural networks. In: Conference on Computer Vision and Pattern Recognition (2014)","DOI":"10.1109\/CVPR.2014.222"},{"key":"30_CR37","doi-asserted-by":"crossref","unstructured":"Radosavovic, I., Kosaraju, R.P., Girshick, R.B., He, K., Doll\u00e1r, P.: Designing network design spaces. 
In: Conference on Computer Vision and Pattern Recognition (2020)","DOI":"10.1109\/CVPR42600.2020.01044"},{"key":"30_CR38","unstructured":"Recht, B., Roelofs, R., Schmidt, L., Shankar, V.: Do ImageNet classifiers generalize to ImageNet? In: International Conference on Machine Learning (2019)"},{"issue":"3","key":"30_CR39","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: ImageNet large scale visual recognition challenge. Int. J. Comput. Vision 115(3), 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vision"},{"key":"30_CR40","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: International Conference on Learning Representations (2015)"},{"key":"30_CR41","unstructured":"Steiner, A., Kolesnikov, A., Zhai, X., Wightman, R., Uszkoreit, J., Beyer, L.: How to train your ViT? Data, augmentation, and regularization in vision transformers. arXiv preprint arXiv:2106.10270 (2021)"},{"key":"30_CR42","doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Going deeper with convolutions. In: Conference on Computer Vision and Pattern Recognition (2015)","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"30_CR43","unstructured":"Tan, M., Le, Q.V.: EfficientNet: rethinking model scaling for convolutional neural networks. arXiv preprint arXiv:1905.11946 (2019)"},{"key":"30_CR44","unstructured":"Tan, M., Le, Q.V.: EfficientNetV2: smaller models and faster training. In: International Conference on Machine Learning (2021)"},{"key":"30_CR45","unstructured":"Tolstikhin, I., et al.: MLP-mixer: an all-MLP architecture for vision. arXiv preprint arXiv:2105.01601 (2021)"},{"key":"30_CR46","doi-asserted-by":"crossref","unstructured":"Touvron, H., et al.: ResMLP: feedforward networks for image classification with data-efficient training. arXiv preprint arXiv:2105.03404 (2021)","DOI":"10.1109\/TPAMI.2022.3206148"},{"key":"30_CR47","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: International Conference on Machine Learning (2021)"},{"key":"30_CR48","unstructured":"Touvron, H., et al.: Augmenting convolutional networks with attention-based aggregation. arXiv preprint arXiv:2112.13692 (2021)"},{"key":"30_CR49","doi-asserted-by":"crossref","unstructured":"Touvron, H., Cord, M., El-Nouby, A., Verbeek, J., J\u2019egou, H.: Three things everyone should know about vision transformers. arXiv preprint arXiv:2203.09795 (2022)","DOI":"10.1007\/978-3-031-20053-3_29"},{"key":"30_CR50","doi-asserted-by":"crossref","unstructured":"Touvron, H., Cord, M., Sablayrolles, A., Synnaeve, G., J\u00e9gou, H.: Going deeper with image transformers. In: International Conference on Computer Vision (2021)","DOI":"10.1109\/ICCV48922.2021.00010"},{"key":"30_CR51","doi-asserted-by":"crossref","unstructured":"Touvron, H., Sablayrolles, A., Douze, M., Cord, M., J\u00e9gou, H.: Grafit: learning fine-grained image representations with coarse labels. In: International Conference on Computer Vision (2021)","DOI":"10.1109\/ICCV48922.2021.00091"},{"key":"30_CR52","unstructured":"Touvron, H., Vedaldi, A., Douze, M., Jegou, H.: Fixing the train-test resolution discrepancy. 
In: Neurips (2019)"},{"key":"30_CR53","unstructured":"Touvron, H., Vedaldi, A., Douze, M., J\u00e9gou, H.: Fixing the train-test resolution discrepancy: FixEfficientNet. arXiv preprint arXiv:2003.08237 (2020)"},{"key":"30_CR54","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS (2017)"},{"key":"30_CR55","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Pyramid vision transformer: a versatile backbone for dense prediction without convolutions. arXiv preprint arXiv:2102.12122 (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"30_CR56","unstructured":"Wightman, R., Touvron, H., J\u00e9gou, H.: ResNet strikes back: an improved training procedure in TIMM. arXiv preprint arXiv:2110.00476 (2021)"},{"key":"30_CR57","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: CVT: introducing convolutions to vision transformers. arXiv preprint arXiv:2103.15808 (2021)","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"30_CR58","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"432","DOI":"10.1007\/978-3-030-01228-1_26","volume-title":"Computer Vision \u2013 ECCV 2018","author":"T Xiao","year":"2018","unstructured":"Xiao, T., Liu, Y., Zhou, B., Jiang, Y., Sun, J.: Unified perceptual parsing for scene understanding. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11209, pp. 432\u2013448. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01228-1_26"},{"key":"30_CR59","unstructured":"Xiao, T., Singh, M., Mintun, E., Darrell, T., Doll\u00e1r, P., Girshick, R.: Early convolutions help transformers see better. arXiv preprint arXiv:2106.14881 (2021)"},{"key":"30_CR60","doi-asserted-by":"crossref","unstructured":"Yun, S., Han, D., Oh, S.J., Chun, S., Choe, J., Yoo, Y.: CutMix: regularization strategy to train strong classifiers with localizable features. arXiv preprint arXiv:1905.04899 (2019)","DOI":"10.1109\/ICCV.2019.00612"},{"key":"30_CR61","unstructured":"Zhang, H., Ciss\u00e9, M., Dauphin, Y.N., Lopez-Paz, D.: Mixup: beyond empirical risk minimization. arXiv preprint arXiv:1710.09412 (2017)"},{"key":"30_CR62","doi-asserted-by":"crossref","unstructured":"Zhou, B., Zhao, H., Puig, X., Fidler, S., Barriuso, A., Torralba, A.: Scene parsing through ADE20K dataset. In: Conference on Computer Vision and Pattern Recognition (2017)","DOI":"10.1109\/CVPR.2017.544"},{"key":"30_CR63","unstructured":"Zhou, J., et al.: iBOT: image BERT pre-training with online tokenizer. 
arXiv preprint arXiv:2111.07832 (2021)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20053-3_30","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,3,11]],"date-time":"2023-03-11T06:51:09Z","timestamp":1678517469000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20053-3_30"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031200526","9783031200533"],"references-count":63,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20053-3_30","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"6 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}