{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T15:48:50Z","timestamp":1778082530597,"version":"3.51.4"},"publisher-location":"Cham","reference-count":76,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198328","type":"print"},{"value":"9783031198335","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_5","type":"book-chapter","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T00:40:30Z","timestamp":1667522430000},"page":"69-86","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":60,"title":["Efficient Video Transformers with\u00a0Spatial-Temporal Token Selection"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8129-7333","authenticated-orcid":false,"given":"Junke","family":"Wang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4372-241X","authenticated-orcid":false,"given":"Xitong","family":"Yang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5314-6853","authenticated-orcid":false,"given":"Hengduo","family":"Li","sequence":"additional","affiliation":[]},{"given":"Li","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8689-5807","authenticated-orcid":false,"given":"Zuxuan","family":"Wu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1907-8567","authenticated-orcid":false,"given":"Yu-Gang","family":"Jiang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"key":"5_CR1","doi-asserted-by":"crossref","unstructured":"Abernethy, J., Lee, C., Tewari, A.: Perturbation techniques in online learning and optimization. Perturbations, Optimization, and Statistics (2016)","DOI":"10.7551\/mitpress\/10761.003.0009"},{"key":"5_CR2","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lu\u010di\u0107, M., Schmid, C.: ViViT: a video vision transformer. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"5_CR3","unstructured":"Bertasius, G., Wang, H., Torresani, L.: Is space-time attention all you need for video understanding? In: ICML (2021)"},{"key":"5_CR4","unstructured":"Berthet, Q., Blondel, M., Teboul, O., Cuturi, M., Vert, J.P., Bach, F.: Learning with differentiable perturbed optimizers. arXiv preprint arXiv:2002.08676 (2020)"},{"key":"5_CR5","doi-asserted-by":"crossref","unstructured":"Bhardwaj, S., Srinivasan, M., Khapra, M.M.: Efficient video classification using fewer frames. 
In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00044"},{"key":"5_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"5_CR7","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo Vadis, action recognition? A new model and the kinetics dataset. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"5_CR8","doi-asserted-by":"crossref","unstructured":"Cordonnier, J.B., et al.: Differentiable patch selection for image recognition. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00238"},{"key":"5_CR9","unstructured":"Cuturi, M., Teboul, O., Vert, J.P.: Differentiable ranking and sorting using optimal transport. In: NeurIPS (2019)"},{"key":"5_CR10","doi-asserted-by":"crossref","unstructured":"Davidson, J., et al.: The YouTube video recommendation system. In: RS (2010)","DOI":"10.1145\/1864708.1864770"},{"key":"5_CR11","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"5_CR12","doi-asserted-by":"crossref","unstructured":"Dong, J., et al.: Dual encoding for zero-example video retrieval. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00957"},{"key":"5_CR13","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. In: ICLR (2021)"},{"key":"5_CR14","doi-asserted-by":"crossref","unstructured":"Fan, H., et al.: Multiscale vision transformers. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"5_CR15","unstructured":"Fan, Q., Chen, C.F.R., Kuehne, H., Pistoia, M., Cox, D.: More is less: learning efficient video representations by temporal aggregation modules. In: NeurIPS (2019)"},{"key":"5_CR16","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C.: X3D: expanding architectures for efficient video recognition. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00028"},{"key":"5_CR17","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Fan, H., Malik, J., He, K.: SlowFast networks for video recognition. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00630"},{"key":"5_CR18","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Zisserman, A.: Convolutional two-stream network fusion for video action recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.213"},{"key":"5_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"214","DOI":"10.1007\/978-3-030-58548-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"V Gabeur","year":"2020","unstructured":"Gabeur, V., Sun, C., Alahari, K., Schmid, C.: Multi-modal transformer for video retrieval. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12349, pp. 214\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58548-8_13"},{"key":"5_CR20","unstructured":"Goyal, P., et al.: Accurate, large minibatch SGD: training ImageNet in 1 hour. 
arXiv preprint arXiv:1706.02677 (2017)"},{"key":"5_CR21","doi-asserted-by":"crossref","unstructured":"Goyal, R., et al.: The \u201csomething something\u201d video database for learning and evaluating visual common sense. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.622"},{"key":"5_CR22","doi-asserted-by":"crossref","unstructured":"Hara, K., Kataoka, H., Satoh, Y.: Can spatiotemporal 3d CNNs retrace the history of 2d CNNs and ImageNet? In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00685"},{"key":"5_CR23","unstructured":"He, B., Yang, X., Wu, Z., Chen, H., Lim, S.N., Shrivastava, A.: GTA: global temporal attention for video action understanding. In: BMVC (2021)"},{"key":"5_CR24","doi-asserted-by":"crossref","unstructured":"Heo, B., Yun, S., Han, D., Chun, S., Choe, J., Oh, S.J.: Rethinking spatial dimensions of vision transformers. arXiv preprint arXiv:2103.16302 (2021)","DOI":"10.1109\/ICCV48922.2021.01172"},{"key":"5_CR25","doi-asserted-by":"crossref","unstructured":"Huang, Y., Cui, B., Jiang, J., Hong, K., Zhang, W., Xie, Y.: Real-time video recommendation exploration. In: ICMD (2016)","DOI":"10.1145\/2882903.2903743"},{"key":"5_CR26","unstructured":"Jang, E., Gu, S., Poole, B.: Categorical reparameterization with Gumbel-softmax. arXiv preprint arXiv:1611.01144 (2016)"},{"key":"5_CR27","doi-asserted-by":"crossref","unstructured":"Jiang, B., Wang, M., Gan, W., Wu, W., Yan, J.: STM: spatiotemporal and motion encoding for action recognition. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00209"},{"key":"5_CR28","unstructured":"Kay, W., et al.: The kinetics human action video dataset. arXiv preprint arXiv:1705.06950 (2017)"},{"key":"5_CR29","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"5_CR30","unstructured":"Kitaev, N., Kaiser, L., Levskaya, A.: Reformer: the efficient transformer. In: ICLR (2020)"},{"key":"5_CR31","doi-asserted-by":"crossref","unstructured":"Kondratyuk, D., et al.: MoviNets: mobile video networks for efficient video recognition. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01576"},{"key":"5_CR32","doi-asserted-by":"crossref","unstructured":"Korbar, B., Tran, D., Torresani, L.: SCSampler: sampling salient clips from video for efficient action recognition. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00633"},{"key":"5_CR33","doi-asserted-by":"crossref","unstructured":"Lee, J., Abu-El-Haija, S.: Large-scale content-only video recommendation. In: ICCVW (2017)","DOI":"10.1109\/ICCVW.2017.121"},{"key":"5_CR34","unstructured":"Li, K., et al.: UniFormer: unified transformer for efficient spatial-temporal representation learning. In: ICLR (2022)"},{"key":"5_CR35","doi-asserted-by":"crossref","unstructured":"Li, T., Liu, J., Zhang, W., Ni, Y., Wang, W., Li, Z.: UAV-human: a large benchmark for human behavior understanding with unmanned aerial vehicles. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01600"},{"key":"5_CR36","doi-asserted-by":"crossref","unstructured":"Li, Y., Ji, B., Shi, X., Zhang, J., Kang, B., Wang, L.: TEA: temporal excitation and aggregation for action recognition. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00099"},{"key":"5_CR37","doi-asserted-by":"crossref","unstructured":"Lin, J., Gan, C., Han, S.: TSM: temporal shift module for efficient video understanding. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00718"},{"key":"5_CR38","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. 
In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"5_CR39","unstructured":"Liu, Z., et al.: Video swin transformer. arXiv preprint arXiv:2106.13230 (2021)"},{"key":"5_CR40","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: TEINet: towards an efficient architecture for video recognition. In: AAAI (2020)","DOI":"10.1609\/aaai.v34i07.6836"},{"key":"5_CR41","unstructured":"Loshchilov, I., Hutter, F.: SGDR: stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983 (2016)"},{"key":"5_CR42","unstructured":"Loshchilov, I., Hutter, F.: Fixing weight decay regularization in adam (2018)"},{"key":"5_CR43","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/1961209.1961213","volume":"29","author":"T Mei","year":"2011","unstructured":"Mei, T., Yang, B., Hua, X.S., Li, S.: Contextual video recommendation by multimodal relevance and user feedback. TOIS 29, 1\u201324 (2011)","journal-title":"TOIS"},{"key":"5_CR44","unstructured":"Naseer, M., Ranasinghe, K., Khan, S., Hayat, M., Khan, F., Yang, M.H.: Intriguing properties of vision transformers. In: NeurIPS (2021)"},{"key":"5_CR45","doi-asserted-by":"crossref","unstructured":"Neimark, D., Bar, O., Zohar, M., Asselmann, D.: Video transformer network. arXiv preprint arXiv:2102.00719 (2021)","DOI":"10.1109\/ICCVW54120.2021.00355"},{"key":"5_CR46","unstructured":"Pan, B., Panda, R., Jiang, Y., Wang, Z., Feris, R., Oliva, A.: IA-RED$$^{2}$$: Interpretability-aware redundancy reduction for vision transformers. In: NeurIPS (2021)"},{"key":"5_CR47","unstructured":"Paszke, A., et al.: PyTorch: an imperative style, high-performance deep learning library. In: NeurIPS (2019)"},{"key":"5_CR48","unstructured":"Patrick, M., et al.: Keeping your eye on the ball: trajectory attention in video transformers. In: NeurIPS (2021)"},{"key":"5_CR49","unstructured":"Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., Hsieh, C.J.: DynamicViT: efficient vision transformers with dynamic token sparsification. In: NeurIPS (2021)"},{"key":"5_CR50","unstructured":"Ryoo, M.S., Piergiovanni, A., Arnab, A., Dehghani, M., Angelova, A.: TokenLearner: adaptive space-time tokenization for videos. In: NeurIPS (2021)"},{"key":"5_CR51","doi-asserted-by":"crossref","unstructured":"Sun, Z., Ke, Q., Rahmani, H., Bennamoun, M., Wang, G., Liu, J.: Human action recognition from various data modalities: a review. IEEE TPAMI, 1\u201320 (2022)","DOI":"10.1109\/TPAMI.2022.3183112"},{"key":"5_CR52","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: ICML (2021)"},{"key":"5_CR53","doi-asserted-by":"crossref","unstructured":"Tran, D., Wang, H., Torresani, L., Ray, J., LeCun, Y., Paluri, M.: A closer look at spatiotemporal convolutions for action recognition. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00675"},{"key":"5_CR54","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS (2017)"},{"key":"5_CR55","doi-asserted-by":"crossref","unstructured":"Wang, H., Tran, D., Torresani, L., Feiszli, M.: Video modeling with correlation networks. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00043"},{"key":"5_CR56","doi-asserted-by":"crossref","unstructured":"Wang, L., Tong, Z., Ji, B., Wu, G.: TDN: temporal difference networks for efficient action recognition. 
In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00193"},{"key":"5_CR57","doi-asserted-by":"crossref","unstructured":"Wang, R., et al.: BEVT: BERT pretraining of video transformers. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01432"},{"key":"5_CR58","unstructured":"Wang, S., Li, B.Z., Khabsa, M., Fang, H., Ma, H.: Linformer: self-attention with linear complexity. arXiv preprint arXiv:2006.04768 (2020)"},{"key":"5_CR59","doi-asserted-by":"crossref","unstructured":"Wang, Y., Chen, Z., Jiang, H., Song, S., Han, Y., Huang, G.: Adaptive focus for efficient video recognition. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01594"},{"key":"5_CR60","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: AdaFocus V2: end-to-end training of spatial dynamic networks for video recognition. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01943"},{"key":"5_CR61","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: End-to-end video instance segmentation with transformers. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00863"},{"key":"5_CR62","doi-asserted-by":"crossref","unstructured":"Wu, C.Y., Zaheer, M., Hu, H., Manmatha, R., Smola, A.J., Kr\u00e4henb\u00fchl, P.: Compressed video action recognition. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00631"},{"key":"5_CR63","doi-asserted-by":"publisher","first-page":"1699","DOI":"10.1109\/TPAMI.2020.3029425","volume":"44","author":"Z Wu","year":"2022","unstructured":"Wu, Z., Li, H., Xiong, C., Jiang, Y.G., Davis, L.S.: A dynamic frame selection framework for fast video recognition. IEEE TPAMI 44, 1699\u20131711 (2022)","journal-title":"IEEE TPAMI"},{"key":"5_CR64","doi-asserted-by":"crossref","unstructured":"Wu, Z., Li, H., Zheng, Y., Xiong, C., Jiang, Y., Davis, L.S.: A coarse-to-fine framework for resource efficient video recognition. In: IJCV (2021)","DOI":"10.1007\/s11263-021-01508-1"},{"key":"5_CR65","doi-asserted-by":"crossref","unstructured":"Wu, Z., Xiong, C., Ma, C.Y., Socher, R., Davis, L.S.: AdaFrame: adaptive frame selection for fast video recognition. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00137"},{"key":"5_CR66","unstructured":"Xie, Y., et al.: Differentiable top-k with optimal transport. In: NeurIPS (2020)"},{"key":"5_CR67","doi-asserted-by":"crossref","unstructured":"Xu, L., Huang, H., Liu, J.: SUTD-TraffiCQA: a question answering benchmark and an efficient network for video reasoning over traffic events. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00975"},{"key":"5_CR68","unstructured":"Yang, J., et al.: Focal self-attention for local-global interactions in vision transformers. In: NeurIPS (2021)"},{"key":"5_CR69","doi-asserted-by":"crossref","unstructured":"Yeung, S., Russakovsky, O., Mori, G., Fei-Fei, L.: End-to-end learning of action detection from frame glimpses in videos. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.293"},{"key":"5_CR70","doi-asserted-by":"crossref","unstructured":"Yuan, L., et al.: Central similarity quantization for efficient image and video retrieval. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00315"},{"key":"5_CR71","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"323","DOI":"10.1007\/978-3-030-58604-1_20","volume-title":"Computer Vision \u2013 ECCV 2020","author":"D Zhang","year":"2020","unstructured":"Zhang, D., Zhang, H., Tang, J., Wang, M., Hua, X., Sun, Q.: Feature pyramid transformer. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12373, pp. 323\u2013339. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58604-1_20"},{"key":"5_CR72","unstructured":"Zhang, Z., Zhang, H., Zhao, L., Chen, T., Pfister, T.: Aggregating nested transformers. In: AAAI (2022)"},{"key":"5_CR73","doi-asserted-by":"crossref","unstructured":"Zheng, S., et al.: Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"5_CR74","first-page":"7970","volume":"29","author":"YD Zheng","year":"2020","unstructured":"Zheng, Y.D., Liu, Z., Lu, T., Wang, L.: Dynamic sampling networks for efficient action recognition in videos. TIP 29, 7970\u20137983 (2020)","journal-title":"TIP"},{"key":"5_CR75","unstructured":"Zhu, X., Su, W., Lu, L., Li, B., Wang, X., Dai, J.: Deformable DETR: deformable transformers for end-to-end object detection. In: ICLR (2021)"},{"key":"5_CR76","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"713","DOI":"10.1007\/978-3-030-01216-8_43","volume-title":"Computer Vision \u2013 ECCV 2018","author":"M Zolfaghari","year":"2018","unstructured":"Zolfaghari, M., Singh, K., Brox, T.: ECO: efficient convolutional network for online video understanding. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11206, pp. 713\u2013730. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01216-8_43"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T02:04:36Z","timestamp":1701309876000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":76,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}