{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:45:34Z","timestamp":1777657534641,"version":"3.51.4"},"publisher-location":"Cham","reference-count":65,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198328","type":"print"},{"value":"9783031198335","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_28","type":"book-chapter","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T00:40:30Z","timestamp":1667522430000},"page":"475-492","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":27,"title":["Dynamic Temporal Filtering in\u00a0Video Models"],"prefix":"10.1007","author":[{"given":"Fuchen","family":"Long","sequence":"first","affiliation":[]},{"given":"Zhaofan","family":"Qiu","sequence":"additional","affiliation":[]},{"given":"Yingwei","family":"Pan","sequence":"additional","affiliation":[]},{"given":"Ting","family":"Yao","sequence":"additional","affiliation":[]},{"given":"Chong-Wah","family":"Ngo","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Mei","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"key":"28_CR1","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lucic, M., Schmid, C.: ViViT: a video vision transformer. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"28_CR2","unstructured":"Bertasius, G., Wang, H., Torresani, L.: Is space-time attention all you need for video understanding? In: ICML (2021)"},{"key":"28_CR3","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo Vadis, action recognition? A new model and the kinetics dataset. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"28_CR4","doi-asserted-by":"crossref","unstructured":"Chen, Y., Dai, X., Liu, M., Chen, D., Yuan, L., Liu, Z.: Dynamic convolution: attention over convolution kernels. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01104"},{"key":"28_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"364","DOI":"10.1007\/978-3-030-01246-5_22","volume-title":"Computer Vision \u2013 ECCV 2018","author":"Y Chen","year":"2018","unstructured":"Chen, Y., Kalantidis, Y., Li, J., Yan, S., Feng, J.: Multi-fiber networks for video recognition. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11205, pp. 364\u2013380. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01246-5_22"},{"key":"28_CR6","doi-asserted-by":"crossref","unstructured":"Diba, A., Sharma, V., Gool, L.V.: Deep temporal linear encoding networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.168"},{"key":"28_CR7","unstructured":"Dosovitskiy, A., et al.: An image is worth $$16\\times 16$$ words: transformers for image recognition at scale. 
In: ICLR (2021)"},{"key":"28_CR8","doi-asserted-by":"crossref","unstructured":"Fan, H., et al.: Multiscale vision transformers. arXiv preprint arXiv:2104.11227 (2021)","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"28_CR9","unstructured":"Fan, Q., Chen, C.F., Kuehne, H., Pistoia, M., Cox, D.: More is less: learning efficient video representations by big-little network and depthwise temporal aggregation. In: NeurIPS (2019)"},{"key":"28_CR10","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C.: X3D: expanding architectures for efficient video recognition. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00028"},{"key":"28_CR11","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Fan, H., Malik, J., He, K.: SlowFast networks for video recognition. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00630"},{"key":"28_CR12","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Zisserman, A.: Convolutional two-stream network fusion for video action recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.213"},{"key":"28_CR13","doi-asserted-by":"crossref","unstructured":"Goyal, R., et al.: The \u201csomething something\u201d video database for learning and evaluating visual common sense. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.622"},{"key":"28_CR14","unstructured":"Han, K., Xiao, A., Wu, E., Guo, J., Xu, C., Wang, Y.: Transformer in transformer. In: NeurIPS (2021)"},{"key":"28_CR15","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"28_CR16","doi-asserted-by":"publisher","first-page":"221","DOI":"10.1109\/TPAMI.2012.59","volume":"35","author":"S Ji","year":"2013","unstructured":"Ji, S., Xu, W., Yang, M., Yu, K.: 3D convolutional neural networks for human action recognition. IEEE Trans. PAMI 35, 221\u2013231 (2013)","journal-title":"IEEE Trans. PAMI"},{"key":"28_CR17","doi-asserted-by":"crossref","unstructured":"Jiang, B., Wang, M., Gan, W., Wu, W., Yan, J.: STM: SpatioTemporal and motion encoding for action recognition. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00209"},{"key":"28_CR18","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Toderici, G., Shetty, S., Leung, T., Sukthankar, R., Fei-Fei, L.: Large-scale video classification with convolutional neural networks. In: CVPR (2014)","DOI":"10.1109\/CVPR.2014.223"},{"key":"28_CR19","doi-asserted-by":"crossref","unstructured":"Klaser, A., Marszalek, M., Schmid, C.: A spatio-temporal descriptor based on 3D-gradients. In: BMVC (2008)","DOI":"10.5244\/C.22.99"},{"key":"28_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"345","DOI":"10.1007\/978-3-030-58517-4_21","volume-title":"Computer Vision \u2013 ECCV 2020","author":"H Kwon","year":"2020","unstructured":"Kwon, H., Kim, M., Kwak, S., Cho, M.: MotionSqueeze: neural motion feature learning for video understanding. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12361, pp. 345\u2013362. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58517-4_21"},{"issue":"2\u20133","key":"28_CR21","doi-asserted-by":"publisher","first-page":"107","DOI":"10.1007\/s11263-005-1838-7","volume":"64","author":"I Laptev","year":"2005","unstructured":"Laptev, I.: On space-time interest points. Int. J. Comput. Vis. 64(2\u20133), 107\u2013123 (2005)","journal-title":"Int. J. Comput. 
Vis."},{"key":"28_CR22","doi-asserted-by":"crossref","unstructured":"Laptev, I., Marszalek, M., Schmid, C., Rozenfeld, B.: Learning realistic human actions from movies. In: CVPR (2008)","DOI":"10.1109\/CVPR.2008.4587756"},{"key":"28_CR23","doi-asserted-by":"crossref","unstructured":"Li, X., Wang, Y., Zhou, Z., Qiao, Y.: SmallBigNet: integrating core and contextual views for video classification. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00117"},{"key":"28_CR24","doi-asserted-by":"crossref","unstructured":"Li, Y., Ji, B., Shi, X., Zhang, J., Kang, B., Wang, L.: TEA: temporal excitation and aggregation for action recognition. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00099"},{"key":"28_CR25","unstructured":"Li, Y., Yao, T., Pan, Y., Mei, T.: Contextual transformer networks for visual recognition. IEEE Trans. PAMI (2022)"},{"key":"28_CR26","doi-asserted-by":"crossref","unstructured":"Lin, J., Gan, C., Han, S.: TSM: temporal shift module for efficient video understanding. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00718"},{"key":"28_CR27","doi-asserted-by":"crossref","unstructured":"Liu, X., Lee, J.Y., Jin, H.: Learning video representations from correspondence proposals. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00440"},{"key":"28_CR28","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"28_CR29","unstructured":"Liu, Z., et al.: Video Swin transformer. arXiv preprint arXiv:2106.13230 (2021)"},{"key":"28_CR30","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: TEINet: towards an efficient architecture for video recognition. In: AAAI (2020)","DOI":"10.1609\/aaai.v34i07.6836"},{"key":"28_CR31","doi-asserted-by":"crossref","unstructured":"Long, F., Qiu, Z., Pan, Y., Yao, T., Luo, J., Mei, T.: Stand-alone inter-frame attention in video models. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00319"},{"key":"28_CR32","doi-asserted-by":"crossref","unstructured":"Long, F., Yao, T., Qiu, Z., Tian, X., Luo, J., Mei, T.: Gaussian temporal awareness networks for action localization. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00043"},{"key":"28_CR33","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"137","DOI":"10.1007\/978-3-030-58580-8_9","volume-title":"Computer Vision \u2013 ECCV 2020","author":"F Long","year":"2020","unstructured":"Long, F., Yao, T., Qiu, Z., Tian, X., Luo, J., Mei, T.: Learning to localize actions from\u00a0moments. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12348, pp. 137\u2013154. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58580-8_9"},{"key":"28_CR34","doi-asserted-by":"crossref","unstructured":"Long, F., Yao, T., Qiu, Z., Tian, X., Luo, J., Mei, T.: Bi-calibration networks for weakly-supervised video representation learning. arXiv preprint arXiv:2206.10491 (2022)","DOI":"10.1007\/s11263-023-01779-w"},{"issue":"6","key":"28_CR35","doi-asserted-by":"publisher","first-page":"1577","DOI":"10.1109\/TMM.2019.2943204","volume":"22","author":"F Long","year":"2020","unstructured":"Long, F., Yao, T., Qiu, Z., Tian, X., Mei, T., Luo, J.: Coarse-to-fine localization of temporal action proposals. IEEE Trans. Multimedia 22(6), 1577\u20131590 (2020)","journal-title":"IEEE Trans. Multimedia"},{"key":"28_CR36","unstructured":"Loshchilov, I., Hutter, F.: SGDR: stochastic gradient descent with warm restarts. 
In: ICLR (2017)"},{"key":"28_CR37","doi-asserted-by":"crossref","unstructured":"Luo, C., Yuille, A.: Grouped spatial-temporal aggregation for efficient action recognition. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00561"},{"key":"28_CR38","unstructured":"Ng, J.Y.H., Hausknecht, M., Vijayanarasimhan, S., Vinyals, O., Monga, R., Toderici, G.: Beyond short snippets: deep networks for video classification. In: CVPR (2015)"},{"key":"28_CR39","volume-title":"Signals and Systems","author":"AV Oppenheim","year":"1998","unstructured":"Oppenheim, A.V., Willsky, A.S., Newab, S.H.: Signals and Systems. Prentice Hall, Englewood Cliffs (1998)"},{"key":"28_CR40","doi-asserted-by":"crossref","unstructured":"Qiu, Z., Yao, T., Mei, T.: Learning spatio-temporal representation with pseudo-3D residual networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.590"},{"key":"28_CR41","unstructured":"Qiu, Z., Yao, T., Ngo, C.W., Mei, T.: Optimization planning for 3D ConvNets. In: ICML (2021)"},{"key":"28_CR42","doi-asserted-by":"crossref","unstructured":"Qiu, Z., Yao, T., Ngo, C.W., Tian, X., Mei, T.: Learning spatio-temporal representation with local and global diffusion. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.01233"},{"key":"28_CR43","unstructured":"Rao, Y., Zhao, W., Zhu, Z., Lu, J., Zhou, J.: Global filter networks for image classification. In: NeurIPS (2021)"},{"key":"28_CR44","doi-asserted-by":"crossref","unstructured":"Scovanner, P., Ali, S., Shah, M.: A 3-dimensional SIFT descriptor and its application to action recognition. In: ACM MM (2007)","DOI":"10.1145\/1291233.1291311"},{"key":"28_CR45","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., Batra, D.: Grad-CAM: visual explanations from deep networks via gradient-based localization. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.74"},{"key":"28_CR46","unstructured":"Simonyan, K., Zisserman, A.: Two-stream convolutional networks for action recognition in videos. In: NIPS (2014)"},{"key":"28_CR47","unstructured":"Srivastava, N., Mansimov, E., Salakhutdinov, R.: Unsupervised learning of video representations using LSTMs. In: ICML (2015)"},{"key":"28_CR48","doi-asserted-by":"crossref","unstructured":"Sudhakaran, S., Escalera, S., Lanz, O.: Gate-shift networks for video action recognition. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00118"},{"key":"28_CR49","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., Jegou, H.: Training data-efficient image transformers and distillation through attention. arXiv preprint arXiv:2012.12877 (2020)"},{"key":"28_CR50","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., Paluri, M.: Learning spatiotemporal features with 3D convolutional networks. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.510"},{"key":"28_CR51","doi-asserted-by":"crossref","unstructured":"Tran, D., Wang, H., Torresani, L., Feiszli, M.: Video classification with channel-separated convolutional networks. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00565"},{"key":"28_CR52","doi-asserted-by":"crossref","unstructured":"Tran, D., Wang, H., Torresani, L., Ray, J., LeCun, Y., Paluri, M.: A closer look at spatiotemporal convolutions for action recognition. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00675"},{"key":"28_CR53","unstructured":"Vaswani, A., et al.: Attention is all you need. 
In: NIPS (2017)"},{"key":"28_CR54","doi-asserted-by":"crossref","unstructured":"Wang, H., Klaser, A., Schmid, C., Liu, C.L.: Action recognition by dense trajectories. In: CVPR (2011)","DOI":"10.1109\/CVPR.2011.5995407"},{"key":"28_CR55","doi-asserted-by":"crossref","unstructured":"Wang, H., Tran, D., Torresani, L., Feiszli, M.: Video modeling with correlation networks. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00043"},{"key":"28_CR56","doi-asserted-by":"crossref","unstructured":"Wang, L., Tong, Z., Ji, B., Wu, G.: TDN: temporal difference networks for efficient action recognition. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00193"},{"key":"28_CR57","doi-asserted-by":"crossref","unstructured":"Wang, L., et al.: Temporal segment networks: towards good practices for deep action recognition. In: ECCV (2016)","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"28_CR58","doi-asserted-by":"crossref","unstructured":"Wang, X., Girshick, R., Gupta, A., He, K.: Non-local neural networks. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00813"},{"key":"28_CR59","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"413","DOI":"10.1007\/978-3-030-01228-1_25","volume-title":"Computer Vision \u2013 ECCV 2018","author":"X Wang","year":"2018","unstructured":"Wang, X., Gupta, A.: Videos as space-time region graphs. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11209, pp. 413\u2013431. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01228-1_25"},{"key":"28_CR60","doi-asserted-by":"crossref","unstructured":"Wang, Z., She, Q., Smolic, A.: ACTION-Net: multipath excitation for action recognition. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01301"},{"key":"28_CR61","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"318","DOI":"10.1007\/978-3-030-01267-0_19","volume-title":"Computer Vision \u2013 ECCV 2018","author":"S Xie","year":"2018","unstructured":"Xie, S., Sun, C., Huang, J., Tu, Z., Murphy, K.: Rethinking spatiotemporal feature learning: speed-accuracy trade-offs in video classification. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11219, pp. 318\u2013335. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01267-0_19"},{"key":"28_CR62","doi-asserted-by":"crossref","unstructured":"Yao, T., Zhang, Y., Qiu, Z., Pan, Y., Mei, T.: SeCo: exploring sequence supervision for unsupervised representation learning. In: AAAI (2021)","DOI":"10.1609\/aaai.v35i12.17274"},{"key":"28_CR63","doi-asserted-by":"crossref","unstructured":"Yuan, L., et al.: Tokens-to-token ViT: training vision transformers from scratch on ImageNet. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"28_CR64","unstructured":"Zhao, Y., Xiong, Y., Lin, D.: Trajectory convolution for action recognition. In: NeurIPS (2018)"},{"key":"28_CR65","doi-asserted-by":"crossref","unstructured":"Zhi, Y., Tong, Z., Wang, L., Wu, G.: MGSampler: an explainable sampling strategy for video action recognition. 
In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00154"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_28","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T02:00:58Z","timestamp":1701309658000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_28"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":65,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_28","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
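
The record above is a standard Crossref REST API "work" message. As a minimal, illustrative sketch (not part of the record itself), the same data can be re-fetched and inspected in Python; this assumes the third-party `requests` package and the public `https://api.crossref.org/works/{DOI}` endpoint, with field names taken from the record above.

import requests

# DOI of the chapter described by the record above.
DOI = "10.1007/978-3-031-19833-5_28"

# The Crossref API wraps the work record in the same envelope seen above:
# {"status": "ok", "message-type": "work", ..., "message": {...}}.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

print(work["title"][0])                 # Dynamic Temporal Filtering in Video Models
print(work["type"], work["page"])       # book-chapter 475-492
print(len(work.get("reference", [])))   # 65 reference entries

# Each reference entry carries at least a "key"; most also have
# "unstructured" and "DOI" fields, as in the record above.
for ref in work.get("reference", [])[:3]:
    print(ref.get("unstructured", ref["key"]))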