{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T17:19:33Z","timestamp":1774718373446,"version":"3.50.1"},"publisher-location":"Cham","reference-count":45,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031200670","type":"print"},{"value":"9783031200687","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20068-7_31","type":"book-chapter","created":{"date-parts":[[2022,11,10]],"date-time":"2022-11-10T08:06:38Z","timestamp":1668067598000},"page":"538-554","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":197,"title":["FAST-VQA: Efficient End-to-End Video Quality Assessment with\u00a0Fragment Sampling"],"prefix":"10.1007","author":[{"given":"Haoning","family":"Wu","sequence":"first","affiliation":[]},{"given":"Chaofeng","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Jingwen","family":"Hou","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Liao","sequence":"additional","affiliation":[]},{"given":"Annan","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Wenxiu","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Qiong","family":"Yan","sequence":"additional","affiliation":[]},{"given":"Weisi","family":"Lin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,11]]},"reference":[{"key":"31_CR1","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lucic, M., Schmid, C.: ViViT: a video vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 6836\u20136846, October 2021","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"31_CR2","doi-asserted-by":"crossref","unstructured":"Caba Heilbron, F., Escorcia, V., Ghanem, B., Carlos Niebles, J.: ActivityNet: a large-scale video benchmark for human activity understanding. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2015","DOI":"10.1109\/CVPR.2015.7298698"},{"issue":"4","key":"31_CR3","doi-asserted-by":"publisher","first-page":"1903","DOI":"10.1109\/TCSVT.2021.3088505","volume":"32","author":"B Chen","year":"2021","unstructured":"Chen, B., Zhu, L., Li, G., Lu, F., Fan, H., Wang, S.: Learning generalized spatial-temporal deep feature representation for no-reference video quality assessment. IEEE Trans. Circ. Syst. Video Technol. 32(4), 1903\u20131916 (2021)","journal-title":"IEEE Trans. Circ. Syst. Video Technol."},{"key":"31_CR4","doi-asserted-by":"crossref","unstructured":"Cho, K., et al.: Learning phrase representations using RNN encoder-decoder for statistical machine translation. In: Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 1724\u20131734. 
ACL (2014)","DOI":"10.3115\/v1\/D14-1179"},{"key":"31_CR5","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 248\u2013255 (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"31_CR6","doi-asserted-by":"crossref","unstructured":"Fan, H., et al.: Multiscale vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 6824\u20136835, October 2021","DOI":"10.1109\/ICCV48922.2021.00675"},{"issue":"9","key":"31_CR7","doi-asserted-by":"publisher","first-page":"2061","DOI":"10.1109\/TCSVT.2017.2707479","volume":"28","author":"D Ghadiyaram","year":"2018","unstructured":"Ghadiyaram, D., Pan, J., Bovik, A.C., Moorthy, A.K., Panda, P., Yang, K.C.: In-capture mobile video distortions: a study of subjective behavior and objective algorithms. IEEE Trans. Circ. Syst. Video Technol. 28(9), 2061\u20132077 (2018)","journal-title":"IEEE Trans. Circ. Syst. Video Technol."},{"key":"31_CR8","doi-asserted-by":"publisher","first-page":"72139","DOI":"10.1109\/ACCESS.2021.3077642","volume":"9","author":"F G\u00f6tz-Hahn","year":"2021","unstructured":"G\u00f6tz-Hahn, F., Hosu, V., Lin, H., Saupe, D.: KonVid-150k: a dataset for no-reference video quality assessment of videos in-the-wild. IEEE Access 9, 72139\u201372160 (2021)","journal-title":"IEEE Access"},{"key":"31_CR9","doi-asserted-by":"crossref","unstructured":"Gu, C., et al.: AVA: a video dataset of spatio-temporally localized atomic visual actions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2018","DOI":"10.1109\/CVPR.2018.00633"},{"key":"31_CR10","doi-asserted-by":"crossref","unstructured":"Hara, K., Kataoka, H., Satoh, Y.: Learning spatio-temporal features with 3D residual networks for action recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) Workshops, pp. 3154\u20133160 (2017)","DOI":"10.1109\/ICCVW.2017.373"},{"key":"31_CR11","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"31_CR12","doi-asserted-by":"crossref","unstructured":"Hosu, V., et al.: The Konstanz natural video database (KoNViD-1k). In: Ninth International Conference on Quality of Multimedia Experience (QoMEX), pp. 1\u20136 (2017)","DOI":"10.1109\/QoMEX.2017.7965673"},{"key":"31_CR13","doi-asserted-by":"publisher","first-page":"4041","DOI":"10.1109\/TIP.2020.2967829","volume":"29","author":"V Hosu","year":"2020","unstructured":"Hosu, V., Lin, H., Sziranyi, T., Saupe, D.: KonIQ-10k: an ecologically valid database for deep learning of blind image quality assessment. IEEE Trans. Image Process. 29, 4041\u20134056 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"31_CR14","doi-asserted-by":"crossref","unstructured":"Kang, L., Ye, P., Li, Y., Doermann, D.: Convolutional neural networks for no-reference image quality assessment. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2014)","DOI":"10.1109\/CVPR.2014.224"},{"key":"31_CR15","doi-asserted-by":"crossref","unstructured":"Kang, L., Ye, P., Li, Y., Doermann, D.: Simultaneous estimation of image quality and distortion via multi-task convolutional neural networks. In: IEEE International Conference on Image Processing (ICIP) (2015)","DOI":"10.1109\/ICIP.2015.7351311"},{"key":"31_CR16","unstructured":"Kay, W., et al.: The kinetics human action video dataset. ArXiv abs\/1705.06950 (2017)"},{"key":"31_CR17","doi-asserted-by":"crossref","unstructured":"Ke, J., Wang, Q., Wang, Y., Milanfar, P., Yang, F.: MUSIQ: multi-scale image quality transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 5148\u20135157, October 2021","DOI":"10.1109\/ICCV48922.2021.00510"},{"key":"31_CR18","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"224","DOI":"10.1007\/978-3-030-01246-5_14","volume-title":"Computer Vision \u2013 ECCV 2018","author":"W Kim","year":"2018","unstructured":"Kim, W., Kim, J., Ahn, S., Kim, J., Lee, S.: Deep video quality assessor: from spatio-temporal visual sensitivity to a convolutional neural aggregation network. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11205, pp. 224\u2013241. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01246-5_14"},{"key":"31_CR19","unstructured":"Kolesnikov, A., et al.: An image is worth $$16\\times 16$$ words: transformers for image recognition at scale (2021)"},{"issue":"12","key":"31_CR20","doi-asserted-by":"publisher","first-page":"5923","DOI":"10.1109\/TIP.2019.2923051","volume":"28","author":"J Korhonen","year":"2019","unstructured":"Korhonen, J.: Two-level approach for no-reference consumer video quality assessment. IEEE Trans. Image Process. 28(12), 5923\u20135938 (2019)","journal-title":"IEEE Trans. Image Process."},{"key":"31_CR21","doi-asserted-by":"crossref","unstructured":"Korhonen, J., Su, Y., You, J.: Blind natural video quality prediction via statistical temporal features and deep spatial features. In: Proceedings of the 28th ACM International Conference on Multimedia, MM 2020, pp. 3311\u20133319. Association for Computing Machinery, New York (2020)","DOI":"10.1145\/3394171.3413845"},{"issue":"9","key":"31_CR22","doi-asserted-by":"publisher","first-page":"5944","DOI":"10.1109\/TCSVT.2022.3164467","volume":"32","author":"B Li","year":"2022","unstructured":"Li, B., Zhang, W., Tian, M., Zhai, G., Wang, X.: Blindly assess quality of in-the-wild videos via quality-aware pre-training and motion perception. IEEE Trans. Circ. Syst. Video Technol. 32(9), 5944\u20135958 (2022)","journal-title":"IEEE Trans. Circ. Syst. Video Technol."},{"key":"31_CR23","doi-asserted-by":"crossref","unstructured":"Li, D., Jiang, T., Jiang, M.: Quality assessment of in-the-wild videos. In: Proceedings of the 27th ACM International Conference on Multimedia, MM 2019, pp. 2351\u20132359. Association for Computing Machinery, New York (2019)","DOI":"10.1145\/3343031.3351028"},{"issue":"4","key":"31_CR24","doi-asserted-by":"publisher","first-page":"1238","DOI":"10.1007\/s11263-020-01408-w","volume":"129","author":"D Li","year":"2021","unstructured":"Li, D., Jiang, T., Jiang, M.: Unified quality assessment of in-the-wild videos with mixed datasets training. Int. J. Comput. Vis. 129(4), 1238\u20131257 (2021). 
https:\/\/doi.org\/10.1007\/s11263-020-01408-w","journal-title":"Int. J. Comput. Vis."},{"issue":"5","key":"31_CR25","doi-asserted-by":"publisher","first-page":"1221","DOI":"10.1109\/TMM.2018.2875354","volume":"21","author":"D Li","year":"2019","unstructured":"Li, D., Jiang, T., Lin, W., Jiang, M.: Which has better visual quality: the clear blue sky or a blurry animal? IEEE Trans. Multimedia 21(5), 1221\u20131234 (2019)","journal-title":"IEEE Trans. Multimedia"},{"key":"31_CR26","doi-asserted-by":"crossref","unstructured":"Liao, L., et al.: Exploring the effectiveness of video perceptual representation in blind video quality assessment. In: Proceedings of the 30th ACM International Conference on Multimedia (ACM MM) (2022)","DOI":"10.1145\/3503161.3547849"},{"key":"31_CR27","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"31_CR28","unstructured":"Liu, Z., et al.: Video swin transformer. arXiv preprint arXiv:2106.13230 (2021)"},{"issue":"12","key":"31_CR29","doi-asserted-by":"publisher","first-page":"4695","DOI":"10.1109\/TIP.2012.2214050","volume":"21","author":"A Mittal","year":"2012","unstructured":"Mittal, A., Moorthy, A.K., Bovik, A.C.: No-reference image quality assessment in the spatial domain. IEEE Trans. Image Process. 21(12), 4695\u20134708 (2012)","journal-title":"IEEE Trans. Image Process."},{"issue":"1","key":"31_CR30","doi-asserted-by":"publisher","first-page":"289","DOI":"10.1109\/TIP.2015.2502725","volume":"25","author":"A Mittal","year":"2016","unstructured":"Mittal, A., Saad, M.A., Bovik, A.C.: A completely blind video integrity oracle. IEEE Trans. Image Process. 25(1), 289\u2013300 (2016)","journal-title":"IEEE Trans. Image Process."},{"issue":"7","key":"31_CR31","doi-asserted-by":"publisher","first-page":"3073","DOI":"10.1109\/TIP.2016.2562513","volume":"25","author":"M Nuutinen","year":"2016","unstructured":"Nuutinen, M., Virtanen, T., Vaahteranoksa, M., Vuori, T., Oittinen, P., H\u00e4kkinen, J.: CVD2014\u2013a database for evaluating no-reference video quality assessment algorithms. IEEE Trans. Image Process. 25(7), 3073\u20133086 (2016)","journal-title":"IEEE Trans. Image Process."},{"issue":"8","key":"31_CR32","doi-asserted-by":"publisher","first-page":"3339","DOI":"10.1109\/TIP.2012.2191563","volume":"21","author":"MA Saad","year":"2012","unstructured":"Saad, M.A., Bovik, A.C., Charrier, C.: Blind image quality assessment: a natural scene statistics approach in the DCT domain. IEEE Trans. Image Process. 21(8), 3339\u20133352 (2012)","journal-title":"IEEE Trans. Image Process."},{"issue":"2","key":"31_CR33","doi-asserted-by":"publisher","first-page":"612","DOI":"10.1109\/TIP.2018.2869673","volume":"28","author":"Z Sinno","year":"2019","unstructured":"Sinno, Z., Bovik, A.C.: Large-scale study of perceptual video quality. IEEE Trans. Image Process. 28(2), 612\u2013627 (2019)","journal-title":"IEEE Trans. Image Process."},{"key":"31_CR34","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Ioffe, S., Vanhoucke, V., Alemi, A.A.: Inception-v4, inception-ResNet and the impact of residual connections on learning. In: Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence, AAAI 2017, pp. 4278\u20134284. 
AAAI Press (2017)","DOI":"10.1609\/aaai.v31i1.11231"},{"key":"31_CR35","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u2019egou, H.: Training data-efficient image transformers & distillation through attention. In: Proceedings of the International Conference on Machine Learning (ICML) (2021)"},{"key":"31_CR36","doi-asserted-by":"publisher","first-page":"4449","DOI":"10.1109\/TIP.2021.3072221","volume":"30","author":"Z Tu","year":"2021","unstructured":"Tu, Z., Wang, Y., Birkbeck, N., Adsumilli, B., Bovik, A.C.: UGC-VQA: benchmarking blind video quality assessment for user generated content. IEEE Trans. Image Process. 30, 4449\u20134464 (2021)","journal-title":"IEEE Trans. Image Process."},{"key":"31_CR37","doi-asserted-by":"publisher","first-page":"425","DOI":"10.1109\/OJSP.2021.3090333","volume":"2","author":"Z Tu","year":"2021","unstructured":"Tu, Z., Yu, X., Wang, Y., Birkbeck, N., Adsumilli, B., Bovik, A.C.: RAPIQUE: rapid and accurate video quality prediction of user generated content. IEEE Open J. Sig. Process. 2, 425\u2013440 (2021)","journal-title":"IEEE Open J. Sig. Process."},{"key":"31_CR38","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: Rich features for perceptual quality assessment of UGC videos. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 13435\u201313444, June 2021","DOI":"10.1109\/CVPR46437.2021.01323"},{"key":"31_CR39","unstructured":"Wu, H., et al.: DisCoVQA: temporal distortion-content transformers for video quality assessment. arXiv preprint arXiv: 2206.09853 (2022)"},{"key":"31_CR40","doi-asserted-by":"crossref","unstructured":"Yim, J.G., Wang, Y., Birkbeck, N., Adsumilli, B.: Subjective quality assessment for YouTube UGC dataset. In: 2020 IEEE International Conference on Image Processing (ICIP), pp. 131\u2013135 (2020)","DOI":"10.1109\/ICIP40778.2020.9191194"},{"key":"31_CR41","doi-asserted-by":"crossref","unstructured":"Ying, Z.A., Niu, H., Gupta, P., Mahajan, D., Ghadiyaram, D., Bovik, A.: From patches to pictures (PaQ-2-PiQ): mapping the perceptual space of picture quality. arXiv preprint arXiv:1912.10088 (2019)","DOI":"10.1109\/CVPR42600.2020.00363"},{"key":"31_CR42","doi-asserted-by":"crossref","unstructured":"Ying, Z., Mandal, M., Ghadiyaram, D., Bovik, A.: Patch-VQ: \u2018patching up\u2019 the video quality problem. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 14019\u201314029, June 2021","DOI":"10.1109\/CVPR46437.2021.01380"},{"key":"31_CR43","doi-asserted-by":"crossref","unstructured":"You, J.: Long short-term convolutional transformer for no-reference video quality assessment. In: Proceedings of the 29th ACM International Conference on Multimedia, MM 2021, pp. 2112\u20132120. Association for Computing Machinery, New York (2021)","DOI":"10.1145\/3474085.3475368"},{"key":"31_CR44","doi-asserted-by":"crossref","unstructured":"You, J., Korhonen, J.: Deep neural networks for no-reference video quality assessment. In: Proceedings of the IEEE International Conference on Image Processing (ICIP), pp. 2349\u20132353 (2019)","DOI":"10.1109\/ICIP.2019.8803395"},{"issue":"1","key":"31_CR45","doi-asserted-by":"publisher","first-page":"36","DOI":"10.1109\/TCSVT.2018.2886771","volume":"30","author":"W Zhang","year":"2020","unstructured":"Zhang, W., Ma, K., Yan, J., Deng, D., Wang, Z.: Blind image quality assessment using a deep bilinear convolutional neural network. IEEE Trans. Circ. Syst. Video Technol. 
30(1), 36\u201347 (2020)","journal-title":"IEEE Trans. Circ. Syst. Video Technol."}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20068-7_31","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,10]],"date-time":"2022-11-10T08:21:00Z","timestamp":1668068460000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20068-7_31"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031200670","9783031200687"],"references-count":45,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20068-7_31","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"11 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review 
Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
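
A note on the record above: it is the standard Crossref REST API "work" message for this chapter, retrievable at https://api.crossref.org/works/10.1007/978-3-031-20068-7_31. The following is a minimal Python sketch of fetching and summarizing such a record with the requests library; the contact address in the User-Agent header is a placeholder, and the field accesses assume the work-message layout shown above ("title" and "container-title" are lists, each "author" entry carries "given" and "family" names).

import requests

# Crossref REST API endpoint for a single work, keyed by DOI.
DOI = "10.1007/978-3-031-20068-7_31"
URL = f"https://api.crossref.org/works/{DOI}"

# Crossref etiquette: identify the client; the mailto address is a placeholder.
HEADERS = {"User-Agent": "metadata-demo/0.1 (mailto:you@example.org)"}

resp = requests.get(URL, headers=HEADERS, timeout=30)
resp.raise_for_status()
payload = resp.json()

# The envelope mirrors the record above: status and message-type, then "message".
assert payload["status"] == "ok" and payload["message-type"] == "work"
msg = payload["message"]

print("Title:      ", msg["title"][0])
print("In:         ", "; ".join(msg["container-title"]))
print("DOI:        ", msg["DOI"])
print("Pages:      ", msg["page"])
print("Authors:    ", ", ".join(f"{a['given']} {a['family']}" for a in msg["author"]))
print("Cited by:   ", msg["is-referenced-by-count"])
print("References: ", msg["references-count"])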