{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T04:43:53Z","timestamp":1774586633253,"version":"3.50.1"},"publisher-location":"Cham","reference-count":58,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030664145","type":"print"},{"value":"9783030664152","type":"electronic"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-66415-2_53","type":"book-chapter","created":{"date-parts":[[2021,1,9]],"date-time":"2021-01-09T14:56:04Z","timestamp":1610204164000},"page":"756-773","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":34,"title":["Noisy Student Training Using Body Language Dataset Improves Facial Expression Recognition"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4427-142X","authenticated-orcid":false,"given":"Vikas","family":"Kumar","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5148-6296","authenticated-orcid":false,"given":"Shivansh","family":"Rao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7178-8072","authenticated-orcid":false,"given":"Li","family":"Yu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,1,10]]},"reference":[{"key":"53_CR1","doi-asserted-by":"crossref","unstructured":"Dhall, A., Goecke, R., Lucey, S., Gedeon, T.: Collecting large, richly annotated facial-expression databases from movies. IEEE Multimedia (3), 34\u201341 (2012)","DOI":"10.1109\/MMUL.2012.26"},{"key":"53_CR2","doi-asserted-by":"crossref","unstructured":"Fan, Y., Lam, J.C., Li, V.O.: Video-based emotion recognition using deeply-supervised neural networks. In: Proceedings of the 20th ACM International Conference on Multimodal Interaction, pp. 584\u2013588(2018)","DOI":"10.1145\/3242969.3264978"},{"key":"53_CR3","doi-asserted-by":"crossref","unstructured":"Lu, C., et al.: Multiple spatio-temporal feature learning for video-based emotion recognition in the wild. In: Proceedings of the 20th ACM International Conference on Multimodal Interaction, pp. 646\u2013652 (2018)","DOI":"10.1145\/3242969.3264992"},{"key":"53_CR4","doi-asserted-by":"crossref","unstructured":"Vielzeuf, V., Pateux, S., Jurie, F.: Temporal multimodal fusion for video emotion classification in the wild. In: Proceedings of the 19th ACM International Conference on Multimodal Interaction, pp. 569\u2013576 (2017)","DOI":"10.1145\/3136755.3143011"},{"key":"53_CR5","doi-asserted-by":"crossref","unstructured":"Dhall, A.: Emotiw 2019: Automatic emotion, engagement and cohesion prediction tasks. In: 2019 International Conference on Multimodal Interaction, pp. 546\u2013550 (2019)","DOI":"10.1145\/3340555.3355710"},{"key":"53_CR6","doi-asserted-by":"crossref","unstructured":"Littlewort, G., Bartlett, M.S., Fasel, I., Susskind, J., Movellan, J.: Dynamics of facial expression extracted automatically from video. In: 2004 Conference on Computer Vision and Pattern Recognition Workshop, p. 80. IEEE (2004)","DOI":"10.1109\/CVPR.2004.327"},{"issue":"6","key":"53_CR7","doi-asserted-by":"publisher","first-page":"803","DOI":"10.1016\/j.imavis.2008.08.005","volume":"27","author":"C Shan","year":"2009","unstructured":"Shan, C., Gong, S., McOwan, P.W.: Facial expression recognition based on local binary patterns: a comprehensive study. Image Vis. Comput. 27(6), 803\u2013816 (2009)","journal-title":"Image Vis. Comput."},{"key":"53_CR8","unstructured":"Knyazev, B., Shvetsov, R., Efremova, N., Kuharenko, A.: Convolutional neural networks pretrained on large face recognition datasets for emotion classification from video. arXiv preprint arXiv:1711.04598 (2017)"},{"key":"53_CR9","unstructured":"Tang, Y.: Deep learning using linear support vector machines. arXiv preprint arXiv:1306.0239 (2013)"},{"key":"53_CR10","doi-asserted-by":"crossref","unstructured":"Meng, D., Peng, X., Wang, K., Qiao, Y.: frame attention networks for facial expression recognition in videos. In: 2019 IEEE International Conference on Image Processing (ICIP), pp. 3866\u20133870. IEEE (2019)","DOI":"10.1109\/ICIP.2019.8803603"},{"issue":"1","key":"53_CR11","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s11263-019-01215-y","volume":"128","author":"Y Luo","year":"2020","unstructured":"Luo, Y., Ye, J., Adams, R.B., Li, J., Newman, M.G., Wang, J.Z.: Arbee: towards automated recognition of bodily expression of emotion in the wild. Int. J. Comput. Vis. 128(1), 1\u201325 (2020)","journal-title":"Int. J. Comput. Vis."},{"key":"53_CR12","doi-asserted-by":"crossref","unstructured":"Xie, Q., Hovy, E., Luong, M.T., Le, Q.V.: Self-training with noisy student improves imagenet classification. arXiv preprint arXiv:1911.04252 (2019)","DOI":"10.1109\/CVPR42600.2020.01070"},{"key":"53_CR13","doi-asserted-by":"crossref","unstructured":"Lucey, P., Cohn, J.F., Kanade, T., Saragih, J., Ambadar, Z., Matthews, I.: The extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression. In: IEEE Computer Society Conference on Computer Vision and Pattern Recognition-Workshops, vol. 2010, pp. 94\u2013101. IEEE (2010)","DOI":"10.1109\/CVPRW.2010.5543262"},{"key":"53_CR14","doi-asserted-by":"crossref","unstructured":"Sikka, K., Dykstra, K., Sathyanarayana, S., Littlewort, G., Bartlett, M.: Multiple kernel learning for emotion recognition in the wild. In: Proceedings of the 15th ACM on International conference on multimodal interaction, pp. 517\u2013524 (2013)","DOI":"10.1145\/2522848.2531741"},{"key":"53_CR15","doi-asserted-by":"crossref","unstructured":"Liu, M., Wang, R., Huang, Z., Shan, S., Chen, X.: Partial least squares regression on grassmannian manifold for emotion recognition. In: Proceedings of the 15th ACM on International conference on multimodal interaction, pp. 525\u2013530 (2013)","DOI":"10.1145\/2522848.2531738"},{"key":"53_CR16","doi-asserted-by":"crossref","unstructured":"Chen, J., Chen, Z., Chi, Z., Fu, H.: Emotion recognition in the wild with feature fusion and multiple kernel learning. In: Proceedings of the 16th International Conference on Multimodal Interaction, pp. 508\u2013513 (2014)","DOI":"10.1145\/2663204.2666277"},{"key":"53_CR17","doi-asserted-by":"crossref","unstructured":"Liu, C., Tang, T., Lv, K., Wang, M.: Multi-feature based emotion recognition for video clips. In: Proceedings of the 20th ACM International Conference on Multimodal Interaction, pp. 630\u2013634 (2018)","DOI":"10.1145\/3242969.3264989"},{"key":"53_CR18","doi-asserted-by":"crossref","unstructured":"Fan, Y., Lu, X., Li, D., Liu, Y.: Video-based emotion recognition using cnn-rnn and c3d hybrid networks. In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 445\u2013450 (2016)","DOI":"10.1145\/2993148.2997632"},{"key":"53_CR19","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., Paluri, M.: Learning spatiotemporal features with 3D convolutional networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4489\u20134497 (2015)","DOI":"10.1109\/ICCV.2015.510"},{"issue":"8","key":"53_CR20","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Comput. 9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"key":"53_CR21","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"323","DOI":"10.1007\/978-3-030-27202-9_29","volume-title":"Image Analysis and Recognition","author":"M Aminbeidokhti","year":"2019","unstructured":"Aminbeidokhti, M., Pedersoli, M., Cardinal, P., Granger, E.: Emotion recognition with spatial attention and temporal softmax pooling. In: Karray, F., Campilho, A., Yu, A. (eds.) ICIAR 2019. LNCS, vol. 11662, pp. 323\u2013331. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-27202-9_29"},{"issue":"9","key":"53_CR22","doi-asserted-by":"publisher","first-page":"e0222713","DOI":"10.1371\/journal.pone.0222713","volume":"14","author":"Y Fang","year":"2019","unstructured":"Fang, Y., Gao, J., Huang, C., Peng, H., Wu, R.: Self multi-head attention-based convolutional neural networks for fake news detection. PloS one 14(9), e0222713 (2019)","journal-title":"PloS one"},{"key":"53_CR23","unstructured":"Lin, Z., et al.: A structured self-attentive sentence embedding. arXiv preprint arXiv:1703.03130 (2017)"},{"key":"53_CR24","doi-asserted-by":"publisher","first-page":"4057","DOI":"10.1109\/TIP.2019.2956143","volume":"29","author":"K Wang","year":"2020","unstructured":"Wang, K., Peng, X., Yang, J., Meng, D., Qiao, Y.: Region attention networks for pose and occlusion robust facial expression recognition. IEEE Trans. Image Process. 29, 4057\u20134069 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"53_CR25","doi-asserted-by":"publisher","first-page":"2015","DOI":"10.3389\/fpsyg.2018.02015","volume":"9","author":"X Zeng","year":"2018","unstructured":"Zeng, X., Wu, Q., Zhang, S., Liu, Z., Zhou, Q., Zhang, M.: A false trail to follow: differential effects of the facial feedback signals from the upper and lower face on the recognition of micro-expressions. Front. Psychol. 9, 2015 (2018)","journal-title":"Front. Psychol."},{"key":"53_CR26","doi-asserted-by":"crossref","unstructured":"Acharya, D., Huang, Z., Pani Paudel, D., Van Gool, L.: Covariance pooling for facial expression recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 367\u2013374 (2018)","DOI":"10.1109\/CVPRW.2018.00077"},{"key":"53_CR27","unstructured":"Valstar, M., Pantic, M.: Induced disgust, happiness and surprise: an addition to the mmi facial expression database. In: Proceedings of 3rd International Workshop on EMOTION (satellite of LREC): Corpora for Research on Emotion and Affect, Paris, France, p. 65 (2010)"},{"key":"53_CR28","unstructured":"Lyons, M.J., Akamatsu, S., Kamachi, M., Gyoba, J., Budynek, J.: The Japanese female facial expression (jaffe) database. In: Proceedings of Third International Conference on Automatic Face and Gesture Recognition, pp. 14\u201316 (1998)"},{"key":"53_CR29","unstructured":"Kollias, D., Zafeiriou, S.: Aff-wild2: extending the aff-wild database for affect recognition. arXiv preprint arXiv:1811.07770 (2018)"},{"key":"53_CR30","doi-asserted-by":"crossref","unstructured":"Yarowsky, D.: Unsupervised word sense disambiguation rivaling supervised methods. In: 33rd Annual Meeting of the Association for Computational Linguistics, pp. 189\u2013196 (1995)","DOI":"10.3115\/981658.981684"},{"key":"53_CR31","unstructured":"Riloff, E.: Automatically generating extraction patterns from untagged text. In: Proceedings of the National Conference on Artificial Intelligence, pp. 1044\u20131049 (1996)"},{"key":"53_CR32","doi-asserted-by":"crossref","unstructured":"Radosavovic, I., Doll\u00e1r, P., Girshick, R., Gkioxari, G., He, K.: Data distillation: Towards omni-supervised learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4119\u20134128 (2018)","DOI":"10.1109\/CVPR.2018.00433"},{"key":"53_CR33","unstructured":"Bachman, P., Alsharif, O., Precup, D.: Learning with pseudo-ensembles. In: Advances in Neural Information Processing Systems, pp. 3365\u20133373 (2014)"},{"key":"53_CR34","unstructured":"Rasmus, A., Berglund, M., Honkala, M., Valpola, H., Raiko, T.: Semi-supervised learning with ladder networks. In: Advances in Neural Information Processing Systems, pp. 3546\u20133554 (2015)"},{"issue":"10","key":"53_CR35","doi-asserted-by":"publisher","first-page":"1499","DOI":"10.1109\/LSP.2016.2603342","volume":"23","author":"K Zhang","year":"2016","unstructured":"Zhang, K., Zhang, Z., Li, Z., Qiao, Y.: Joint face detection and alignment using multitask cascaded convolutional networks. IEEE Signal Process. Lett. 23(10), 1499\u20131503 (2016)","journal-title":"IEEE Signal Process. Lett."},{"key":"53_CR36","unstructured":"Jiang, Y., et al.: Enlightengan: Deep light enhancement without paired supervision. arXiv preprint arXiv:1906.06972 (2019)"},{"issue":"9","key":"53_CR37","doi-asserted-by":"publisher","first-page":"4193","DOI":"10.1109\/TIP.2017.2689999","volume":"26","author":"K Zhang","year":"2017","unstructured":"Zhang, K., Huang, Y., Du, Y., Wang, L.: Facial expression recognition based on deep evolutional spatial-temporal networks. IEEE Trans. Image Process. 26(9), 4193\u20134203 (2017)","journal-title":"IEEE Trans. Image Process."},{"key":"53_CR38","doi-asserted-by":"crossref","unstructured":"Jung, H., Lee, S., Yim, J., Park, S., Kim, J.: Joint fine-tuning in deep neural networks for facial expression recognition. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2983\u20132991 (2015)","DOI":"10.1109\/ICCV.2015.341"},{"key":"53_CR39","doi-asserted-by":"crossref","unstructured":"Cai, J., Meng, Z., Khan, A.S., Li, Z., O\u2019Reilly, J., Tong, Y.: Island loss for learning discriminative features in facial expression recognition. In: 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pp. 302\u2013309. IEEE (2018)","DOI":"10.1109\/FG.2018.00051"},{"key":"53_CR40","doi-asserted-by":"crossref","unstructured":"Sikka, K., Sharma, G., Bartlett, M.: Lomo: latent ordinal model for facial analysis in videos. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5580\u20135589 (2016)","DOI":"10.1109\/CVPR.2016.602"},{"key":"53_CR41","doi-asserted-by":"crossref","unstructured":"Gu, C., et al.: Ava: a video dataset of spatio-temporally localized atomic visual actions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6047\u20136056 (2018)","DOI":"10.1109\/CVPR.2018.00633"},{"issue":"Jul","key":"53_CR42","first-page":"1755","volume":"10","author":"DE King","year":"2009","unstructured":"King, D.E.: Dlib-ml: a machine learning toolkit. J. Mach. Learn. Res. 10(Jul), 1755\u20131758 (2009)","journal-title":"J. Mach. Learn. Res."},{"key":"53_CR43","unstructured":"Anila, S., Devarajan, N.: Preprocessing technique for face recognition applications under varying illumination conditions. Glob. J. Comput. Sci. Technol. (2012)"},{"issue":"4","key":"53_CR44","doi-asserted-by":"publisher","first-page":"712","DOI":"10.3390\/s17040712","volume":"17","author":"Y Liu","year":"2017","unstructured":"Liu, Y., Li, Y., Ma, X., Song, R.: Facial expression recognition with fusion features extracted from salient facial areas. Sensors 17(4), 712 (2017)","journal-title":"Sensors"},{"issue":"6","key":"53_CR45","first-page":"429","volume":"7","author":"S Wang","year":"2012","unstructured":"Wang, S., Li, W., Wang, Y., Jiang, Y., Jiang, S., Zhao, R.: An improved difference of gaussian filter in face recognition. J. Multimedia 7(6), 429\u2013433 (2012)","journal-title":"J. Multimedia"},{"issue":"3","key":"53_CR46","doi-asserted-by":"publisher","first-page":"324","DOI":"10.3390\/electronics8030324","volume":"8","author":"RI Bendjillali","year":"2019","unstructured":"Bendjillali, R.I., Beladgham, M., Merit, K., Taleb-Ahmed, A.: Improved facial expression recognition based on dwt feature for deep CNN. Electronics 8(3), 324 (2019)","journal-title":"Electronics"},{"issue":"2","key":"53_CR47","doi-asserted-by":"publisher","first-page":"197","DOI":"10.1007\/s10015-007-0428-x","volume":"11","author":"M Karthigayan","year":"2007","unstructured":"Karthigayan, M., et al.: Development of a personified face emotion recognition technique using fitness function. Artif. Life Rob. 11(2), 197\u2013203 (2007)","journal-title":"Artif. Life Rob."},{"key":"53_CR48","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1007\/978-3-319-24574-4_28","volume-title":"Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2015","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-Net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28"},{"key":"53_CR49","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"53_CR50","doi-asserted-by":"crossref","unstructured":"Barsoum, E., Zhang, C., Ferrer, C.C., Zhang, Z.: Training deep networks for facial expression recognition with crowd-sourced label distribution. In: Proceedings of the 18th ACM International Conference on Multimodal Interaction, pp. 279\u2013283 (2016)","DOI":"10.1145\/2993148.2993165"},{"key":"53_CR51","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: Advances in Neural Information Processing Systems, pp. 1097\u20131105 (2012)"},{"key":"53_CR52","doi-asserted-by":"crossref","unstructured":"Cubuk, E.D., Zoph, B., Shlens, J., Le, Q.V.: Randaugment: Practical data augmentation with no separate search. arXiv preprint arXiv:1909.13719 (2019)","DOI":"10.1109\/CVPRW50498.2020.00359"},{"key":"53_CR53","doi-asserted-by":"publisher","first-page":"27","DOI":"10.1016\/j.neucom.2018.03.068","volume":"309","author":"J Yan","year":"2018","unstructured":"Yan, J., Zheng, W., Cui, Z., Tang, C., Zhang, T., Zong, Y.: Multi-cue fusion for emotion recognition in the wild. Neurocomputing 309, 27\u201335 (2018)","journal-title":"Neurocomputing"},{"key":"53_CR54","doi-asserted-by":"crossref","unstructured":"Hu, P., Cai, D., Wang, S., Yao, A., Chen, Y.: Learning supervised scoring ensemble for emotion recognition in the wild. In: Proceedings of the 19th ACM International Conference on Multimodal Interaction, pp. 553\u2013560 (2017)","DOI":"10.1145\/3136755.3143009"},{"key":"53_CR55","doi-asserted-by":"publisher","first-page":"41273","DOI":"10.1109\/ACCESS.2019.2907327","volume":"7","author":"JH Kim","year":"2019","unstructured":"Kim, J.H., Kim, B.G., Roy, P.P., Jeong, D.M.: Efficient facial expression recognition algorithm based on hierarchical deep neural network structure. IEEE Access 7, 41273\u201341285 (2019)","journal-title":"IEEE Access"},{"key":"53_CR56","doi-asserted-by":"crossref","unstructured":"Vielzeuf, V., Kervadec, C., Pateux, S., Lechervy, A., Jurie, F.: An occam\u2019s razor view on learning audiovisual emotion recognition with small training sets. In: Proceedings of the 20th ACM International Conference on Multimodal Interaction, pp. 589\u2013593 (2018)","DOI":"10.1145\/3242969.3264980"},{"key":"53_CR57","doi-asserted-by":"publisher","first-page":"49","DOI":"10.1016\/j.patrec.2017.10.022","volume":"119","author":"N Sun","year":"2019","unstructured":"Sun, N., Li, Q., Huan, R., Liu, J., Han, G.: Deep spatial-temporal feature fusion for facial expression recognition in static images. Pattern Recogn. Lett. 119, 49\u201361 (2019)","journal-title":"Pattern Recogn. Lett."},{"key":"53_CR58","doi-asserted-by":"crossref","unstructured":"Kuo, C.M., Lai, S.H., Sarkis, M.: A compact deep learning model for robust facial expression recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 2121\u20132129 (2018)","DOI":"10.1109\/CVPRW.2018.00286"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2020 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-66415-2_53","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,9]],"date-time":"2025-01-09T00:12:08Z","timestamp":1736381528000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-66415-2_53"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030664145","9783030664152"],"references-count":58,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-66415-2_53","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"10 January 2021","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Glasgow","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 August 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2020.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OpenReview","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5025","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1360","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"27% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 pandemic. From the ECCV Workshops 249 full papers, 18 short papers, and 21 further contributions were published out of a total of 467 submissions.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}