{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:15:23Z","timestamp":1775578523480,"version":"3.50.1"},"publisher-location":"Cham","reference-count":86,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031197772","type":"print"},{"value":"9783031197789","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19778-9_7","type":"book-chapter","created":{"date-parts":[[2022,11,2]],"date-time":"2022-11-02T20:28:41Z","timestamp":1667420921000},"page":"107-125","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":24,"title":["Pre-training Strategies and\u00a0Datasets for\u00a0Facial Representation Learning"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3185-4979","authenticated-orcid":false,"given":"Adrian","family":"Bulat","sequence":"first","affiliation":[]},{"given":"Shiyang","family":"Cheng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8794-4842","authenticated-orcid":false,"given":"Jing","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Andrew","family":"Garbett","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0196-922X","authenticated-orcid":false,"given":"Enrique","family":"Sanchez","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1803-5338","authenticated-orcid":false,"given":"Georgios","family":"Tzimiropoulos","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,3]]},"reference":[{"key":"7_CR1","doi-asserted-by":"crossref","unstructured":"Browatzki, B., Wallraven, C.: 3FabRec: fast few-shot face alignment by reconstruction. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6110\u20136120 (2020)","DOI":"10.1109\/CVPR42600.2020.00615"},{"key":"7_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"616","DOI":"10.1007\/978-3-319-48881-3_43","volume-title":"Computer Vision \u2013 ECCV 2016 Workshops","author":"A Bulat","year":"2016","unstructured":"Bulat, A., Tzimiropoulos, G.: Two-stage convolutional part heatmap regression for the 1st 3D face alignment in the wild (3DFAW) challenge. In: Hua, G., J\u00e9gou, H. (eds.) ECCV 2016. LNCS, vol. 9914, pp. 616\u2013624. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-48881-3_43"},{"key":"7_CR3","doi-asserted-by":"crossref","unstructured":"Bulat, A., Tzimiropoulos, G.: How far are we from solving the 2D & 3D face alignment problem? (and a dataset of 230,000 3D facial landmarks). In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1021\u20131030 (2017)","DOI":"10.1109\/ICCV.2017.116"},{"key":"7_CR4","doi-asserted-by":"crossref","unstructured":"Burgos-Artizzu, X.P., Perona, P., Doll\u00e1r, P.: Robust face landmark estimation under occlusion. 
In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1513\u20131520 (2013)","DOI":"10.1109\/ICCV.2013.191"},{"issue":"3","key":"7_CR5","doi-asserted-by":"publisher","first-page":"243","DOI":"10.1111\/1467-9280.00144","volume":"10","author":"AM Burton","year":"1999","unstructured":"Burton, A.M., Wilson, S., Cowan, M., Bruce, V.: Face recognition in poor-quality video: evidence from security surveillance. Psychol. Sci. 10(3), 243\u2013248 (1999)","journal-title":"Psychol. Sci."},{"key":"7_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"354","DOI":"10.1007\/978-3-319-46493-0_22","volume-title":"Computer Vision \u2013 ECCV 2016","author":"Z Cai","year":"2016","unstructured":"Cai, Z., Fan, Q., Feris, R.S., Vasconcelos, N.: A unified multi-scale deep convolutional neural network for fast object detection. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 354\u2013370. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_22"},{"key":"7_CR7","doi-asserted-by":"crossref","unstructured":"Cao, Q., Shen, L., Xie, W., Parkhi, O.M., Zisserman, A.: VGGFace2: a dataset for recognising faces across pose and age. In: 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pp. 67\u201374. IEEE (2018)","DOI":"10.1109\/FG.2018.00020"},{"key":"7_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1007\/978-3-030-01264-9_9","volume-title":"Computer Vision \u2013 ECCV 2018","author":"M Caron","year":"2018","unstructured":"Caron, M., Bojanowski, P., Joulin, A., Douze, M.: Deep clustering for unsupervised learning of visual features. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) Computer Vision \u2013 ECCV 2018. LNCS, vol. 11218, pp. 139\u2013156. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01264-9_9"},{"key":"7_CR9","unstructured":"Caron, M., Misra, I., Mairal, J., Goyal, P., Bojanowski, P., Joulin, A.: Unsupervised learning of visual features by contrasting cluster assignments. arXiv preprint arXiv:2006.09882 (2020)"},{"key":"7_CR10","unstructured":"Chen, L.C., Papandreou, G., Kokkinos, I., Murphy, K., Yuille, A.L.: Semantic image segmentation with deep convolutional nets and fully connected CRFs. arXiv preprint arXiv:1412.7062 (2014)"},{"key":"7_CR11","unstructured":"Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. arXiv (2020)"},{"key":"7_CR12","unstructured":"Chen, T., Kornblith, S., Swersky, K., Norouzi, M., Hinton, G.: Big self-supervised models are strong semi-supervised learners. arXiv preprint arXiv:2006.10029 (2020)"},{"key":"7_CR13","doi-asserted-by":"crossref","unstructured":"Chen, X., He, K.: Exploring simple Siamese representation learning. arXiv preprint arXiv:2011.10566 (2020)","DOI":"10.1109\/CVPR46437.2021.01549"},{"key":"7_CR14","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"188","DOI":"10.1007\/978-3-030-69541-5_12","volume-title":"Computer Vision \u2013 ACCV 2020","author":"S Cheng","year":"2021","unstructured":"Cheng, S., Tzimiropoulos, G., Shen, J., Pantic, M.: Faster, better and more detailed: 3D face reconstruction with graph convolutional networks. In: Ishikawa, H., Liu, C.-L., Pajdla, T., Shi, J. (eds.) ACCV 2020. LNCS, vol. 12626, pp. 188\u2013205. Springer, Cham (2021). 
https:\/\/doi.org\/10.1007\/978-3-030-69541-5_12"},{"key":"7_CR15","unstructured":"Dai, J., Li, Y., He, K., Sun, J.: R-FCN: object detection via region-based fully convolutional networks. In: NIPS (2016)"},{"key":"7_CR16","doi-asserted-by":"crossref","unstructured":"Deng, J., Guo, J., Xue, N., Zafeiriou, S.: ArcFace: additive angular margin loss for deep face recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4690\u20134699 (2019)","DOI":"10.1109\/CVPR.2019.00482"},{"key":"7_CR17","doi-asserted-by":"crossref","unstructured":"Deng, J., Guo, J., Zhou, Y., Yu, J., Kotsia, I., Zafeiriou, S.: RetinaFace: single-stage dense face localisation in the wild. arXiv preprint arXiv:1905.00641 (2019)","DOI":"10.1109\/CVPR42600.2020.00525"},{"key":"7_CR18","doi-asserted-by":"crossref","unstructured":"Dong, X., Yang, Y.: Teacher supervises students how to learn from partially labeled images for facial landmark detection. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 783\u2013792 (2019)","DOI":"10.1109\/ICCV.2019.00087"},{"key":"7_CR19","doi-asserted-by":"crossref","unstructured":"Fan, Y., Lu, X., Li, D., Liu, Y.: Video-based emotion recognition using CNN-RNN and C3D hybrid networks. In: ACM International Conference on Multimodal Interaction (2016)","DOI":"10.1145\/2993148.2997632"},{"key":"7_CR20","unstructured":"Ghiasi, G., Fowlkes, C.C.: Occlusion coherence: detecting and localizing occluded faces. arXiv preprint arXiv:1506.08347 (2015)"},{"issue":"3","key":"7_CR21","doi-asserted-by":"publisher","first-page":"863","DOI":"10.1007\/s11042-009-0417-2","volume":"51","author":"M Grgic","year":"2011","unstructured":"Grgic, M., Delac, K., Grgic, S.: SCface-surveillance cameras face database. Multimedia Tools Appl. 51(3), 863\u2013879 (2011)","journal-title":"Multimedia Tools Appl."},{"key":"7_CR22","unstructured":"Grill, J.B., et al.: Bootstrap your own latent: a new approach to self-supervised learning. arXiv preprint arXiv:2006.07733 (2020)"},{"issue":"1","key":"7_CR23","doi-asserted-by":"publisher","first-page":"68","DOI":"10.4018\/jse.2010101605","volume":"1","author":"H Gunes","year":"2010","unstructured":"Gunes, H., Pantic, M.: Automatic, dimensional and continuous emotion recognition. Int. J. Synth. Emot. (IJSE) 1(1), 68\u201399 (2010)","journal-title":"Int. J. Synth. Emot. (IJSE)"},{"key":"7_CR24","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"87","DOI":"10.1007\/978-3-319-46487-9_6","volume-title":"Computer Vision \u2013 ECCV 2016","author":"Y Guo","year":"2016","unstructured":"Guo, Y., Zhang, L., Hu, Y., He, X., Gao, J.: MS-Celeb-1M: a dataset and benchmark for large-scale face recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9907, pp. 87\u2013102. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46487-9_6"},{"key":"7_CR25","doi-asserted-by":"crossref","unstructured":"He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. arXiv (2019)","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"7_CR26","doi-asserted-by":"crossref","unstructured":"He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
9729\u20139738 (2020)","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"7_CR27","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"7_CR28","doi-asserted-by":"crossref","unstructured":"Honari, S., Molchanov, P., Tyree, S., Vincent, P., Pal, C., Kautz, J.: Improving landmark localization with semi-supervised learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1546\u20131555 (2018)","DOI":"10.1109\/CVPR.2018.00167"},{"key":"7_CR29","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4700\u20134708 (2017)","DOI":"10.1109\/CVPR.2017.243"},{"key":"7_CR30","doi-asserted-by":"publisher","first-page":"66","DOI":"10.1016\/j.imavis.2017.01.012","volume":"65","author":"H Kaya","year":"2017","unstructured":"Kaya, H., G\u00fcrp\u0131nar, F., Salah, A.A.: Video-based emotion recognition in the wild using deep transfer learning and score fusion. Image Vis. Comput. 65, 66\u201375 (2017)","journal-title":"Image Vis. Comput."},{"key":"7_CR31","doi-asserted-by":"crossref","unstructured":"Knyazev, B., Shvetsov, R., Efremova, N., Kuharenko, A.: Convolutional neural networks pretrained on large face recognition datasets for emotion classification from video. arXiv preprint arXiv:1711.04598 (2017)","DOI":"10.1109\/FG.2018.00109"},{"key":"7_CR32","doi-asserted-by":"crossref","unstructured":"Koestinger, M., Wohlhart, P., Roth, P.M., Bischof, H.: Annotated facial landmarks in the wild: a large-scale, real-world database for facial landmark localization. In: 2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops), pp. 2144\u20132151. IEEE (2011)","DOI":"10.1109\/ICCVW.2011.6130513"},{"key":"7_CR33","doi-asserted-by":"crossref","unstructured":"Kossaifi, J., Toisoul, A., Bulat, A., Panagakis, Y., Hospedales, T.M., Pantic, M.: Factorized higher-order CNNs with an application to spatio-temporal emotion estimation. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00610"},{"key":"7_CR34","doi-asserted-by":"crossref","unstructured":"Kumar, A., et al.: LUVLi face alignment: estimating landmarks\u2019 location, uncertainty, and visibility likelihood. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8236\u20138246 (2020)","DOI":"10.1109\/CVPR42600.2020.00826"},{"key":"7_CR35","unstructured":"Lee, D.H.: Pseudo-label: the simple and efficient semi-supervised learning method for deep neural networks. In: Workshop on Challenges in Representation Learning, ICML, vol. 3 (2013)"},{"key":"7_CR36","doi-asserted-by":"crossref","unstructured":"Li, J., et al.: DSFD: dual shot face detector. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5060\u20135069 (2019)","DOI":"10.1109\/CVPR.2019.00520"},{"key":"7_CR37","doi-asserted-by":"crossref","unstructured":"Li, S., Deng, W.: Deep facial expression recognition: a survey. IEEE Trans. Affect. Comput. 
13, 1195\u20131215 (2020)","DOI":"10.1109\/TAFFC.2020.2981446"},{"key":"7_CR38","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"349","DOI":"10.1007\/978-3-030-11015-4_26","volume-title":"Computer Vision \u2013 ECCV 2018 Workshops","author":"I Lim","year":"2019","unstructured":"Lim, I., Dielen, A., Campen, M., Kobbelt, L.: A simple approach to intrinsic correspondence learning on unstructured 3D\u00a0meshes. In: Leal-Taix\u00e9, L., Roth, S. (eds.) ECCV 2018. LNCS, vol. 11131, pp. 349\u2013362. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-11015-4_26"},{"key":"7_CR39","doi-asserted-by":"crossref","unstructured":"Long, J., Shelhamer, E., Darrell, T.: Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3431\u20133440 (2015)","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"7_CR40","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"185","DOI":"10.1007\/978-3-030-01216-8_12","volume-title":"Computer Vision \u2013 ECCV 2018","author":"D Mahajan","year":"2018","unstructured":"Mahajan, D., et al.: Exploring the limits of weakly supervised pretraining. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11206, pp. 185\u2013201. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01216-8_12"},{"issue":"2","key":"7_CR41","doi-asserted-by":"publisher","first-page":"151","DOI":"10.1109\/T-AFFC.2013.4","volume":"4","author":"SM Mavadati","year":"2013","unstructured":"Mavadati, S.M., Mahoor, M.H., Bartlett, K., Trinh, P., Cohn, J.F.: DISFA: a spontaneous facial action intensity database. IEEE Trans. Affect. Comput. 4(2), 151\u2013160 (2013)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"7_CR42","doi-asserted-by":"crossref","unstructured":"Maze, B., et al.: IARPA Janus benchmark-C: face dataset and protocol. In: IEEE International Conference on Biometrics (ICB) (2018)","DOI":"10.1109\/ICB2018.2018.00033"},{"key":"7_CR43","doi-asserted-by":"crossref","unstructured":"Misra, I., van der Maaten, L.: Self-supervised learning of pretext-invariant representations. arXiv (2019)","DOI":"10.1109\/CVPR42600.2020.00674"},{"issue":"1","key":"7_CR44","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/TAFFC.2017.2740923","volume":"10","author":"A Mollahosseini","year":"2017","unstructured":"Mollahosseini, A., Hasani, B., Mahoor, M.H.: AffectNet: a database for facial expression, valence, and arousal computing in the wild. IEEE Trans. Affect. Comput. 10(1), 18\u201331 (2017)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"7_CR45","doi-asserted-by":"crossref","unstructured":"Ng, H.W., Nguyen, V.D., Vonikakis, V., Winkler, S.: Deep learning for emotion recognition on small datasets using transfer learning. In: Proceedings of the 2015 ACM on International Conference on Multimodal Interaction, pp. 443\u2013449 (2015)","DOI":"10.1145\/2818346.2830593"},{"key":"7_CR46","doi-asserted-by":"crossref","unstructured":"Ntinou, I., Sanchez, E., Bulat, A., Valstar, M., Tzimiropoulos, G.: A transfer learning approach to heatmap regression for action unit intensity estimation. IEEE Trans. Affective Comput. 
(2021)","DOI":"10.1109\/TAFFC.2021.3061605"},{"key":"7_CR47","doi-asserted-by":"crossref","unstructured":"Parkhi, O.M., Vedaldi, A., Zisserman, A.: Deep face recognition (2015)","DOI":"10.5244\/C.29.41"},{"key":"7_CR48","doi-asserted-by":"crossref","unstructured":"Parkin, A., Grinchuk, O.: Recognizing multi-modal face spoofing with face recognition networks. In: CVPR-W (2019)","DOI":"10.1109\/CVPRW.2019.00204"},{"issue":"24","key":"7_CR49","doi-asserted-by":"publisher","first-page":"6171","DOI":"10.1073\/pnas.1721355115","volume":"115","author":"PJ Phillips","year":"2018","unstructured":"Phillips, P.J., et al.: Face recognition accuracy of forensic examiners, superrecognizers, and face recognition algorithms. Proc. Natl. Acad. Sci. 115(24), 6171\u20136176 (2018)","journal-title":"Proc. Natl. Acad. Sci."},{"key":"7_CR50","doi-asserted-by":"crossref","unstructured":"Qian, S., Sun, K., Wu, W., Qian, C., Jia, J.: Aggregation via separation: boosting facial landmark detector with semi-supervised style translation. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 10153\u201310163 (2019)","DOI":"10.1109\/ICCV.2019.01025"},{"issue":"1","key":"7_CR51","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1109\/TPAMI.2017.2781233","volume":"41","author":"R Ranjan","year":"2017","unstructured":"Ranjan, R., Patel, V.M., Chellappa, R.: HyperFace: a deep multi-task learning framework for face detection, landmark localization, pose estimation, and gender recognition. IEEE TPAMI 41(1), 121\u2013135 (2017)","journal-title":"IEEE TPAMI"},{"key":"7_CR52","doi-asserted-by":"crossref","unstructured":"Ranjan, R., Sankaranarayanan, S., Castillo, C.D., Chellappa, R.: An all-in-one convolutional neural network for face analysis. In: IEEE FG 2017 (2017)","DOI":"10.1109\/FG.2017.137"},{"key":"7_CR53","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. In: Advances in Neural Information Processing Systems, pp. 91\u201399 (2015)"},{"key":"7_CR54","doi-asserted-by":"crossref","unstructured":"Robinson, J.P., Livitz, G., Henon, Y., Qin, C., Fu, Y., Timoner, S.: Face recognition: too bias, or not too bias? In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 0\u20131 (2020)","DOI":"10.1109\/CVPRW50498.2020.00008"},{"key":"7_CR55","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1016\/j.imavis.2016.01.002","volume":"47","author":"C Sagonas","year":"2016","unstructured":"Sagonas, C., Antonakos, E., Tzimiropoulos, G., Zafeiriou, S., Pantic, M.: 300 faces in-the-wild challenge: database and results. Image Vis. Comput. 47, 3\u201318 (2016)","journal-title":"Image Vis. Comput."},{"key":"7_CR56","doi-asserted-by":"crossref","unstructured":"Sanchez, E., Bulat, A., Zaganidis, A., Tzimiropoulos, G.: Semi-supervised au intensity estimation with contrastive learning. arXiv preprint arXiv:2011.01864 (2020)","DOI":"10.1007\/978-3-030-69541-5_7"},{"key":"7_CR57","doi-asserted-by":"crossref","unstructured":"Schroff, F., Kalenichenko, D., Philbin, J.: FaceNet: a unified embedding for face recognition and clustering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
815\u2013823 (2015)","DOI":"10.1109\/CVPR.2015.7298682"},{"issue":"2","key":"7_CR58","doi-asserted-by":"publisher","first-page":"420","DOI":"10.1037\/0033-2909.86.2.420","volume":"86","author":"PE Shrout","year":"1979","unstructured":"Shrout, P.E., Fleiss, J.L.: Intraclass correlations: uses in assessing rater reliability. Psychol. Bull. 86(2), 420 (1979)","journal-title":"Psychol. Bull."},{"key":"7_CR59","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"key":"7_CR60","doi-asserted-by":"crossref","unstructured":"Sixta, T., Junior, J., Jacques, C., Buch-Cardona, P., Vazquez, E., Escalera, S.: FairFace challenge at ECCV 2020: analyzing bias in face recognition. arXiv preprint arXiv:2009.07838 (2020)","DOI":"10.1007\/978-3-030-65414-6_32"},{"key":"7_CR61","doi-asserted-by":"crossref","unstructured":"Taigman, Y., Yang, M., Ranzato, M., Wolf, L.: DeepFace: closing the gap to human-level performance in face verification. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1701\u20131708 (2014)","DOI":"10.1109\/CVPR.2014.220"},{"issue":"8","key":"7_CR62","doi-asserted-by":"publisher","first-page":"1301","DOI":"10.1109\/JSTSP.2017.2764438","volume":"11","author":"P Tzirakis","year":"2017","unstructured":"Tzirakis, P., Trigeorgis, G., Nicolaou, M.A., Schuller, B.W., Zafeiriou, S.: End-to-end multimodal emotion recognition using deep neural networks. IEEE J. Sel. Top. Sig. Process. 11(8), 1301\u20131309 (2017)","journal-title":"IEEE J. Sel. Top. Sig. Process."},{"key":"7_CR63","doi-asserted-by":"crossref","unstructured":"Valstar, M., et al.: AVEC 2016: depression, mood, and emotion recognition workshop and challenge. In: Proceedings of the 6th International Workshop on Audio\/Visual Emotion Challenge, pp. 3\u201310 (2016)","DOI":"10.1145\/2988257.2988258"},{"key":"7_CR64","doi-asserted-by":"crossref","unstructured":"Valstar, M.F., et al.: FERA 2015-second facial expression recognition and analysis challenge. In: 2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), vol. 6, pp. 1\u20138. IEEE (2015)","DOI":"10.1109\/FG.2015.7284874"},{"key":"7_CR65","unstructured":"Vielzeuf, V., Lechervy, A., Pateux, S., Jurie, F.: Towards a general model of knowledge for facial analysis by multi-source transfer learning (2020)"},{"key":"7_CR66","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"780","DOI":"10.1007\/978-3-030-01240-3_47","volume-title":"Computer Vision \u2013 ECCV 2018","author":"F Wang","year":"2018","unstructured":"Wang, F., et al.: The devil of face recognition is in the noise. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11213, pp. 780\u2013795. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01240-3_47"},{"issue":"7","key":"7_CR67","doi-asserted-by":"publisher","first-page":"926","DOI":"10.1109\/LSP.2018.2822810","volume":"25","author":"F Wang","year":"2018","unstructured":"Wang, F., Cheng, J., Liu, W., Liu, H.: Additive margin softmax for face verification. IEEE Signal Process. Lett. 25(7), 926\u2013930 (2018)","journal-title":"IEEE Signal Process. Lett."},{"key":"7_CR68","doi-asserted-by":"crossref","unstructured":"Wang, H., et al.: CosFace: large margin cosine loss for deep face recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
5265\u20135274 (2018)","DOI":"10.1109\/CVPR.2018.00552"},{"key":"7_CR69","doi-asserted-by":"crossref","unstructured":"Wang, J., et al.: Deep high-resolution representation learning for visual recognition. IEEE Trans. Pattern Anal. Mach. Intell. 43, 3349\u20133364 (2020)","DOI":"10.1109\/TPAMI.2020.2983686"},{"key":"7_CR70","doi-asserted-by":"crossref","unstructured":"Whitelam, C., et al.: IARPA Janus benchmark-B face dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 90\u201398 (2017)","DOI":"10.1109\/CVPRW.2017.87"},{"key":"7_CR71","doi-asserted-by":"crossref","unstructured":"Wiles, O., Koepke, A., Zisserman, A.: Self-supervised learning of a facial attribute embedding from video. In: BMVC (2018)","DOI":"10.1109\/ICCVW.2019.00364"},{"key":"7_CR72","doi-asserted-by":"crossref","unstructured":"Wu, W., Qian, C., Yang, S., Wang, Q., Cai, Y., Zhou, Q.: Look at boundary: A boundary-aware face alignment algorithm. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2129\u20132138 (2018)","DOI":"10.1109\/CVPR.2018.00227"},{"key":"7_CR73","doi-asserted-by":"crossref","unstructured":"Wu, Z., Xiong, Y., Yu, S.X., Lin, D.: Unsupervised feature learning via non-parametric instance discrimination. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3733\u20133742 (2018)","DOI":"10.1109\/CVPR.2018.00393"},{"key":"7_CR74","doi-asserted-by":"crossref","unstructured":"Xie, Q., Luong, M.T., Hovy, E., Le, Q.V.: Self-training with noisy student improves ImageNet classification. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10687\u201310698 (2020)","DOI":"10.1109\/CVPR42600.2020.01070"},{"key":"7_CR75","unstructured":"Yalniz, I.Z., J\u00e9gou, H., Chen, K., Paluri, M., Mahajan, D.: Billion-scale semi-supervised learning for image classification. arXiv preprint arXiv:1905.00546 (2019)"},{"key":"7_CR76","doi-asserted-by":"crossref","unstructured":"Yang, J., et al.: Neural aggregation network for video face recognition. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.554"},{"key":"7_CR77","doi-asserted-by":"crossref","unstructured":"Yang, J., Bulat, A., Tzimiropoulos, G.: Fan-face: a simple orthogonal improvement to deep face recognition. In: AAAI, pp. 12621\u201312628 (2020)","DOI":"10.1609\/aaai.v34i07.6953"},{"key":"7_CR78","doi-asserted-by":"crossref","unstructured":"Yang, S., Luo, P., Loy, C.C., Tang, X.: WIDER FACE: a face detection benchmark. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)","DOI":"10.1109\/CVPR.2016.596"},{"key":"7_CR79","doi-asserted-by":"crossref","unstructured":"Ye, M., Zhang, X., Yuen, P.C., Chang, S.F.: Unsupervised embedding learning via invariant and spreading instance feature. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6210\u20136219 (2019)","DOI":"10.1109\/CVPR.2019.00637"},{"key":"7_CR80","doi-asserted-by":"crossref","unstructured":"Zhang, S., Zhu, X., Lei, Z., Shi, H., Wang, X., Li, S.Z.: S3FD: single shot scale-invariant face detector. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 192\u2013201 (2017)","DOI":"10.1109\/ICCV.2017.30"},{"issue":"10","key":"7_CR81","doi-asserted-by":"publisher","first-page":"692","DOI":"10.1016\/j.imavis.2014.06.002","volume":"32","author":"X Zhang","year":"2014","unstructured":"Zhang, X., et al.: BP4D-spontaneous: a high-resolution spontaneous 3D dynamic facial expression database. 
Image Vis. Comput. 32(10), 692\u2013706 (2014)","journal-title":"Image Vis. Comput."},{"key":"7_CR82","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Dong, W., Hu, B.G., Ji, Q.: Weakly-supervised deep convolutional neural network learning for facial action unit intensity estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2314\u20132323 (2018)","DOI":"10.1109\/CVPR.2018.00246"},{"key":"7_CR83","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Jiang, H., Wu, B., Fan, Y., Ji, Q.: Context-aware feature and label fusion for facial action unit intensity estimation with partially labeled data. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 733\u2013742 (2019)","DOI":"10.1109\/ICCV.2019.00082"},{"key":"7_CR84","doi-asserted-by":"crossref","unstructured":"Zhang, Y., et al.: Joint representation and estimator learning for facial action unit intensity estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3457\u20133466 (2019)","DOI":"10.1109\/CVPR.2019.00357"},{"key":"7_CR85","doi-asserted-by":"crossref","unstructured":"Zhu, X., Lei, Z., Liu, X., Shi, H., Li, S.Z.: Face alignment across large poses: a 3D solution. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 146\u2013155 (2016)","DOI":"10.1109\/CVPR.2016.23"},{"issue":"1","key":"7_CR86","doi-asserted-by":"publisher","first-page":"78","DOI":"10.1109\/TPAMI.2017.2778152","volume":"41","author":"X Zhu","year":"2017","unstructured":"Zhu, X., Liu, X., Lei, Z., Li, S.Z.: Face alignment in full pose range: a 3D total solution. IEEE Trans. Pattern Anal. Mach. Intell. 41(1), 78\u201392 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19778-9_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,7]],"date-time":"2024-10-07T03:42:27Z","timestamp":1728272547000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19778-9_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031197772","9783031197789"],"references-count":86,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19778-9_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"3 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference 
Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}