{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T03:08:22Z","timestamp":1742958502624,"version":"3.40.3"},"publisher-location":"Cham","reference-count":45,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783031133237"},{"type":"electronic","value":"9783031133244"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-13324-4_20","type":"book-chapter","created":{"date-parts":[[2022,8,3]],"date-time":"2022-08-03T20:21:50Z","timestamp":1659558110000},"page":"228-239","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Generating High-Resolution 3D Faces Using VQ-VAE-2 with PixelSNAIL Networks"],"prefix":"10.1007","author":[{"given":"Alessio","family":"Gallucci","sequence":"first","affiliation":[]},{"given":"Dmitry","family":"Znamenskiy","sequence":"additional","affiliation":[]},{"given":"Nicola","family":"Pezzotti","sequence":"additional","affiliation":[]},{"given":"Milan","family":"Petkovic","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,8,4]]},"reference":[{"key":"20_CR1","doi-asserted-by":"crossref","unstructured":"Liu, S.-L., Liu, Y., Dong, L.-F., Tong, X.: RAS: a data-driven rigidity-aware skinning model for 3D facial animation. In: Computer Graphics Forum, pp. 
581\u2013594 (2020)","DOI":"10.1111\/cgf.13892"},{"key":"20_CR2","doi-asserted-by":"crossref","unstructured":"Carrigan, E., Zell, E., Guiard, C., McDonnell, R.: Expression packing: as-few-as-possible training expressions for blendshape transfer. In: Computer Graphics Forum, pp. 219\u2013233 (2020)","DOI":"10.1111\/cgf.13925"},{"key":"20_CR3","doi-asserted-by":"publisher","first-page":"191","DOI":"10.1145\/3130800.3130813","volume":"36","author":"T Li","year":"2017","unstructured":"Li, T., Bolkart, T., Black, M.J., Li, H., Romero, J.: Learning a model of facial shape and expression from 4D scans. ACM Trans. Graph. 36, 191\u2013194 (2017)","journal-title":"ACM Trans. Graph."},{"key":"20_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"730","DOI":"10.1007\/978-3-030-68796-0_53","volume-title":"Pattern Recognition. ICPR International Workshops and Challenges","author":"H Valev","year":"2021","unstructured":"Valev, H., Gallucci, A., Leufkens, T., Westerink, J., Sas, C.: Applying delaunay triangulation augmentation for deep learning facial expression generation and recognition. In: Del Bimbo, A., et al. (eds.) ICPR 2021. LNCS, vol. 12663, pp. 730\u2013740. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-68796-0_53"},{"key":"20_CR5","doi-asserted-by":"crossref","unstructured":"Taigman, Y., Yang, M., Ranzato, M., Wolf, L.: DeepFace: closing the gap to human-level performance in face verification. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1701\u20131708 (2014)","DOI":"10.1109\/CVPR.2014.220"},{"key":"20_CR6","doi-asserted-by":"crossref","unstructured":"Varol, G., et al.: Learning from synthetic humans. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
109\u2013117 (2017)","DOI":"10.1109\/CVPR.2017.492"},{"key":"20_CR7","doi-asserted-by":"crossref","unstructured":"Blanz, V., Vetter, T.: A morphable model for the synthesis of 3D faces. In: Proceedings 26th Annual Conference on Computer Graphics and Interactive Techniques, pp. 187\u2013194 (1999)","DOI":"10.1145\/311535.311556"},{"key":"20_CR8","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/MSP.2017.2693418","volume":"34","author":"MM Bronstein","year":"2017","unstructured":"Bronstein, M.M., Bruna, J., LeCun, Y., Szlam, A., Vandergheynst, P.: Geometric deep learning: going beyond euclidean data. IEEE Signal Process. Mag. 34, 18\u201342 (2017)","journal-title":"IEEE Signal Process. Mag."},{"key":"20_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"725","DOI":"10.1007\/978-3-030-01219-9_43","volume-title":"Computer Vision \u2013 ECCV 2018","author":"A Ranjan","year":"2018","unstructured":"Ranjan, A., Bolkart, T., Sanyal, S., Black, M.J.: Generating 3D faces using convolutional mesh autoencoders. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11207, pp. 725\u2013741. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01219-9_43"},{"key":"20_CR10","unstructured":"De Haan, P., Weiler, M., Cohen, T., Welling, M.: Gauge equivariant mesh CNNs: anisotropic convolutions on geometric graphs. arXiv Prepr. arXiv2003.05425 (2020)"},{"issue":"1","key":"20_CR11","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s40649-019-0069-y","volume":"6","author":"S Zhang","year":"2019","unstructured":"Zhang, S., Tong, H., Xu, J., Maciejewski, R.: Graph convolutional networks: a comprehensive review. Comput. Soc. Netw. 6(1), 1\u201323 (2019). https:\/\/doi.org\/10.1186\/s40649-019-0069-y","journal-title":"Comput. Soc. Netw."},{"key":"20_CR12","unstructured":"Razavi, A., van den Oord, A., Vinyals, O.: Generating diverse high-fidelity images with VQ-VAE-2. 
In: Advances in Neural Information Processing Systems, pp. 14837\u201314847 (2019)"},{"key":"20_CR13","unstructured":"Van Oord, A., Kalchbrenner, N., Kavukcuoglu, K.: Pixel recurrent neural networks. In: International Conference on Machine Learning, pp. 1747\u20131756 (2016)"},{"key":"20_CR14","unstructured":"den Oord, A., Kalchbrenner, N., Espeholt, L., Vinyals, O., Graves, A., et al.: Conditional image generation with pixelcnn decoders. In: Advances in Neural Information Processing Systems, pp. 4790\u20134798 (2016)"},{"key":"20_CR15","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, pp. 5998\u20136008 (2017)"},{"key":"20_CR16","unstructured":"Chen, X., Mishra, N., Rohaninejad, M., Abbeel, P.: PixelSNAIL: an improved autoregressive generative model. In: 35th International Conference\u00a0on\u00a0Machine Learning ICML 2018, vol. 2, pp. 1364\u20131372 (2018)"},{"key":"20_CR17","doi-asserted-by":"publisher","unstructured":"Davies, R., Twining, C., Taylor, C.: Statistical Models of Shape: Optimisation and Evaluation. Springer, London (2008). https:\/\/doi.org\/10.1007\/978-1-84800-138-1","DOI":"10.1007\/978-1-84800-138-1"},{"key":"20_CR18","doi-asserted-by":"crossref","unstructured":"Abrevaya, V.F., Boukhayma, A., Wuhrer, S., Boyer, E.: A decoupled 3D facial shape model by adversarial training. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9419\u20139428 (2019)","DOI":"10.1109\/ICCV.2019.00951"},{"key":"20_CR19","doi-asserted-by":"crossref","unstructured":"Thies, J., Zollhofer, M., Stamminger, M., Theobalt, C., Nie\u00dfner, M.: Face2face: real-time face capture and reenactment of RGB videos. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
2387\u20132395 (2016)","DOI":"10.1109\/CVPR.2016.262"},{"key":"20_CR20","doi-asserted-by":"crossref","unstructured":"Vlasic, D., Brand, M., Pfister, H., Popovic, J.: Face transfer with multilinear models. In: ACM SIGGRAPH 2006 Courses, pp. 24\u2013es (2006)","DOI":"10.1145\/1185657.1185864"},{"key":"20_CR21","doi-asserted-by":"crossref","unstructured":"Booth, J., Roussos, A., Zafeiriou, S., Ponniah, A., Dunaway, D.: A 3D morphable model learnt from 10,000 faces. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5543\u20135552 (2016)","DOI":"10.1109\/CVPR.2016.598"},{"key":"20_CR22","doi-asserted-by":"crossref","unstructured":"Tuan Tran, A., Hassner, T., Masi, I., Medioni, G.: Regressing robust and discriminative 3D morphable models with a very deep neural network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5163\u20135172 (2017)","DOI":"10.1109\/CVPR.2017.163"},{"key":"20_CR23","doi-asserted-by":"crossref","unstructured":"Gu, X., Gortler, S.J., Hoppe, H.: Geometry images. In: Proceedings of the 29th Annual Conference on Computer Graphics and Interactive Techniques, pp. 355\u2013361 (2002)","DOI":"10.1145\/566654.566589"},{"key":"20_CR24","doi-asserted-by":"crossref","unstructured":"Booth, J., Zafeiriou, S.: Optimal UV spaces for facial morphable model construction. In: 2014 IEEE International Conference on Image Processing (ICIP), pp. 4672\u20134676 (2014)","DOI":"10.1109\/ICIP.2014.7025947"},{"key":"20_CR25","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: Advances in Neural Information Processing Systems, pp. 2672\u20132680 (2014)"},{"key":"20_CR26","unstructured":"Arjovsky, M., Chintala, S., Bottou, L.: Wasserstein generative adversarial networks. In: International Conference on Machine Learning, pp. 
214\u2013223 (2017)"},{"key":"20_CR27","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"498","DOI":"10.1007\/978-3-030-11015-4_36","volume-title":"Computer Vision \u2013 ECCV 2018 Workshops","author":"R Slossberg","year":"2019","unstructured":"Slossberg, R., Shamai, G., Kimmel, R.: High quality facial surface and texture synthesis via generative adversarial networks. In: Leal-Taix\u00e9, L., Roth, S. (eds.) ECCV 2018. LNCS, vol. 11131, pp. 498\u2013513. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-11015-4_36"},{"key":"20_CR28","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3337067","volume":"15","author":"G Shamai","year":"2019","unstructured":"Shamai, G., Slossberg, R., Kimmel, R.: Synthesizing facial photometries and corresponding geometries using generative adversarial networks. ACM Trans. Multimedia Comput. Commun. Appl. 15, 1\u201324 (2019)","journal-title":"ACM Trans. Multimedia Comput. Commun. Appl."},{"key":"20_CR29","doi-asserted-by":"publisher","first-page":"2534","DOI":"10.1007\/s11263-020-01329-8","volume":"128","author":"S Moschoglou","year":"2020","unstructured":"Moschoglou, S., Ploumpis, S., Nicolaou, M.A., Papaioannou, A., Zafeiriou, S.: 3DFaceGAN: adversarial nets for 3D face representation, generation, and translation. Int. J. Comput. Vis. 128, 2534\u20132551 (2020)","journal-title":"Int. J. Comput. Vis."},{"key":"20_CR30","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational Bayes. In: 2nd International Conference\u00a0on\u00a0Learning\u00a0Representations ICLR 2014 - Conference Track\u00a0Proceedings, pp. 1\u201314 (2014)"},{"key":"20_CR31","doi-asserted-by":"crossref","unstructured":"Bagautdinov, T., Wu, C., Saragih, J., Fua, P., Sheikh, Y.: Modeling facial geometry using compositional VAEs. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
3877\u20133886 (2018)","DOI":"10.1109\/CVPR.2018.00408"},{"key":"20_CR32","doi-asserted-by":"crossref","unstructured":"Abrevaya, V.F., Wuhrer, S., Boyer, E.: Multilinear autoencoder for 3D face model learning. In: 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 1\u20139 (2018)","DOI":"10.1109\/WACV.2018.00007"},{"key":"20_CR33","doi-asserted-by":"crossref","unstructured":"Li, K., Liu, J., Lai, Y.-K., Yang, J.: Generating 3D faces using multi-column graph convolutional networks. In: Computer Graphics Forum, pp. 215\u2013224 (2019)","DOI":"10.1111\/cgf.13830"},{"key":"20_CR34","doi-asserted-by":"crossref","unstructured":"Tam, G.K.L.L., et al.: Registration of 3D point clouds and meshes: a survey from rigid to Nonrigid. IEEE Trans. Vis. Comput. Graph. 19, 1199\u20131217 (2013)","DOI":"10.1109\/TVCG.2012.310"},{"key":"20_CR35","doi-asserted-by":"crossref","unstructured":"van Kaick, O., Zhang, H., Hamarneh, G., Cohen-Or, D.: A survey on shape correspondence. In: Eurographics Symposium\u00a0on\u00a0Geometry Processing (2011)","DOI":"10.1111\/j.1467-8659.2011.01884.x"},{"key":"20_CR36","doi-asserted-by":"crossref","unstructured":"Gallucci, A., Znamenskiy, D., Petkovic, M.: Prediction of 3D body parts from face shape and anthropometric measurements. J. Image Graph. 8, 67\u201377 (2020)","DOI":"10.18178\/joig.8.3.67-74"},{"key":"20_CR37","unstructured":"van den Oord, A., Vinyals, O., et al.: Neural discrete representation learning. In: Advances in Neural Information Processing Systems, pp. 6306\u20136315 (2017)"},{"key":"20_CR38","doi-asserted-by":"crossref","unstructured":"Kingma, D.P., Welling, M.: An introduction to variational autoencoders. arXiv Prepr. 
arXiv1906.02691 (2019)","DOI":"10.1561\/9781680836233"},{"issue":"3","key":"20_CR39","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: ImageNet large scale visual recognition challenge. Int. J. Comput. Vision 115(3), 211\u2013252 (2015). https:\/\/doi.org\/10.1007\/s11263-015-0816-y","journal-title":"Int. J. Comput. Vision"},{"key":"20_CR40","doi-asserted-by":"crossref","unstructured":"Kabsch, W.: A solution for the best rotation to relate two sets of vectors. Acta Crystallogr. Sect. A Cryst. Phys. Diffr. Theor. Gen. Crystallogr. 32, 922\u2013923 (1976)","DOI":"10.1107\/S0567739476001873"},{"key":"20_CR41","unstructured":"Ball, R., Molenbroek, J.F.M.: Measuring Chinese heads and faces. In: Proceedings of the 9th International Congress of Physiological Anthropology, Human Diversity Design for Life, pp. 150\u2013155 (2008)"},{"key":"20_CR42","unstructured":"Robinette, K.M., Daanen, H., Paquet, E.: The CAESAR project: a 3-D surface anthropometry survey. In: Second International Conference on 3-D Digital Imaging and Modeling (Cat. No.PR00062), pp. 380\u2013386 (1999)"},{"key":"20_CR43","doi-asserted-by":"crossref","unstructured":"Robinette, K.M., Daanen, H.: Lessons learned from CAESAR: a 3-D anthropometric survey, 5 (2003)","DOI":"10.21236\/ADA430674"},{"key":"20_CR44","doi-asserted-by":"crossref","unstructured":"Gallucci, A., Pezzotti, N., Znamenskiy, D., Petkovic, M.: A latent space exploration for microscopic skin lesion augmentations with VQ-VAE-2 and PixelSNAIL. In: SPIE Medical Imaging Proceedings (2021)","DOI":"10.1117\/12.2580664"},{"key":"20_CR45","unstructured":"Paszke, A., et al.: Automatic differentiation in PyTorch (2017)"}],"container-title":["Lecture Notes in Computer Science","Image Analysis and Processing. 
ICIAP 2022 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-13324-4_20","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T19:07:11Z","timestamp":1666465631000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-13324-4_20"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031133237","9783031133244"],"references-count":45,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-13324-4_20","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 August 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIAP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Image Analysis and Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Lecce","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 May 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 May 
2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iciap2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.iciap2021.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"307","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"168","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"55% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full 
Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}