{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T14:53:56Z","timestamp":1773154436810,"version":"3.50.1"},"publisher-location":"Cham","reference-count":68,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726323","type":"print"},{"value":"9783031726330","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,22]],"date-time":"2024-11-22T00:00:00Z","timestamp":1732233600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,22]],"date-time":"2024-11-22T00:00:00Z","timestamp":1732233600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72633-0_12","type":"book-chapter","created":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T07:54:44Z","timestamp":1732175684000},"page":"209-228","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Avatar Fingerprinting for\u00a0Authorized Use of\u00a0Synthetic Talking-Head Videos"],"prefix":"10.1007","author":[{"given":"Ekta","family":"Prashnani","sequence":"first","affiliation":[]},{"given":"Koki","family":"Nagano","sequence":"additional","affiliation":[]},{"given":"Shalini","family":"De Mello","sequence":"additional","affiliation":[]},{"given":"David","family":"Luebke","sequence":"additional","affiliation":[]},{"given":"Orazio","family":"Gallo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,22]]},"reference":[{"key":"12_CR1","unstructured":"Apple\u2019s vision pro. https:\/\/www.apple.com\/apple-vision-pro\/. Accessed 06 Mar 2024"},{"key":"12_CR2","unstructured":"Heygen. https:\/\/www.heygen.com. Accessed 16 Nov 2023"},{"key":"12_CR3","unstructured":"Microsoft teams mesh. https:\/\/www.microsoft.com\/en-us\/microsoft-teams\/microsoft-mesh. Accessed 06 Mar 2024"},{"key":"12_CR4","unstructured":"Myheritage. https:\/\/www.myheritage.com. Accessed 16 Nov 2023"},{"key":"12_CR5","unstructured":"Nvidia\u2019s maxine. https:\/\/developer.nvidia.com\/maxine. Accessed 06 Mar 2024"},{"key":"12_CR6","doi-asserted-by":"crossref","unstructured":"Agarwal, S., El-Gaaly, T., Farid, H., Lim, S.N.: Detecting deep-fake videos from appearance and behavior. In: 2020 IEEE International Workshop on Information Forensics and Security (WIFS), pp.\u00a01\u20136 (2020)","DOI":"10.1109\/WIFS49906.2020.9360904"},{"key":"12_CR7","doi-asserted-by":"crossref","unstructured":"Agarwal, S., Farid, H.: Detecting deep-fake videos from aural and oral dynamics. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2021","DOI":"10.1109\/CVPRW53098.2021.00109"},{"key":"12_CR8","doi-asserted-by":"crossref","unstructured":"Agarwal, S., Farid, H., Fried, O., Agrawala, M.: Detecting deep-fake videos from phoneme-viseme mismatches. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops (2020)","DOI":"10.1109\/CVPRW50498.2020.00338"},{"key":"12_CR9","unstructured":"Agarwal, S., Farid, H., Gu, Y., He, M., Nagano, K., Li, H.: Protecting world leaders against deep fakes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops (2019)"},{"key":"12_CR10","doi-asserted-by":"crossref","unstructured":"Agarwal, S., Hu, L., Ng, E., Darrell, T., Li, H., Rohrbach, A.: Watch those words: video falsification detection using word-conditioned facial motion. In: IEEE Winter Conference on Applications of Computer Vision (WACV) (2023)","DOI":"10.1109\/WACV56688.2023.00469"},{"key":"12_CR11","unstructured":"Albright, M., McCloskey, S.: Source generator attribution via inversion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops (2019)"},{"key":"12_CR12","unstructured":"Baluja, S.: Hiding images in plain sight: deep steganography. In: Advances in Neural Information Processing Systems (NeurIPS) (2017)"},{"key":"12_CR13","doi-asserted-by":"crossref","unstructured":"Blanz, V., Vetter, T.: A morphable model for the synthesis of 3d faces. In: Proceedings of SIGGRAPH (1999)","DOI":"10.1145\/311535.311556"},{"key":"12_CR14","doi-asserted-by":"crossref","unstructured":"Blanz, V., Vetter, T.: Face recognition based on fitting a 3D morphable model. IEEE Trans. Pattern Anal. Mach. Intell. (TPAMI) (2003)","DOI":"10.1109\/TPAMI.2003.1227983"},{"key":"12_CR15","doi-asserted-by":"crossref","unstructured":"Boh\u00e1\u010dek, M., Farid, H.: Protecting world leaders against deep fakes using facial, gestural, and vocal mannerisms. In: Proceedings of the National Academy of Sciences (2022)","DOI":"10.1073\/pnas.2216035119"},{"key":"12_CR16","doi-asserted-by":"crossref","unstructured":"Cao, H., Cooper, D.G., Keutmann, M.K., Gur, R.C., Nenkova, A., Verma, R.: CREMA-D: crowd-sourced emotional multimodal actors dataset. IEEE Trans. Affect. Comput. (2014)","DOI":"10.1109\/TAFFC.2014.2336244"},{"key":"12_CR17","doi-asserted-by":"crossref","unstructured":"Chung, J.S., Nagrani, A., Zisserman, A.: Voxceleb2: deep speaker recognition. In: INTERSPEECH (2018)","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"12_CR18","doi-asserted-by":"crossref","unstructured":"Cozzolino, D., Nie\u00dfner, M., Verdoliva, L.: Audio-visual person-of-interest deepfake detection (2022)","DOI":"10.1109\/CVPRW59228.2023.00101"},{"key":"12_CR19","unstructured":"Cozzolino, D., Poggi, G., Verdoliva, L.: Extracting camera-based fingerprints for video forensics. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops (June 2019)"},{"key":"12_CR20","doi-asserted-by":"crossref","unstructured":"Cozzolino, D., R\u00f6ssler, A., Thies, J., Nie\u00dfner, M., Verdoliva, L.: ID-Reveal: identity-aware DeepFake video detection. In: IEEE International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.01483"},{"key":"12_CR21","unstructured":"Dolhansky, B., et al.: The deepfake detection challenge dataset. arXiv preprint arXiv:2006.07397 (2020)"},{"key":"12_CR22","doi-asserted-by":"crossref","unstructured":"Dong, X., et al.: Protecting celebrities from deepfake with identity consistency transformer. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.00925"},{"key":"12_CR23","doi-asserted-by":"crossref","unstructured":"Drobyshev, N., Chelishev, J., Khakhulin, T., Ivakhnenko, A., Lempitsky, V., Zakharov, E.: MegaPortraits: one-shot megapixel neural head avatars (2022)","DOI":"10.1145\/3503161.3547838"},{"key":"12_CR24","doi-asserted-by":"crossref","unstructured":"Fox, G., Liu, W., Kim, H., Seidel, H.P., Elgharib, M., Theobalt, C.: VideoForensicsHQ: detecting high-quality manipulated face videos. In: IEEE International Conference on Multimedia and Expo (2021)","DOI":"10.1109\/ICME51207.2021.9428101"},{"key":"12_CR25","doi-asserted-by":"crossref","unstructured":"Fridrich, J.: Steganography in Digital Media: Principles, Algorithms, and Applications. Cambridge University Press, Cambridge (2009)","DOI":"10.1017\/CBO9781139192903"},{"key":"12_CR26","doi-asserted-by":"crossref","unstructured":"Ge, S., Lin, F., Li, C., Zhang, D., Wang, W., Zeng, D.: Deepfake video detection via predictive representation learning. ACM Trans. Multimed. Comput. Commun. Appl. (TOMM) (2022)","DOI":"10.1145\/3469877.3490586"},{"key":"12_CR27","unstructured":"Hadsell, R., Chopra, S., LeCun, Y.: Dimensionality reduction by learning an invariant mapping. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2006)"},{"key":"12_CR28","doi-asserted-by":"crossref","unstructured":"Haliassos, A., Vougioukas, K., Petridis, S., Pantic, M.: Lips don\u2019t lie: a generalisable and robust approach to face forgery detection. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.00500"},{"key":"12_CR29","doi-asserted-by":"crossref","unstructured":"Haliassos, A., Vougioukas, K., Petridis, S., Pantic, M.: Lips don\u2019t lie: a generalisable and robust approach to face forgery detection. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.00500"},{"key":"12_CR30","doi-asserted-by":"crossref","unstructured":"He, Y., et al.: Forgerynet: a versatile benchmark for comprehensive forgery analysis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4360\u20134369 (2021)","DOI":"10.1109\/CVPR46437.2021.00434"},{"key":"12_CR31","doi-asserted-by":"crossref","unstructured":"Hill, H., Johnston, A.: Categorizing sex and identity from the biological motion of faces. Curr. Biol. (2001)","DOI":"10.1016\/S0960-9822(01)00243-3"},{"key":"12_CR32","doi-asserted-by":"crossref","unstructured":"Honari, S., Molchanov, P., Tyree, S., Vincent, P., Pal, C., Kautz, J.: Improving landmark localization with semi-supervised learning. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)","DOI":"10.1109\/CVPR.2018.00167"},{"key":"12_CR33","doi-asserted-by":"crossref","unstructured":"Hong, F.T., Zhang, L., Shen, L., Xu, D.: Depth-aware generative adversarial network for talking head video generation (2022)","DOI":"10.1109\/CVPR52688.2022.00339"},{"key":"12_CR34","doi-asserted-by":"crossref","unstructured":"Jian\u00a0Zhao, H.Z.: Thin-plate spline motion model for image animation (2022)","DOI":"10.1109\/CVPR52688.2022.00364"},{"key":"12_CR35","doi-asserted-by":"crossref","unstructured":"Jiang, L., Li, R., Wu, W., Qian, C., Loy, C.C.: Deeperforensics-1.0: a large-scale dataset for real-world face forgery detection. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.00296"},{"key":"12_CR36","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of StyleGAN. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"12_CR37","doi-asserted-by":"publisher","unstructured":"Khakhulin, T., Sklyarova, V., Lempitsky, V., Zakharov, E.: Realistic one-shot mesh-based head avatars. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022. ECCV 2022. LNCS, vol. 13662, pp. 345\u2013362. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20086-1_20","DOI":"10.1007\/978-3-031-20086-1_20"},{"key":"12_CR38","unstructured":"Khosla, P., et al.: Supervised contrastive learning. Adv. Neural Inf. Process. Syst. (NeurIPS) (2020)"},{"key":"12_CR39","doi-asserted-by":"crossref","unstructured":"Kim, H., et al.: Neural style-preserving visual dubbing. ACM Trans. Graph. (ToG) (2019)","DOI":"10.1145\/3355089.3356500"},{"key":"12_CR40","doi-asserted-by":"crossref","unstructured":"Kim, H., et al.: Deep video portraits. ACM Trans. Graph. (ToG) (2018)","DOI":"10.1145\/3197517.3201283"},{"key":"12_CR41","unstructured":"Knappmeyer, B., Thornton, I., B\u00fclthoff, H.: Facial motion can determine facial identity. J. Vis. (2001)"},{"key":"12_CR42","doi-asserted-by":"crossref","unstructured":"Kwon, P., You, J., Nam, G., Park, S., Chae, G.: Kodf: a large-scale korean deepfake detection dataset. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10744\u201310753 (2021)","DOI":"10.1109\/ICCV48922.2021.01057"},{"key":"12_CR43","doi-asserted-by":"crossref","unstructured":"Li, J., Xie, H., Yu, L., Zhang, Y.: Wavelet-enhanced weakly supervised local feature learning for face forgery detection. In: Proceedings of the 30th ACM International Conference on Multimedia (2022)","DOI":"10.1145\/3503161.3547832"},{"key":"12_CR44","doi-asserted-by":"crossref","unstructured":"Li, Y., Sun, P., Qi, H., Lyu, S.: Celeb-DF: a large-scale challenging dataset for DeepFake forensics. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.00327"},{"key":"12_CR45","doi-asserted-by":"crossref","unstructured":"Liu, B., Liu, B., Ding, M., Zhu, T., Yu, X.: Ti2net: temporal identity inconsistency network for deepfake detection. In: IEEE Winter Conference on Applications of Computer Vision (WACV) (2023)","DOI":"10.1109\/WACV56688.2023.00467"},{"key":"12_CR46","doi-asserted-by":"crossref","unstructured":"Livingstone, S.R., Russo, F.A.: The ryerson audio-visual database of emotional speech and song (RAVDESS): a dynamic, multimodal set of facial and vocal expressions in North American English. PloS one (2018)","DOI":"10.1371\/journal.pone.0196391"},{"key":"12_CR47","doi-asserted-by":"crossref","unstructured":"Luo, X., Zhan, R., Chang, H., Yang, F., Milanfar, P.: Distortion agnostic deep watermarking. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.01356"},{"key":"12_CR48","doi-asserted-by":"crossref","unstructured":"Ma, S., et al.: Pixel codec avatars. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.00013"},{"key":"12_CR49","unstructured":"Mallya, A., Wang, T.C., Liu, M.Y.: Implicit warping for animation with image sets. Adv. Neural Inf. Process. Syst. (NeurIPS) (2022)"},{"key":"12_CR50","doi-asserted-by":"crossref","unstructured":"Munir, S., Batool, B., Shafiq, Z., Srinivasan, P., Zaffar, F.: Through the looking glass: learning to attribute synthetic text generated by language models. In: Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume (2021)","DOI":"10.18653\/v1\/2021.eacl-main.155"},{"key":"12_CR51","doi-asserted-by":"crossref","unstructured":"O\u2019Toole, A.J., Roark, D.A., Abdi, H.: Recognizing moving faces: a psychological and neural synthesis. Trends Cogn. Sci. (2002)","DOI":"10.1016\/S1364-6613(02)01908-3"},{"key":"12_CR52","doi-asserted-by":"crossref","unstructured":"Passos, L.A., et al.: A review of deep learning-based approaches for deepfake content detection. arXiv preprint arXiv:2202.06095 (2022)","DOI":"10.22541\/au.169735672.27713914\/v1"},{"key":"12_CR53","doi-asserted-by":"crossref","unstructured":"R\u00f6ssler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., Nie\u00dfner, M.: FaceForensics++: learning to detect manipulated facial images. In: IEEE International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/ICCV.2019.00009"},{"key":"12_CR54","doi-asserted-by":"crossref","unstructured":"R\u00f6ssler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., Nie\u00dfner, M.: Faceforensics++: learning to detect manipulated facial images. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1\u201311 (2019)","DOI":"10.1109\/ICCV.2019.00009"},{"key":"12_CR55","doi-asserted-by":"crossref","unstructured":"Schroff, F., Kalenichenko, D., Philbin, J.: FaceNet: a unified embedding for face recognition and clustering. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"12_CR56","unstructured":"Siarohin, A., Lathuili\u00e8re, S., Tulyakov, S., Ricci, E., Sebe, N.: First order motion model for image animation. Adv. Neural Inf. Process. Syst. (NeurIPS) (2019)"},{"key":"12_CR57","doi-asserted-by":"crossref","unstructured":"Sun, K., Yao, T., Chen, S., Ding, S., Li, J., Ji, R.: Dual contrastive learning for general face forgery detection (2022)","DOI":"10.1609\/aaai.v36i2.20130"},{"key":"12_CR58","doi-asserted-by":"crossref","unstructured":"Tancik, M., Mildenhall, B., Ng, R.: Stegastamp: invisible hyperlinks in physical photographs. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.00219"},{"key":"12_CR59","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"700","DOI":"10.1007\/978-3-030-58589-1_42","volume-title":"Computer Vision \u2013 ECCV 2020","author":"K Wang","year":"2020","unstructured":"Wang, K., et al.: MEAD: a large-scale audio-visual dataset for emotional talking-face generation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12366, pp. 700\u2013717. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58589-1_42"},{"key":"12_CR60","doi-asserted-by":"crossref","unstructured":"Wang, T.C., Mallya, A., Liu, M.Y.: One-shot free-view neural talking-head synthesis for video conferencing. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.00991"},{"key":"12_CR61","doi-asserted-by":"crossref","unstructured":"Wang, X., Han, X., Huang, W., Dong, D., Scott, M.R.: Multi-similarity loss with general pair weighting for deep metric learning. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2019)","DOI":"10.1109\/CVPR.2019.00516"},{"key":"12_CR62","unstructured":"Wang, Y., Yang, D., Bremond, F., Dantcheva, A.: Latent image animator: learning to animate images via latent space navigation. In: International Conference on Learning Representations (ICLR) (2022)"},{"key":"12_CR63","unstructured":"Yacoob, Y.: Gan-scanner: a detector for faces of stylegan+ (2021). https:\/\/github.com\/yaseryacoob\/GAN-Scanner"},{"key":"12_CR64","doi-asserted-by":"crossref","unstructured":"Yu, N., Skripniuk, V., Abdelnabi, S., Fritz, M.: Artificial fingerprinting for generative models: rooting deepfake attribution in training data. In: IEEE International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.01418"},{"key":"12_CR65","unstructured":"Yu, N., Skripniuk, V., Chen, D., Davis, L., Fritz, M.: Responsible disclosure of generative models using scalable fingerprinting. In: International Conference on Learning Representations (ICLR) (2022)"},{"key":"12_CR66","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"524","DOI":"10.1007\/978-3-030-58610-2_31","volume-title":"Computer Vision \u2013 ECCV 2020","author":"E Zakharov","year":"2020","unstructured":"Zakharov, E., Ivakhnenko, A., Shysheya, A., Lempitsky, V.: Fast Bi-layer neural synthesis of one-shot realistic head avatars. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12357, pp. 524\u2013540. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58610-2_31"},{"key":"12_CR67","doi-asserted-by":"crossref","unstructured":"Zakharov, E., Shysheya, A., Burkov, E., Lempitsky, V.: Few-shot adversarial learning of realistic neural talking head models. In: IEEE International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/ICCV.2019.00955"},{"key":"12_CR68","doi-asserted-by":"crossref","unstructured":"Zheng, Y., Bao, J., Chen, D., Zeng, M., Wen, F.: Exploring temporal coherence for more general video face forgery detection. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/ICCV48922.2021.01477"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72633-0_12","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T08:11:38Z","timestamp":1732176698000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72633-0_12"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,22]]},"ISBN":["9783031726323","9783031726330"],"references-count":68,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72633-0_12","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,22]]},"assertion":[{"value":"22 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}