{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,24]],"date-time":"2025-05-24T04:02:26Z","timestamp":1748059346664,"version":"3.41.0"},"publisher-location":"Cham","reference-count":63,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031925900","type":"print"},{"value":"9783031925917","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-92591-7_27","type":"book-chapter","created":{"date-parts":[[2025,5,23]],"date-time":"2025-05-23T07:25:29Z","timestamp":1747985129000},"page":"415-426","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Towards Motion from\u00a0Video Diffusion Models"],"prefix":"10.1007","author":[{"given":"Paul","family":"Janson","sequence":"first","affiliation":[]},{"given":"Tiberiu","family":"Popa","sequence":"additional","affiliation":[]},{"given":"Eugene","family":"Belilovsky","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,12]]},"reference":[{"key":"27_CR1","doi-asserted-by":"crossref","unstructured":"Ahn, H., Ha, T., Choi, Y., Yoo, H., Oh, S.: Text2action: generative adversarial synthesis from language to action. In: 2018 IEEE International Conference on Robotics and Automation (ICRA), pp. 5915\u20135920. IEEE (2018)","DOI":"10.1109\/ICRA.2018.8460608"},{"key":"27_CR2","doi-asserted-by":"crossref","unstructured":"Ahuja, C., Morency, L.P.: Language2pose: natural language grounded pose forecasting. In: 2019 International Conference on 3D Vision (3DV), pp. 719\u2013728. IEEE (2019)","DOI":"10.1109\/3DV.2019.00084"},{"key":"27_CR3","doi-asserted-by":"crossref","unstructured":"Bahmani, S., Skorokhodov, I., et al.: 4d-fy: text-to-4d generation using hybrid score distillation sampling. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7996\u20138006 (June 2024)","DOI":"10.1109\/CVPR52733.2024.00764"},{"key":"27_CR4","unstructured":"Bommasani, R., et al.: On the opportunities and risks of foundation models. ArXiv (2021),.https:\/\/crfm.stanford.edu\/assets\/report.pdf"},{"key":"27_CR5","doi-asserted-by":"crossref","unstructured":"Cao, Y., Cao, Y.P., Han, K., Shan, Y., Wong, K.Y.K.: Dreamavatar: text-and-shape guided 3d human avatar generation via diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 958\u2013968 (2024)","DOI":"10.1109\/CVPR52733.2024.00097"},{"key":"27_CR6","unstructured":"Cerspense: Zeroscope modelcard. 
https:\/\/huggingface.co\/cerspense\/zeroscope_v2_576w"},{"key":"27_CR7","doi-asserted-by":"crossref","unstructured":"Chen, H., et al.: Videocrafter1: open diffusion models for high-quality video generation (2023)","DOI":"10.1109\/CVPR52729.2023.10308948"},{"key":"27_CR8","doi-asserted-by":"crossref","unstructured":"Chen, H., et al.: Videocrafter2: overcoming data limitations for high-quality video diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7310\u20137320 (2024)","DOI":"10.1109\/CVPR52733.2024.00698"},{"key":"27_CR9","first-page":"8780","volume":"34","author":"P Dhariwal","year":"2021","unstructured":"Dhariwal, P., Nichol, A.: Diffusion models beat gans on image synthesis. Adv. Neural. Inf. Process. Syst. 34, 8780\u20138794 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"27_CR10","doi-asserted-by":"crossref","unstructured":"Gal, R., Vinker, Y., Alaluf, Y., Bermano, A., Cohen-Or, D., Shamir, A., Chechik, G.: Breathing life into sketches using text-to-video priors. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4325\u20134336 (2024)","DOI":"10.1109\/CVPR52733.2024.00414"},{"key":"27_CR11","first-page":"31841","volume":"35","author":"J Gao","year":"2022","unstructured":"Gao, J., et al.: Get3d: a generative model of high quality 3d textured shapes learned from images. Adv. Neural. Inf. Process. Syst. 35, 31841\u201331854 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"27_CR12","doi-asserted-by":"crossref","unstructured":"Ghosh, A., Cheema, N., Oguz, C., Theobalt, C., Slusallek, P.: Synthesis of compositional animations from textual descriptions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1396\u20131406 (2021)","DOI":"10.1109\/ICCV48922.2021.00143"},{"key":"27_CR13","doi-asserted-by":"crossref","unstructured":"Guo, C., et al.: Generating diverse and natural 3d human motions from text. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5152\u20135161 (2022)","DOI":"10.1109\/CVPR52688.2022.00509"},{"key":"27_CR14","unstructured":"Ho, J., et\u00a0al.: Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303 (2022)"},{"key":"27_CR15","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Adv. Neural. Inf. Process. Syst. 33, 6840\u20136851 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"27_CR16","unstructured":"Ho, J., Salimans, T.: Classifier-free diffusion guidance. In: NeurIPS 2021 Workshop on Deep Generative Models and Downstream Applications (2021)"},{"key":"27_CR17","doi-asserted-by":"publisher","unstructured":"Jain, A., Mildenhall, B., Barron, J.T., Abbeel, P., Poole, B.: Zero-shot text-guided object generation with dream fields. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR). IEEE (Jun 2022). https:\/\/doi.org\/10.1109\/cvpr52688.2022.00094","DOI":"10.1109\/cvpr52688.2022.00094"},{"key":"27_CR18","doi-asserted-by":"crossref","unstructured":"Karthikeyan, A., Ren, R., Kant, Y., Gilitschenski, I.: Avatarone: Monocular 3d human animation. 2024 IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), pp. 3635\u20133645 (2024). 
https:\/\/api.semanticscholar.org\/CorpusID:267751541","DOI":"10.1109\/WACV57701.2024.00361"},{"issue":"4","key":"27_CR19","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3592433","volume":"42","author":"B Kerbl","year":"2023","unstructured":"Kerbl, B., Kopanas, G., Leimk\u00fchler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph. 42(4), 1\u2013139 (2023)","journal-title":"ACM Trans. Graph."},{"key":"27_CR20","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. In: International Conference on Learning Representations (2014)"},{"key":"27_CR21","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114 (2013)"},{"issue":"6","key":"27_CR22","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3414685.3417861","volume":"39","author":"S Laine","year":"2020","unstructured":"Laine, S., Hellsten, J., Karras, T., Seol, Y., Lehtinen, J., Aila, T.: Modular primitives for high-performance differentiable rendering. ACM Trans. Graph. (ToG) 39(6), 1\u201314 (2020)","journal-title":"ACM Trans. Graph. (ToG)"},{"key":"27_CR23","doi-asserted-by":"crossref","unstructured":"Liao, T., et al.: Tada! text to animatable digital avatars. In: 2024 International Conference on 3D Vision (3DV), pp. 1508\u20131519. IEEE (2024)","DOI":"10.1109\/3DV62453.2024.00150"},{"key":"27_CR24","doi-asserted-by":"publisher","unstructured":"Lin, C.H., et al.: Magic3d: high-resolution text-to-3d content creation. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR). IEEE (Jun 2023). https:\/\/doi.org\/10.1109\/cvpr52729.2023.00037","DOI":"10.1109\/cvpr52729.2023.00037"},{"key":"27_CR25","doi-asserted-by":"crossref","unstructured":"Ling, H., Kim, S.W., Torralba, A., Fidler, S., Kreis, K.: Align your gaussians: text-to-4d with dynamic 3d gaussians and composed diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8576\u20138588 (2024)","DOI":"10.1109\/CVPR52733.2024.00819"},{"issue":"6","key":"27_CR26","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2816795.2818013","volume":"34","author":"M Loper","year":"2015","unstructured":"Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: Smpl: a skinned multi-person linear model. ACM Trans. Graph. (TOG) 34(6), 1\u201316 (2015)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"27_CR27","doi-asserted-by":"publisher","unstructured":"Mahmood, N., Ghorbani, N., Troje, N.F., Pons-Moll, G., Black, M.: Amass: archive of motion capture as surface shapes. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV). IEEE (Oct 2019). https:\/\/doi.org\/10.1109\/iccv.2019.00554","DOI":"10.1109\/iccv.2019.00554"},{"key":"27_CR28","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/978-3-030-58452-8_24","volume-title":"Computer Vision \u2013 ECCV 2020","author":"B Mildenhall","year":"2020","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 405\u2013421. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58452-8_24"},{"key":"27_CR29","doi-asserted-by":"crossref","unstructured":"Mohammad\u00a0Khalid, N., Xie, T., Belilovsky, E., Popa, T.: Clip-mesh: generating textured meshes from text using pretrained image-text models. In: SIGGRAPH Asia 2022 Conference Papers, pp.\u00a01\u20138 (2022)","DOI":"10.1145\/3550469.3555392"},{"key":"27_CR30","unstructured":"OpenAI: Video generation models as world simulators. https:\/\/openai.com\/index\/video-generation-models-as-world-simulators\/"},{"key":"27_CR31","unstructured":"Paszke, A., et\u00a0al.: Pytorch: An imperative style, high-performance deep learning library. Adv. Neural Inform. Process. Syst. (2019)"},{"key":"27_CR32","doi-asserted-by":"crossref","unstructured":"Pavlakos, G., et al.: Expressive body capture: 3d hands, face, and body from a single image. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10975\u201310985 (2019)","DOI":"10.1109\/CVPR.2019.01123"},{"key":"27_CR33","doi-asserted-by":"crossref","unstructured":"Petrovich, M., Black, M.J., Varol, G.: Action-conditioned 3d human motion synthesis with transformer vae. 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 10965\u201310975 (2021). https:\/\/api.semanticscholar.org\/CorpusID:233210075","DOI":"10.1109\/ICCV48922.2021.01080"},{"key":"27_CR34","doi-asserted-by":"publisher","unstructured":"Petrovich, M., Black, M.J., Varol, G.: Temos: generating diverse human motions from textual descriptions. In: European Conference on Computer Vision, pp. 480\u2013497. Springer (2022). https:\/\/doi.org\/10.1007\/978-3-031-20047-2_28","DOI":"10.1007\/978-3-031-20047-2_28"},{"issue":"4","key":"27_CR35","doi-asserted-by":"publisher","first-page":"236","DOI":"10.1089\/big.2016.0028","volume":"4","author":"M Plappert","year":"2016","unstructured":"Plappert, M., Mandery, C., Asfour, T.: The kit motion-language dataset. Big Data 4(4), 236\u2013252 (2016)","journal-title":"Big Data"},{"key":"27_CR36","doi-asserted-by":"publisher","first-page":"13","DOI":"10.1016\/j.robot.2018.07.006","volume":"109","author":"M Plappert","year":"2018","unstructured":"Plappert, M., Mandery, C., Asfour, T.: Learning a bidirectional mapping between human whole-body motion and natural language using deep recurrent neural networks. Robot. Auton. Syst. 109, 13\u201326 (2018)","journal-title":"Robot. Auton. Syst."},{"key":"27_CR37","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: text-to-3d using 2d diffusion. In: The Eleventh International Conference on Learning Representations (2023)"},{"key":"27_CR38","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"27_CR39","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. 2022 IEEE. In: CVF Conference on Computer Vision and Pattern Recognition (CVPR), vol.\u00a01 (2021)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"27_CR40","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"27_CR41","unstructured":"Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. arXiv preprint arXiv:2201.02610 (2022)"},{"key":"27_CR42","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1007\/978-3-319-24574-4_28","volume-title":"Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2015","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-Net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28"},{"key":"27_CR43","first-page":"36479","volume":"35","author":"C Saharia","year":"2022","unstructured":"Saharia, C., et al.: Photorealistic text-to-image diffusion models with deep language understanding. Adv. Neural. Inf. Process. Syst. 35, 36479\u201336494 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"27_CR44","doi-asserted-by":"publisher","unstructured":"Sanghi, A., et al.: Clip-forge: Towards zero-shot text-to-shape generation. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR). IEEE (Jun 2022). https:\/\/doi.org\/10.1109\/cvpr52688.2022.01805","DOI":"10.1109\/cvpr52688.2022.01805"},{"key":"27_CR45","doi-asserted-by":"crossref","unstructured":"Sanyal, S., Bolkart, T., Feng, H., Black, M.J.: Learning to regress 3d face shape and expression from an image without 3d supervision. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7763\u20137772 (2019)","DOI":"10.1109\/CVPR.2019.00795"},{"key":"27_CR46","unstructured":"Shafir, Y., Tevet, G., Kapon, R., Bermano, A.H.: Human motion diffusion as a generative prior. In: The Twelfth International Conference on Learning Representations (2024)"},{"key":"27_CR47","unstructured":"Shen, T., Gao, J., Yin, K., Liu, M.Y., Fidler, S.: Deep marching tetrahedra: a hybrid representation for high-resolution 3d shape synthesis. In: Advances in Neural Information Processing Systems (NeurIPS) (2021)"},{"key":"27_CR48","unstructured":"Singer, U., et\u00a0al.: Make-a-video: text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792 (2022)"},{"key":"27_CR49","unstructured":"Singer, U., Sheynin, S., Polyak, A., Ashual, O., Makarov, I., Kokkinos, F., Goyal, N., Vedaldi, A., Parikh, D., Johnson, J., Taigman, Y.: Text-to-4D dynamic scene generation. In: Krause, A., Brunskill, E., Cho, K., Engelhardt, B., Sabato, S., Scarlett, J. (eds.) Proceedings of the 40th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol.\u00a0202, pp. 31915\u201331929. PMLR (23\u201329 Jul 2023), https:\/\/proceedings.mlr.press\/v202\/singer23a.html"},{"key":"27_CR50","unstructured":"Sivakumar, P., Janson, P., Rajasegaran, J., Ambegoda, T.: Fewshotnerf: meta-learning-based novel view synthesis for rapid scene-specific adaptation. arXiv preprint arXiv:2408.04803 (2024)"},{"key":"27_CR51","unstructured":"Sohl-Dickstein, J., Weiss, E., Maheswaranathan, N., Ganguli, S.: Deep unsupervised learning using nonequilibrium thermodynamics. In: International Conference on Machine Learning, pp. 2256\u20132265. 
PMLR (2015)"},{"key":"27_CR52","unstructured":"Song, Y., Sohl-Dickstein, J., Kingma, D.P., Kumar, A., Ermon, S., Poole, B.: Score-based generative modeling through stochastic differential equations. In: International Conference on Learning Representations (2021)"},{"key":"27_CR53","unstructured":"Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: Dreamgaussian: generative gaussian splatting for efficient 3d content creation. In: International Conference on Learning Representations (2024)"},{"key":"27_CR54","doi-asserted-by":"crossref","unstructured":"Tevet, G., Gordon, B., Hertz, A., Bermano, A.H., Cohen-Or, D.: Motionclip: Exposing human motion generation to clip space. In: European Conference on Computer Vision. pp. 358\u2013374. Springer (2022)","DOI":"10.1007\/978-3-031-20047-2_21"},{"key":"27_CR55","unstructured":"Tevet, G., Raab, S., Gordon, B., Shafir, Y., Cohen-or, D., Bermano, A.H.: Human motion diffusion model. In: The Eleventh International Conference on Learning Representations (2023). https:\/\/openreview.net\/forum?id=SJ1kSyO2jwu"},{"key":"27_CR56","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inform. Process. Syst. 30 (2017)"},{"key":"27_CR57","unstructured":"Villegas, R., et al.: Phenaki: Variable length video generation from open domain textual descriptions. In: International Conference on Learning Representations (2022)"},{"key":"27_CR58","doi-asserted-by":"publisher","unstructured":"Wang, C., Chai, M., He, M., Chen, D., Liao, J.: Clip-nerf: text-and-image driven manipulation of neural radiance fields. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR). IEEE (Jun 2022). https:\/\/doi.org\/10.1109\/cvpr52688.2022.00381","DOI":"10.1109\/cvpr52688.2022.00381"},{"key":"27_CR59","unstructured":"Wang, J., Yuan, H., Chen, D., Zhang, Y., Wang, X., Zhang, S.: Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571 (2023)"},{"key":"27_CR60","unstructured":"Wang, Z., Lu, C., Wang, Y., Bao, F., Li, C., Su, H., Zhu, J.: Prolificdreamer: high-fidelity and diverse text-to-3d generation with variational score distillation. Adv. Neural Inform. Process. Syst. 36 (2023)"},{"key":"27_CR61","doi-asserted-by":"publisher","unstructured":"Xu, J., et al.: Dream3d: zero-shot text-to-3d synthesis using 3d shape prior and text-to-image diffusion models. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR). IEEE (Jun 2023). https:\/\/doi.org\/10.1109\/cvpr52729.2023.02003","DOI":"10.1109\/cvpr52729.2023.02003"},{"key":"27_CR62","unstructured":"Zamani, A., Aghdam, A.G., Popa, T., Belilovsky, E.: Temporally consistent object editing in videos using extended attention. In: CVPR Workshop on AI for Content Creation (2024)"},{"key":"27_CR63","doi-asserted-by":"crossref","unstructured":"Zheng, Y., Li, X., Nagano, K., Liu, S., Hilliges, O., De\u00a0Mello, S.: A unified approach for text- and image-guided 4d scene generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 
7300\u20137309 (June 2024)","DOI":"10.1109\/CVPR52733.2024.00697"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-92591-7_27","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,23]],"date-time":"2025-05-23T07:25:53Z","timestamp":1747985153000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-92591-7_27"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031925900","9783031925917"],"references-count":63,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-92591-7_27","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"12 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
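The record above is a Crossref REST API "work" message, as the "source": "Crossref" field indicates. A minimal sketch of how one might fetch and inspect such a record, assuming network access and only Python's standard library; the DOI constant is taken from the record itself, and the field accesses mirror the keys shown above:

import json
import urllib.request

# DOI of this chapter, copied from the record above. The public Crossref
# endpoint https://api.crossref.org/works/{doi} returns a JSON document of
# the shape {"status": ..., "message-type": "work", "message": {...}}.
DOI = "10.1007/978-3-031-92591-7_27"

with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    work = json.load(resp)["message"]

# Pull out a few of the bibliographic fields present in the record.
print(work["title"][0])                # Towards Motion from Video Diffusion Models
print(work["container-title"][-1])     # Computer Vision - ECCV 2024 Workshops
print(work["page"])                    # 415-426
print(len(work.get("reference", [])))  # 63 cited works
for author in work["author"]:
    print(author["given"], author["family"])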