{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T16:05:48Z","timestamp":1778083548305,"version":"3.51.4"},"publisher-location":"Cham","reference-count":62,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726545","type":"print"},{"value":"9783031726552","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,12,6]],"date-time":"2024-12-06T00:00:00Z","timestamp":1733443200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,6]],"date-time":"2024-12-06T00:00:00Z","timestamp":1733443200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72655-2_16","type":"book-chapter","created":{"date-parts":[[2024,12,5]],"date-time":"2024-12-05T10:11:02Z","timestamp":1733393462000},"page":"273-290","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Beat-It: Beat-Synchronized Multi-condition 3D Dance Generation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-4526-440X","authenticated-orcid":false,"given":"Zikai","family":"Huang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8006-3663","authenticated-orcid":false,"given":"Xuemiao","family":"Xu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4281-6214","authenticated-orcid":false,"given":"Cheng","family":"Xu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7662-9831","authenticated-orcid":false,"given":"Huaidong","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0006-0344-2439","authenticated-orcid":false,"given":"Chenxi","family":"Zheng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7059-0929","authenticated-orcid":false,"given":"Jing","family":"Qin","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3802-4644","authenticated-orcid":false,"given":"Shengfeng","family":"He","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,6]]},"reference":[{"issue":"17","key":"16_CR1","first-page":"26","volume":"8","author":"O Alemi","year":"2017","unstructured":"Alemi, O., Fran\u00e7oise, J., Pasquier, P.: GrooveNet: real-time music-driven dance movement generation using artificial neural networks. Networks 8(17), 26 (2017)","journal-title":"Networks"},{"issue":"4","key":"16_CR2","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3592458","volume":"42","author":"S Alexanderson","year":"2023","unstructured":"Alexanderson, S., Nagy, R., Beskow, J., Henter, G.E.: Listen, denoise, action! audio-driven motion synthesis with diffusion models. ACM TOG 42(4), 1\u201320 (2023)","journal-title":"ACM TOG"},{"key":"16_CR3","doi-asserted-by":"crossref","unstructured":"Ao, T., Zhang, Z., Liu, L.: Gesturediffuclip: gesture diffusion model with clip latents. 
arXiv preprint arXiv:2303.14613 (2023)","DOI":"10.1145\/3592097"},{"key":"16_CR4","doi-asserted-by":"crossref","unstructured":"Ara\u00fajo, J.P., et al.: Circle: capture in rich contextual environments. In: CVPR, pp. 21211\u201321221 (2023)","DOI":"10.1109\/CVPR52729.2023.02032"},{"key":"16_CR5","doi-asserted-by":"crossref","unstructured":"Dabral, R., Mughal, M.H., Golyanik, V., Theobalt, C.: Mofusion: a framework for denoising-diffusion-based motion synthesis. In: CVPR, pp. 9760\u20139770 (2023)","DOI":"10.1109\/CVPR52729.2023.00941"},{"key":"16_CR6","unstructured":"Ding, M., et al.: Cogview: mastering text-to-image generation via transformers. In: NeurIPS, vol. 34, pp. 19822\u201319835 (2021)"},{"key":"16_CR7","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1016\/j.cag.2020.09.009","volume":"94","author":"JP Ferreira","year":"2021","unstructured":"Ferreira, J.P., et al.: Learning to dance: a graph convolutional adversarial network to generate realistic dance motions from audio. Comput. Graph. 94, 11\u201321 (2021)","journal-title":"Comput. Graph."},{"key":"16_CR8","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"89","DOI":"10.1007\/978-3-031-19784-0_6","volume-title":"ECCV 2022","author":"O Gafni","year":"2022","unstructured":"Gafni, O., Polyak, A., Ashual, O., Sheynin, S., Parikh, D., Taigman, Y.: Make-a-scene: scene-based text-to-image generation with human priors. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13675, pp. 89\u2013106. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19784-0_6"},{"key":"16_CR9","unstructured":"Gao, X., Hu, L., Zhang, P., Zhang, B., Bo, L.: Dancemeld: unraveling dance phrases with hierarchical latent codes for music-to-dance synthesis. arXiv preprint arXiv: 2401.10242 (2023)"},{"key":"16_CR10","doi-asserted-by":"crossref","unstructured":"Ghosh, A., Dabral, R., Golyanik, V., Theobalt, C., Slusallek, P.: Imos: intent-driven full-body motion synthesis for human-object interactions. In: CGF, vol.\u00a042, pp. 1\u201312. Wiley Online Library (2023)","DOI":"10.1111\/cgf.14739"},{"key":"16_CR11","doi-asserted-by":"crossref","unstructured":"Gong, K., et al.: TM2D: bimodality driven 3D dance generation via music-text integration. In: ICCV, pp. 9942\u20139952 (2023)","DOI":"10.1109\/ICCV51070.2023.00912"},{"key":"16_CR12","unstructured":"Gopinath, D., Won, J.: fairmotion - tools to load, process and visualize motion capture data. Github (2020). https:\/\/github.com\/facebookresearch\/fairmotion"},{"key":"16_CR13","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: NeurIPS, vol. 33, pp. 6840\u20136851 (2020)"},{"issue":"4","key":"16_CR14","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2897824.2925975","volume":"35","author":"D Holden","year":"2016","unstructured":"Holden, D., Saito, J., Komura, T.: A deep learning framework for character motion synthesis and editing. ACM TOG 35(4), 1\u201311 (2016)","journal-title":"ACM TOG"},{"key":"16_CR15","unstructured":"Huang, R., Hu, H., Wu, W., Sawada, K., Zhang, M., Jiang, D.: Dance revolution: long-term dance generation with music via curriculum learning. arXiv preprint arXiv:2006.06119 (2020)"},{"key":"16_CR16","doi-asserted-by":"crossref","unstructured":"Huang, S., et al.: Diffusion-based generation, optimization, and planning in 3D scenes. In: CVPR, pp. 
16750\u201316761 (2023)","DOI":"10.1109\/CVPR52729.2023.01607"},{"key":"16_CR17","unstructured":"Jin, Y., Zhang, J., Li, M., Tian, Y., Zhu, H., Fang, Z.: Towards the automatic anime characters creation with generative adversarial networks. arXiv preprint arXiv:1708.05509 (2017)"},{"key":"16_CR18","doi-asserted-by":"crossref","unstructured":"Kim, J., Kim, J., Choi, S.: Flame: free-form language-based motion synthesis & editing. In: AAAI, vol.\u00a037, pp. 8255\u20138263 (2023)","DOI":"10.1609\/aaai.v37i7.25996"},{"key":"16_CR19","doi-asserted-by":"crossref","unstructured":"Kim, J., Oh, H., Kim, S., Tong, H., Lee, S.: A brand new dance partner: music-conditioned pluralistic dancing controlled by multiple dance genres. In: CVPR, pp. 3490\u20133500 (2022)","DOI":"10.1109\/CVPR52688.2022.00348"},{"key":"16_CR20","unstructured":"Lee, H.Y., et al.: Dancing to music. In: NeurIPS, vol. 32 (2019)"},{"key":"16_CR21","doi-asserted-by":"crossref","unstructured":"Li, B., Zhao, Y., Zhelun, S., Sheng, L.: DanceFormer: music conditioned 3D dance generation with parametric motion transformer. In: AAAI, vol.\u00a036, pp. 1272\u20131279 (2022)","DOI":"10.1609\/aaai.v36i2.20014"},{"key":"16_CR22","unstructured":"Li, J., et al.: Learning to generate diverse dance motions with transformer. arXiv preprint arXiv:2008.08171 (2020)"},{"key":"16_CR23","doi-asserted-by":"crossref","unstructured":"Li, R., Yang, S., Ross, D.A., Kanazawa, A.: AI choreographer: music conditioned 3D dance generation with AIST++. In: ICCV, pp. 13401\u201313412 (2021)","DOI":"10.1109\/ICCV48922.2021.01315"},{"key":"16_CR24","doi-asserted-by":"crossref","unstructured":"Lin, J., et al.: Being comes from not-being: open-vocabulary text-to-motion generation with wordless training. In: CVPR, pp. 23222\u201323231 (2023)","DOI":"10.1109\/CVPR52729.2023.02224"},{"key":"16_CR25","doi-asserted-by":"crossref","unstructured":"Liu, H., Xu, C., Yang, Y., Zeng, L., He, S.: Drag your noise: interactive point-based editing via diffusion semantic propagation. In: CVPR, pp. 6743\u20136752 (2024)","DOI":"10.1109\/CVPR52733.2024.00644"},{"issue":"5","key":"16_CR26","first-page":"2386","volume":"44","author":"X Lu","year":"2020","unstructured":"Lu, X., Ma, C., Shen, J., Yang, X., Reid, I., Yang, M.H.: Deep object tracking with shrinkage loss. IEEE TPAMI 44(5), 2386\u20132401 (2020)","journal-title":"IEEE TPAMI"},{"key":"16_CR27","doi-asserted-by":"crossref","unstructured":"Mou, C., et al.: T2I-adapter: learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453 (2023)","DOI":"10.1609\/aaai.v38i5.28226"},{"key":"16_CR28","unstructured":"Nichol, A., et al.: Glide: towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021)"},{"key":"16_CR29","doi-asserted-by":"crossref","unstructured":"Qi, Q., et al.: Diffdance: cascaded human motion diffusion model for dance generation. In: ACM MM, pp. 1374\u20131382 (2023)","DOI":"10.1145\/3581783.3612307"},{"key":"16_CR30","unstructured":"Qin, C., et\u00a0al.: Unicontrol: a unified diffusion model for controllable visual generation in the wild. arXiv preprint arXiv:2305.11147 (2023)"},{"key":"16_CR31","unstructured":"Ramesh, A., et al.: Zero-shot text-to-image generation. In: ICML, pp. 8821\u20138831. PMLR (2021)"},{"key":"16_CR32","doi-asserted-by":"crossref","unstructured":"Ren, X., Li, H., Huang, Z., Chen, Q.: Self-supervised dance video synthesis conditioned on music. In: ACM MM, pp. 
46\u201354 (2020)","DOI":"10.1145\/3394171.3413932"},{"key":"16_CR33","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"16_CR34","unstructured":"Saharia, C., et al.: Photorealistic text-to-image diffusion models with deep language understanding. In: NeurIPS 35, pp. 36479\u201336494 (2022)"},{"key":"16_CR35","doi-asserted-by":"crossref","unstructured":"Siyao, L., et al.: Bailando: 3D dance generation by actor-critic GPT with choreographic memory. In: CVPR, pp. 11050\u201311059 (2022)","DOI":"10.1109\/CVPR52688.2022.01077"},{"key":"16_CR36","doi-asserted-by":"crossref","unstructured":"Siyao, L., et al.: Bailando++: 3D dance GPT with choreographic memory. IEEE TPAMI (2023)","DOI":"10.1109\/TPAMI.2023.3319435"},{"key":"16_CR37","first-page":"497","volume":"23","author":"G Sun","year":"2020","unstructured":"Sun, G., Wong, Y., Cheng, Z., Kankanhalli, M.S., Geng, W., Li, X.: Deepdance: music-to-dance motion choreography with adversarial learning. IEEE TMM 23, 497\u2013509 (2020)","journal-title":"IEEE TMM"},{"key":"16_CR38","unstructured":"Sun, J., Wang, C., Hu, H., Lai, H., Jin, Z., Hu, J.F.: You never stop dancing: non-freezing dance generation via bank-constrained manifold projection. In: NeurIPS, vol. 35, pp. 9995\u201310007 (2022)"},{"key":"16_CR39","doi-asserted-by":"crossref","unstructured":"Tang, T., Jia, J., Mao, H.: Dance with melody: an LSTM-autoencoder approach to music-oriented dance synthesis. In: ACM MM, pp. 1598\u20131606 (2018)","DOI":"10.1145\/3240508.3240526"},{"key":"16_CR40","unstructured":"Tang, Z., Yang, Z., Zhu, C., Zeng, M., Bansal, M.: Any-to-any generation via composable diffusion. arXiv preprint arXiv: 2305.11846 (2023)"},{"key":"16_CR41","unstructured":"Tevet, G., Raab, S., Gordon, B., Shafir, Y., Cohen-Or, D., Bermano, A.H.: Human motion diffusion model. arXiv preprint arXiv:2209.14916 (2022)"},{"key":"16_CR42","doi-asserted-by":"crossref","unstructured":"Tseng, J., Castellon, R., Liu, K.: Edge: editable dance generation from music. In: CVPR, pp. 448\u2013458 (2023)","DOI":"10.1109\/CVPR52729.2023.00051"},{"issue":"6","key":"16_CR43","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3478513.3480570","volume":"40","author":"G Valle-P\u00e9rez","year":"2021","unstructured":"Valle-P\u00e9rez, G., Henter, G.E., Beskow, J., Holzapfel, A., Oudeyer, P.Y., Alexanderson, S.: Transflower: probabilistic autoregressive dance generation with multimodal attention. ACM TOG 40(6), 1\u201314 (2021)","journal-title":"ACM TOG"},{"key":"16_CR44","unstructured":"Wang, T., et al.: Pretraining is all you need for image-to-image translation. arXiv preprint arXiv:2205.12952 (2022)"},{"key":"16_CR45","unstructured":"Wang, Z., Chen, Y., Liu, T., Zhu, Y., Liang, W., Huang, S.: Humanise: language-conditioned human motion generation in 3D scenes. In: NeurIPS, vol. 35, pp. 14959\u201314971 (2022)"},{"key":"16_CR46","unstructured":"Wei, D., et al.: Understanding text-driven motion synthesis with keyframe collaboration via diffusion models. arXiv preprint arXiv:2305.13773 (2023)"},{"key":"16_CR47","doi-asserted-by":"crossref","unstructured":"Xie, Y., et al.: D3still: decoupled differential distillation for asymmetric image retrieval. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
17181\u201317190 (2024)","DOI":"10.1109\/CVPR52733.2024.01626"},{"issue":"2s","key":"16_CR48","first-page":"1","volume":"19","author":"C Xu","year":"2023","unstructured":"Xu, C., Chen, Z., Mai, J., Xu, X., He, S.: Pose-and attribute-consistent person image synthesis. ACM TOMM 19(2s), 1\u201321 (2023)","journal-title":"ACM TOMM"},{"key":"16_CR49","unstructured":"Xu, C., Li, K., Luo, X., Xu, X., He, S., Zhang, K.: Fully deformable network for multiview face image synthesis. IEEE TNNLS (1-15) (2022)"},{"key":"16_CR50","doi-asserted-by":"crossref","unstructured":"Xu, C., Xu, Y., Zhang, H., Xu, X., He, S.: Dreamanime: learning style-identity textual disentanglement for anime and beyond. IEEE TVCG (2024)","DOI":"10.1109\/TVCG.2024.3397712"},{"key":"16_CR51","doi-asserted-by":"crossref","unstructured":"Yalta, N., Watanabe, S., Nakadai, K., Ogata, T.: Weakly-supervised deep recurrent neural networks for basic dance step generation. In: IJCNN, pp.\u00a01\u20138. IEEE (2019)","DOI":"10.1109\/IJCNN.2019.8851872"},{"key":"16_CR52","doi-asserted-by":"crossref","unstructured":"Yang, Z., et al.: Keyframe control of music-driven 3D dance generation. IEEE TVCG (2023)","DOI":"10.1109\/TVCG.2023.3235538"},{"key":"16_CR53","doi-asserted-by":"crossref","unstructured":"Yin, W., Yin, H., Baraka, K., Kragic, D., Bj\u00f6rkman, M.: Dance style transfer with cross-modal transformer. In: WACV, pp. 5058\u20135067 (2023)","DOI":"10.1007\/s00138-023-01399-x"},{"key":"16_CR54","doi-asserted-by":"crossref","unstructured":"Yu, Y., Liu, B., Zheng, C., Xu, X., Zhang, H., He, S.: Beyond textual constraints: learning novel diffusion conditions with fewer examples. In: CVPR, pp. 7109\u20137118 (2024)","DOI":"10.1109\/CVPR52733.2024.00679"},{"key":"16_CR55","unstructured":"Zhang, C., et al.: Bidirectional autoregressive diffusion model for dance generation. arXiv preprint arXiv: 2402.04356 (2024)"},{"key":"16_CR56","doi-asserted-by":"crossref","unstructured":"Zhang, J., et al.: T2M-GPT: generating human motion from textual descriptions with discrete representations. arXiv preprint arXiv:2301.06052 (2023)","DOI":"10.1109\/CVPR52729.2023.01415"},{"key":"16_CR57","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: ICCV, pp. 3836\u20133847 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"16_CR58","unstructured":"Zhao, S., et al.: Uni-controlnet: all-in-one control to text-to-image diffusion models. arXiv preprint arXiv:2305.16322 (2023)"},{"key":"16_CR59","doi-asserted-by":"crossref","unstructured":"Zheng, C., Liu, B., Xu, X., Zhang, H., He, S.: Learning an interpretable stylized subspace for 3D-aware animatable artforms. IEEE TVCG (2024)","DOI":"10.1109\/TVCG.2024.3364162"},{"key":"16_CR60","doi-asserted-by":"crossref","unstructured":"Zheng, C., Liu, B., Zhang, H., Xu, X., He, S.: Where is my spot? Few-shot image generation via latent subspace optimization. In: CVPR, pp. 3272\u20133281 (2023)","DOI":"10.1109\/CVPR52729.2023.00319"},{"key":"16_CR61","doi-asserted-by":"crossref","unstructured":"Zhou, Z., Wang, B.: UDE: a unified driving engine for human motion generation. In: CVPR, pp. 5632\u20135641 (2023)","DOI":"10.1109\/CVPR52729.2023.00545"},{"key":"16_CR62","doi-asserted-by":"crossref","unstructured":"Zhu, L., Liu, X., Liu, X., Qian, R., Liu, Z., Yu, L.: Taming diffusion models for audio-driven co-speech gesture generation. In: CVPR, pp. 
10544\u201310553 (2023)","DOI":"10.1109\/CVPR52729.2023.01016"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72655-2_16","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,5]],"date-time":"2024-12-05T11:31:22Z","timestamp":1733398282000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72655-2_16"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,6]]},"ISBN":["9783031726545","9783031726552"],"references-count":62,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72655-2_16","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,6]]},"assertion":[{"value":"6 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}