{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,28]],"date-time":"2025-03-28T08:15:44Z","timestamp":1743149744547,"version":"3.40.3"},"publisher-location":"Cham","reference-count":50,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031728471"},{"type":"electronic","value":"9783031728488"}],"license":[{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72848-8_24","type":"book-chapter","created":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T13:38:40Z","timestamp":1732801120000},"page":"409-425","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Animate Your Motion: Turning Still Images into Dynamic Videos"],"prefix":"10.1007","author":[{"given":"Mingxiao","family":"Li","sequence":"first","affiliation":[]},{"given":"Bo","family":"Wan","sequence":"additional","affiliation":[]},{"given":"Marie-Francine","family":"Moens","sequence":"additional","affiliation":[]},{"given":"Tinne","family":"Tuytelaars","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,29]]},"reference":[{"key":"24_CR1","unstructured":"Bao, F., Li, C., Zhu, J., Zhang, B.: Analytic-DPM: an analytic estimate of the optimal reverse variance in diffusion probabilistic models (2022)"},{"key":"24_CR2","unstructured":"Chen, C., 
Shu, J., Chen, L., He, G., Wang, C., Li, Y.: Motion-zero: zero-shot moving object control framework for diffusion-based video generation. arXiv preprint arXiv:2401.10150 (2024)"},{"key":"24_CR3","unstructured":"Chen, N., Zhang, Y., Zen, H., Weiss, R.J., Norouzi, M., Chan, W.: Wavegrad: estimating gradients for waveform generation. arXiv preprint arXiv:2009.00713 (2020)"},{"key":"24_CR4","unstructured":"Chen, W., et al.: Control-a-video: controllable text-to-video generation with diffusion models. arXiv preprint arXiv:2305.13840 (2023)"},{"key":"24_CR5","doi-asserted-by":"crossref","unstructured":"Deng, Y., Wang, R., Zhang, Y., Tai, Y.W., Tang, C.K.: Dragvideo: interactive drag-style video editing. arXiv preprint arXiv:2312.02216 (2023)","DOI":"10.1007\/978-3-031-72992-8_11"},{"key":"24_CR6","unstructured":"Gal, R., et al.: An image is worth one word: personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618 (2022)"},{"key":"24_CR7","doi-asserted-by":"crossref","unstructured":"Girdhar, R., et al.: Emu video: factorizing text-to-video generation by explicit image conditioning. arXiv preprint arXiv:2311.10709 (2023)","DOI":"10.1007\/978-3-031-73033-7_12"},{"key":"24_CR8","unstructured":"He, Y., Yang, T., Zhang, Y., Shan, Y., Chen, Q.: Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221 (2022)"},{"key":"24_CR9","unstructured":"Ho, J., et\u00a0al.: Imagen video: high definition video generation with diffusion models. arXiv preprint arXiv:2210.02303 (2022)"},{"key":"24_CR10","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: Advances in Neural Information Processing Systems, vol. 33, pp. 
6840\u20136851 (2020)"},{"key":"24_CR11","unstructured":"Ho, J., Salimans, T., Gritsenko, A., Chan, W., Norouzi, M., Fleet, D.J.: Video diffusion models (2022)"},{"issue":"5","key":"24_CR12","doi-asserted-by":"publisher","first-page":"1562","DOI":"10.1109\/TPAMI.2019.2957464","volume":"43","author":"L Huang","year":"2019","unstructured":"Huang, L., Zhao, X., Huang, K.: Got-10k: a large high-diversity benchmark for generic object tracking in the wild. IEEE Trans. Pattern Anal. Mach. Intell. 43(5), 1562\u20131577 (2019)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"24_CR13","doi-asserted-by":"crossref","unstructured":"Kahatapitiya, K., Karjauv, A., Abati, D., Porikli, F., Asano, Y.M., Habibian, A.: Object-centric diffusion for efficient video editing. arXiv preprint arXiv:2401.05735 (2024)","DOI":"10.1007\/978-3-031-72998-0_6"},{"key":"24_CR14","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114 (2013)"},{"key":"24_CR15","unstructured":"Li, M., Qu, T., Yao, R., Sun, W., Moens, M.F.: Alleviating exposure bias in diffusion models through sampling with shifted time steps. In: International Conference on Learning Representations (2024)"},{"key":"24_CR16","unstructured":"Li, P., et al.: Trackdiffusion: multi-object tracking data generation via diffusion models. arXiv preprint arXiv:2312.00651 (2023)"},{"key":"24_CR17","doi-asserted-by":"crossref","unstructured":"Li, X., Ma, C., Yang, X., Yang, M.H.: Vidtome: video token merging for zero-shot video editing. arXiv preprint arXiv:2312.10656 (2023)","DOI":"10.1109\/CVPR52733.2024.00715"},{"key":"24_CR18","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: Gligen: open-set grounded text-to-image generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
22511\u201322521 (2023)","DOI":"10.1109\/CVPR52729.2023.02156"},{"key":"24_CR19","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning. arXiv preprint arXiv:2310.03744 (2023)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"24_CR20","unstructured":"Liu, L., Ren, Y., Lin, Z., Zhao, Z.: Pseudo numerical methods for diffusion models on manifolds. arXiv preprint arXiv:2202.09778 (2022)"},{"key":"24_CR21","unstructured":"Liu, Y., et\u00a0al.: Sora: a review on background, technology, limitations, and opportunities of large vision models. arXiv preprint arXiv:2402.17177 (2024)"},{"key":"24_CR22","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017)"},{"key":"24_CR23","unstructured":"Lu, C., Zhou, Y., Bao, F., Chen, J., Li, C., Zhu, J.: DPM-solver: a fast ode solver for diffusion probabilistic model sampling in around 10 steps. In: Advances in Neural Information Processing Systems, vol. 35, pp. 5775\u20135787 (2022)"},{"key":"24_CR24","unstructured":"Lu, C., Zhou, Y., Bao, F., Chen, J., Li, C., Zhu, J.: DPM-solver++: fast solver for guided sampling of diffusion probabilistic models. arXiv preprint arXiv:2211.01095 (2022)"},{"key":"24_CR25","unstructured":"Ma, W.D.K., Lewis, J., Kleijn, W.B.: Trailblazer: trajectory control for diffusion-based video generation. arXiv preprint arXiv:2401.00896 (2023)"},{"key":"24_CR26","doi-asserted-by":"crossref","unstructured":"Ma, Y., Tang, Y., Yang, W., Zhang, T., Zhang, J., Kang, M.: Unifying visual and vision-language tracking via contrastive learning. arXiv preprint arXiv:2401.11228 (2024)","DOI":"10.1609\/aaai.v38i5.28205"},{"key":"24_CR27","unstructured":"Nichol, A.Q., Dhariwal, P.: Improved denoising diffusion probabilistic models. In: International Conference on Machine Learning, pp. 8162\u20138171. 
PMLR (2021)"},{"key":"24_CR28","unstructured":"Ning, M., Li, M., Su, J., Salah, A.A., Ertugrul, I.O.: Elucidating the exposure bias in diffusion models. In: International Conference on Learning Representations (2024)"},{"key":"24_CR29","unstructured":"Ning, M., Sangineto, E., Porrello, A., Calderara, S., Cucchiara, R.: Input perturbation reduces exposure bias in diffusion models. arXiv preprint arXiv:2301.11706 (2023)"},{"key":"24_CR30","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: text-to-3D using 2D diffusion. arXiv preprint arXiv:2209.14988 (2022)"},{"key":"24_CR31","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"24_CR32","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"24_CR33","doi-asserted-by":"crossref","unstructured":"Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22500\u201322510 (2023)","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"24_CR34","unstructured":"Singer, U., et\u00a0al.: Make-a-video: text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792 (2022)"},{"key":"24_CR35","unstructured":"Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv:2010.02502 (2020). https:\/\/arxiv.org\/abs\/2010.02502"},{"key":"24_CR36","unstructured":"Sun, J., Li, M., Chen, Z., Moens, M.F.: Neurocine: decoding vivid video sequences from human brain activities. 
arXiv preprint arXiv:2402.01590 (2024)"},{"key":"24_CR37","unstructured":"Sun, J., Li, M., Chen, Z., Zhang, Y., Wang, S., Moens, M.F.: Contrast, attend and diffuse to decode high-resolution images from brain activities. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"24_CR38","doi-asserted-by":"crossref","unstructured":"Sun, J., Li, M., Moens, M.F.: Decoding realistic images from brain activity with contrastive self-supervision and latent diffusion. arXiv preprint arXiv:2310.00318 (2023)","DOI":"10.3233\/FAIA230523"},{"key":"24_CR39","unstructured":"Tancik, M., et al.: Fourier features let networks learn high frequency functions in low dimensional domains. In: NeurIPS (2020)"},{"key":"24_CR40","unstructured":"Unterthiner, T., Van\u00a0Steenkiste, S., Kurach, K., Marinier, R., Michalski, M., Gelly, S.: Towards accurate generative models of video: a new metric & challenges. arXiv preprint arXiv:1812.01717 (2018)"},{"key":"24_CR41","unstructured":"Wang, J., et al.: Boximator: generating rich and controllable motions for video synthesis. arXiv preprint arXiv:2402.01566 (2024)"},{"key":"24_CR42","unstructured":"Wang, J., Yuan, H., Chen, D., Zhang, Y., Wang, X., Zhang, S.: Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571 (2023)"},{"key":"24_CR43","unstructured":"Wang, X., et al.: Videocomposer: compositional video synthesis with motion controllability. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"24_CR44","doi-asserted-by":"crossref","unstructured":"Wei, X., Bai, Y., Zheng, Y., Shi, D., Gong, Y.: Autoregressive visual tracking. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9697\u20139706 (2023)","DOI":"10.1109\/CVPR52729.2023.00935"},{"key":"24_CR45","doi-asserted-by":"crossref","unstructured":"Yang, L., Fan, Y., Xu, N.: Video instance segmentation. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5188\u20135197 (2019)","DOI":"10.1109\/ICCV.2019.00529"},{"key":"24_CR46","unstructured":"Yang, L., Fan, Y., Xu, N.: The 4th large-scale video object segmentation challenge-video instance segmentation track (2022)"},{"key":"24_CR47","unstructured":"Zhang, H., et al.: Dino: DETR with improved denoising anchor boxes for end-to-end object detection (2022)"},{"key":"24_CR48","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3836\u20133847 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"24_CR49","unstructured":"Zhang, Y., Wei, Y., Jiang, D., Zhang, X., Zuo, W., Tian, Q.: Controlvideo: training-free controllable text-to-video generation. arXiv preprint arXiv:2305.13077 (2023)"},{"key":"24_CR50","unstructured":"Zhou, D., Wang, W., Yan, H., Lv, W., Zhu, Y., Feng, J.: Magicvideo: efficient video generation with latent diffusion models. 
arXiv preprint arXiv:2211.11018 (2022)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72848-8_24","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T14:12:22Z","timestamp":1732803142000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72848-8_24"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,29]]},"ISBN":["9783031728471","9783031728488"],"references-count":50,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72848-8_24","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,11,29]]},"assertion":[{"value":"29 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}