{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T04:12:24Z","timestamp":1774498344115,"version":"3.50.1"},"publisher-location":"Cham","reference-count":44,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031736674","type":"print"},{"value":"9783031736681","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73668-1_2","type":"book-chapter","created":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T02:01:05Z","timestamp":1733018465000},"page":"19-36","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["HARIVO: Harnessing Text-to-Image Models for\u00a0Video Generation"],"prefix":"10.1007","author":[{"given":"Mingi","family":"Kwon","sequence":"first","affiliation":[]},{"given":"Seoung Wug","family":"Oh","sequence":"additional","affiliation":[]},{"given":"Yang","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Difan","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Joon-Young","family":"Lee","sequence":"additional","affiliation":[]},{"given":"Haoran","family":"Cai","sequence":"additional","affiliation":[]},{"given":"Baqiao","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Feng","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Youngjung","family":"Uh","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,1]]},"reference":[{"key":"2_CR1","doi-asserted-by":"crossref","unstructured":"Bain, M., Nagrani, A., Varol, G., Zisserman, A.: Frozen in time: a joint video and image encoder for end-to-end retrieval. In: IEEE International Conference on Computer Vision (2021)","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"2_CR2","unstructured":"Balaji, Y., et\u00a0al.: eDiff-I: text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324 (2022)"},{"issue":"2","key":"2_CR3","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRevLett.100.020603","volume":"100","author":"A Barducci","year":"2008","unstructured":"Barducci, A., Bussi, G., Parrinello, M.: Well-tempered metadynamics: a smoothly converging and tunable free-energy method. Phys. Rev. Lett. 100(2), 020603 (2008)","journal-title":"Phys. Rev. Lett."},{"key":"2_CR4","doi-asserted-by":"crossref","unstructured":"Blattmann, A., et al.: Align your latents: high-resolution video synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22563\u201322575 (2023)","DOI":"10.1109\/CVPR52729.2023.02161"},{"key":"2_CR5","doi-asserted-by":"crossref","unstructured":"Cao, M., Wang, X., Qi, Z., Shan, Y., Qie, X., Zheng, Y.: Masactrl: tuning-free mutual self-attention control for consistent image synthesis and editing. 
arXiv preprint arXiv:2304.08465 (2023)","DOI":"10.1109\/ICCV51070.2023.02062"},{"key":"2_CR6","doi-asserted-by":"crossref","unstructured":"Choi, J., Kim, S., Jeong, Y., Gwon, Y., Yoon, S.: ILVR: conditioning method for denoising diffusion probabilistic models. arXiv preprint arXiv:2108.02938 (2021)","DOI":"10.1109\/ICCV48922.2021.01410"},{"key":"2_CR7","first-page":"8780","volume":"34","author":"P Dhariwal","year":"2021","unstructured":"Dhariwal, P., Nichol, A.: Diffusion models beat GANs on image synthesis. Adv. Neural. Inf. Process. Syst. 34, 8780\u20138794 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"2_CR8","doi-asserted-by":"crossref","unstructured":"Gafni, O., Polyak, A., Ashual, O., Sheynin, S., Parikh, D., Taigman, Y.: Make-a-scene: scene-based text-to-image generation with human priors. arXiv preprint arXiv:2203.13131 (2022)","DOI":"10.1007\/978-3-031-19784-0_6"},{"key":"2_CR9","doi-asserted-by":"publisher","unstructured":"Gal, R., et al.: An image is worth one word: personalizing text-to-image generation using textual inversion (2022). https:\/\/doi.org\/10.48550\/ARXIV.2208.01618. https:\/\/arxiv.org\/abs\/2208.01618","DOI":"10.48550\/ARXIV.2208.01618"},{"key":"2_CR10","doi-asserted-by":"crossref","unstructured":"Ge, S., et al.: Preserve your own correlation: a noise prior for video diffusion models. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 22930\u201322941 (2023)","DOI":"10.1109\/ICCV51070.2023.02096"},{"key":"2_CR11","doi-asserted-by":"crossref","unstructured":"Girdhar, R., et al.: Emu video: factorizing text-to-video generation by explicit image conditioning (2023). https:\/\/emu-video.metademolab.com\/","DOI":"10.1007\/978-3-031-73033-7_12"},{"key":"2_CR12","unstructured":"Guo, Y., et al.: Animatediff: animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725 (2023)"},{"key":"2_CR13","unstructured":"Han, I., Yang, S., Kwon, T., Ye, J.C.: Highly personalized text embedding for image manipulation by stable diffusion. arXiv preprint arXiv:2303.08767 (2023)"},{"key":"2_CR14","unstructured":"He, Y., Yang, T., Zhang, Y., Shan, Y., Chen, Q.: Latent video diffusion models for high-fidelity long video generation (2023)"},{"key":"2_CR15","unstructured":"Hertz, A., Mokady, R., Tenenbaum, J., Aberman, K., Pritch, Y., Cohen-Or, D.: Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626 (2022)"},{"key":"2_CR16","unstructured":"Ho, J., et\u00a0al.: Imagen video: high definition video generation with diffusion models. arXiv preprint arXiv:2210.02303 (2022)"},{"key":"2_CR17","unstructured":"Ho, J., Salimans, T., Gritsenko, A., Chan, W., Norouzi, M., Fleet, D.J.: Video diffusion models. arXiv preprint arXiv:2204.03458 (2022)"},{"key":"2_CR18","unstructured":"Hu, E.J., et al.: Lora: low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)"},{"issue":"3","key":"2_CR19","doi-asserted-by":"publisher","first-page":"255","DOI":"10.1016\/0165-1765(80)90024-5","volume":"6","author":"CM Jarque","year":"1980","unstructured":"Jarque, C.M., Bera, A.K.: Efficient tests for normality, homoscedasticity and serial independence of regression residuals. Econ. Lett. 6(3), 255\u2013259 (1980)","journal-title":"Econ. Lett."},{"key":"2_CR20","unstructured":"Jeong, J., Kwon, M., Uh, Y.: Training-free style transfer emerges from h-space in diffusion models. 
arXiv preprint arXiv:2303.15403 (2023)"},{"key":"2_CR21","doi-asserted-by":"crossref","unstructured":"Khachatryan, L., et al.: Text2video-zero: text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439 (2023)","DOI":"10.1109\/ICCV51070.2023.01462"},{"key":"2_CR22","unstructured":"Khrulkov, V., Ryzhakov, G., Chertkov, A., Oseledets, I.: Understanding DDPM latent codes through optimal transport. arXiv preprint arXiv:2202.07477 (2022)"},{"key":"2_CR23","unstructured":"Kwon, M., Jeong, J., Uh, Y.: Diffusion models already have a semantic latent space. arXiv preprint arXiv:2210.10960 (2022)"},{"issue":"20","key":"2_CR24","doi-asserted-by":"publisher","first-page":"12562","DOI":"10.1073\/pnas.202427399","volume":"99","author":"A Laio","year":"2002","unstructured":"Laio, A., Parrinello, M.: Escaping free-energy minima. Proc. Natl. Acad. Sci. 99(20), 12562\u201312566 (2002)","journal-title":"Proc. Natl. Acad. Sci."},{"issue":"1","key":"2_CR25","doi-asserted-by":"publisher","first-page":"72","DOI":"10.1162\/neco.1995.7.1.72","volume":"7","author":"DG Lowe","year":"1995","unstructured":"Lowe, D.G.: Similarity metric learning for a variable-kernel classifier. Neural Comput. 7(1), 72\u201385 (1995)","journal-title":"Neural Comput."},{"key":"2_CR26","unstructured":"Meng, C., Song, Y., Song, J., Wu, J., Zhu, J.Y., Ermon, S.: Sdedit: image synthesis and editing with stochastic differential equations. arXiv preprint arXiv:2108.01073 (2021)"},{"key":"2_CR27","doi-asserted-by":"crossref","unstructured":"Mokady, R., Hertz, A., Aberman, K., Pritch, Y., Cohen-Or, D.: Null-text inversion for editing real images using guided diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6038\u20136047 (2023)","DOI":"10.1109\/CVPR52729.2023.00585"},{"key":"2_CR28","unstructured":"Molad, E., et al.: Dreamix: video diffusion models are general video editors. arXiv preprint arXiv:2302.01329 (2023)"},{"key":"2_CR29","unstructured":"Nichol, A., et al.: Glide: towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021)"},{"key":"2_CR30","unstructured":"Park, Y.H., Kwon, M., Choi, J., Jo, J., Uh, Y.: Understanding the latent space of diffusion models through the lens of riemannian geometry. arXiv preprint arXiv:2307.12868 (2023)"},{"key":"2_CR31","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"2_CR32","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"2_CR33","doi-asserted-by":"crossref","unstructured":"Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
22500\u201322510 (2023)","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"2_CR34","unstructured":"Singer, U., et al.: Make-a-video: text-to-video generation without text-video data (2022)"},{"key":"2_CR35","unstructured":"Soomro, K., Zamir, A.R., Shah, M.: UCF101: a dataset of 101 human actions classes from videos in the wild (2012)"},{"key":"2_CR36","doi-asserted-by":"crossref","unstructured":"Tumanyan, N., Geyer, M., Bagon, S., Dekel, T.: Plug-and-play diffusion features for text-driven image-to-image translation. arXiv preprint arXiv:2211.12572 (2022)","DOI":"10.1109\/CVPR52729.2023.00191"},{"key":"2_CR37","unstructured":"Unterthiner, T., van Steenkiste, S., Kurach, K., Marinier, R., Michalski, M., Gelly, S.: Towards accurate generative models of video: a new metric & challenges (2019)"},{"key":"2_CR38","unstructured":"Wang, J., Yuan, H., Chen, D., Zhang, Y., Wang, X., Zhang, S.: Modelscope text-to-video technical report (2023)"},{"key":"2_CR39","unstructured":"Wu, C.H., De\u00a0la Torre, F.: Unifying diffusion models\u2019 latent space, with applications to cyclediffusion and guidance. arXiv preprint arXiv:2210.05559 (2022)"},{"key":"2_CR40","doi-asserted-by":"crossref","unstructured":"Xu, J., Mei, T., Yao, T., Rui, Y.: MSR-VTT: a large video description dataset for bridging video and language. In: IEEE International Conference on Computer Vision and Pattern Recognition (CVPR) (2016). https:\/\/www.microsoft.com\/en-us\/research\/publication\/msr-vtt-a-large-video-description-dataset-for-bridging-video-and-language\/","DOI":"10.1109\/CVPR.2016.571"},{"key":"2_CR41","unstructured":"Ye, H., Zhang, J., Liu, S., Han, X., Yang, W.: IP-adapter: text compatible image prompt adapter for text-to-image diffusion models (2023)"},{"key":"2_CR42","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"668","DOI":"10.1007\/978-3-031-19809-0_38","volume-title":"European Conference on Computer Vision","author":"CH Yeh","year":"2022","unstructured":"Yeh, C.H., Hong, C.Y., Hsu, Y.C., Liu, T.L., Chen, Y., LeCun, Y.: Decoupled contrastive learning. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13686, pp. 668\u2013684. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19809-0_38"},{"key":"2_CR43","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"2_CR44","unstructured":"Zhou, D., Wang, W., Yan, H., Lv, W., Zhu, Y., Feng, J.: Magicvideo: efficient video generation with latent diffusion models. 
arXiv preprint arXiv:2211.11018 (2022)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73668-1_2","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T02:05:34Z","timestamp":1733018734000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73668-1_2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,1]]},"ISBN":["9783031736674","9783031736681"],"references-count":44,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73668-1_2","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,1]]},"assertion":[{"value":"1 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}