{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T08:06:38Z","timestamp":1772525198488,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":87,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,10,27]]},"DOI":"10.1145\/3746027.3755462","type":"proceedings-article","created":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T05:47:42Z","timestamp":1761371262000},"page":"10171-10180","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":4,"title":["UniEdit: A Unified Tuning-Free Framework for Video Motion and Appearance Editing"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3121-7259","authenticated-orcid":false,"given":"Jianhong","family":"Bai","sequence":"first","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4828-3228","authenticated-orcid":false,"given":"Tianyu","family":"He","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-1906-8820","authenticated-orcid":false,"given":"Yuchi","family":"Wang","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8360-5483","authenticated-orcid":false,"given":"Junliang","family":"Guo","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6048-6549","authenticated-orcid":false,"given":"Haoji","family":"Hu","sequence":"additional","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7816-502X","authenticated-orcid":false,"given":"Zuozhu","family":"Liu","sequence":"additional","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9472-600X","authenticated-orcid":false,"given":"Jiang","family":"Bian","sequence":"additional","affiliation":[{"name":"Microsoft Research Asia, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02161"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02062"},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02121"},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02106"},{"key":"e_1_3_2_1_6_1","unstructured":"Haoxin Chen Menghan Xia Yingqing He Yong Zhang Xiaodong Cun Shaoshu Yang Jinbo Xing Yaofang Liu Qifeng Chen Xintao Wang et al. 2023c. Videocrafter1: Open diffusion models for high-quality video generation. arXiv preprint arXiv:2310.19512 (2023)."},{"key":"e_1_3_2_1_7_1","volume-title":"Videocrafter2: Overcoming data limitations for high-quality video diffusion models. arXiv preprint arXiv:2401.09047","author":"Chen Haoxin","year":"2024","unstructured":"Haoxin Chen, Yong Zhang, Xiaodong Cun, Menghan Xia, Xintao Wang, Chao Weng, and Ying Shan. 2024. Videocrafter2: Overcoming data limitations for high-quality video diffusion models. 
arXiv preprint arXiv:2401.09047 (2024)."},{"key":"e_1_3_2_1_8_1","volume-title":"Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang.","author":"Chen Tsai-Shien","year":"2023","unstructured":"Tsai-Shien Chen, Chieh Hubert Lin, Hung-Yu Tseng, Tsung-Yi Lin, and Ming-Hsuan Yang. 2023a. Motion-Conditioned Diffusion Model for Controllable Video Synthesis. arXiv preprint arXiv:2304.14404 (2023)."},{"key":"e_1_3_2_1_9_1","volume-title":"Control-A-Video: Controllable Text-to-Video Generation with Diffusion Models. arXiv preprint arXiv:2305.13840","author":"Chen Weifeng","year":"2023","unstructured":"Weifeng Chen, Jie Wu, Pan Xie, Hefeng Wu, Jiashi Li, Xin Xia, Xuefeng Xiao, and Liang Lin. 2023b. Control-A-Video: Controllable Text-to-Video Generation with Diffusion Models. arXiv preprint arXiv:2305.13840 (2023)."},{"key":"e_1_3_2_1_10_1","volume-title":"The Twelfth International Conference on Learning Representations.","author":"Cong Yuren","unstructured":"Yuren Cong, Mengmeng Xu, Shoufa Chen, Jiawei Ren, Yanping Xie, Juan-Manuel Perez-Rua, Bodo Rosenhahn, Tao Xiang, Sen He, et al., [n.d.]. FLATTEN: optical FLow-guided ATTENtion for consistent text-to-video editing. In The Twelfth International Conference on Learning Representations."},{"key":"e_1_3_2_1_11_1","volume-title":"FLATTEN: optical FLow-guided ATTENtion for consistent text-to-video editing. arXiv preprint arXiv:2310.05922","author":"Cong Yuren","year":"2023","unstructured":"Yuren Cong, Mengmeng Xu, Christian Simon, Shoufa Chen, Jiawei Ren, Yanping Xie, Juan-Manuel Perez-Rua, Bodo Rosenhahn, Tao Xiang, and Sen He. 2023. FLATTEN: optical FLow-guided ATTENtion for consistent text-to-video editing. arXiv preprint arXiv:2310.05922 (2023)."},{"key":"e_1_3_2_1_12_1","volume-title":"Diffedit: Diffusion-based semantic image editing with mask guidance. arXiv preprint arXiv:2210.11427","author":"Couairon Guillaume","year":"2022","unstructured":"Guillaume Couairon, Jakob Verbeek, Holger Schwenk, and Matthieu Cord. 2022. Diffedit: Diffusion-based semantic image editing with mask guidance. arXiv preprint arXiv:2210.11427 (2022)."},{"key":"e_1_3_2_1_13_1","volume-title":"Videdit: Zero-shot and spatially aware text-driven video editing. arXiv preprint arXiv:2306.08707","author":"Couairon Paul","year":"2023","unstructured":"Paul Couairon, Cl\u00e9ment Rambour, Jean-Emmanuel Haugeard, and Nicolas Thome. 2023. Videdit: Zero-shot and spatially aware text-driven video editing. arXiv preprint arXiv:2306.08707 (2023)."},{"key":"e_1_3_2_1_14_1","volume-title":"DragVideo: Interactive Drag-style Video Editing. arXiv preprint arXiv:2312.02216","author":"Deng Yufan","year":"2023","unstructured":"Yufan Deng, Ruida Wang, Yuhao Zhang, Yu-Wing Tai, and Chi-Keung Tang. 2023b. DragVideo: Interactive Drag-style Video Editing. arXiv preprint arXiv:2312.02216 (2023)."},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612405"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00675"},{"key":"e_1_3_2_1_17_1","volume-title":"Vitron: A unified pixel-level vision llm for understanding, generating, segmenting, editing. arXiv preprint arXiv:2412.19806","author":"Fei Hao","year":"2024","unstructured":"Hao Fei, Shengqiong Wu, Hanwang Zhang, Tat-Seng Chua, and Shuicheng Yan. 2024. Vitron: A unified pixel-level vision llm for understanding, generating, segmenting, editing. arXiv preprint arXiv:2412.19806 (2024)."},{"key":"e_1_3_2_1_18_1","volume-title":"Ccedit: Creative and controllable video editing via diffusion models. 
arXiv preprint arXiv:2309.16496","author":"Feng Ruoyu","year":"2023","unstructured":"Ruoyu Feng, Wenming Weng, Yanhui Wang, Yuhui Yuan, Jianmin Bao, Chong Luo, Zhibo Chen, and Baining Guo. 2023. Ccedit: Creative and controllable video editing via diffusion models. arXiv preprint arXiv:2309.16496 (2023)."},{"key":"e_1_3_2_1_19_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Geyer Michal","year":"2024","unstructured":"Michal Geyer, Omer Bar-Tal, Shai Bagon, and Tali Dekel. 2024. Tokenflow: Consistent diffusion features for consistent video editing. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_20_1","volume-title":"Akbar Shah, Xi Yin, Devi Parikh, and Ishan Misra.","author":"Girdhar Rohit","year":"2023","unstructured":"Rohit Girdhar, Mannat Singh, Andrew Brown, Quentin Duval, Samaneh Azadi, Sai Saketh Rambhatla, Akbar Shah, Xi Yin, Devi Parikh, and Ishan Misra. 2023. Emu Video: Factorizing Text-to-Video Generation by Explicit Image Conditioning. arXiv preprint arXiv:2311.10709 (2023)."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00728"},{"key":"e_1_3_2_1_22_1","volume-title":"SparseCtrl: Adding Sparse Controls to Text-to-Video Diffusion Models. arXiv preprint arXiv:2311.16933","author":"Guo Yuwei","year":"2023","unstructured":"Yuwei Guo, Ceyuan Yang, Anyi Rao, Maneesh Agrawala, Dahua Lin, and Bo Dai. 2023. SparseCtrl: Adding Sparse Controls to Text-to-Video Diffusion Models. arXiv preprint arXiv:2311.16933 (2023)."},{"key":"e_1_3_2_1_23_1","volume-title":"GAIA: Zero-shot Talking Avatar Generation. In International Conference on Learning Representations (ICLR).","author":"He Tianyu","year":"2024","unstructured":"Tianyu He, Junliang Guo, Runyi Yu, Yuchi Wang, Jialiang Zhu, Kaikai An, Leyi Li, Xu Tan, Chunyu Wang, Han Hu, et al., 2024. GAIA: Zero-shot Talking Avatar Generation. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_24_1","volume-title":"Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221","author":"He Yingqing","year":"2022","unstructured":"Yingqing He, Tianyu Yang, Yong Zhang, Ying Shan, and Qifeng Chen. 2022. Latent video diffusion models for high-fidelity video generation with arbitrary lengths. arXiv preprint arXiv:2211.13221 (2022)."},{"key":"e_1_3_2_1_25_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Hertz Amir","year":"2023","unstructured":"Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. 2023. Prompt-to-prompt image editing with cross attention control. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_26_1","unstructured":"Jonathan Ho William Chan Chitwan Saharia Jay Whang Ruiqi Gao Alexey Gritsenko Diederik P Kingma Ben Poole Mohammad Norouzi David J Fleet et al. 2022a. Imagen video: High definition video generation with diffusion models. arXiv preprint arXiv:2210.02303 (2022)."},{"key":"e_1_3_2_1_27_1","volume-title":"Denoising diffusion probabilistic models. Advances in neural information processing systems","author":"Ho Jonathan","year":"2020","unstructured":"Jonathan Ho, Ajay Jain, and Pieter Abbeel. 2020. Denoising diffusion probabilistic models. Advances in neural information processing systems, Vol. 33 (2020), 6840-6851."},{"key":"e_1_3_2_1_28_1","volume-title":"Video diffusion models. 
arXiv:2204.03458","author":"Ho Jonathan","year":"2022","unstructured":"Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. 2022b. Video diffusion models. arXiv:2204.03458 (2022)."},{"key":"e_1_3_2_1_29_1","volume-title":"Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868","author":"Hong Wenyi","year":"2022","unstructured":"Wenyi Hong, Ming Ding, Wendi Zheng, Xinghan Liu, and Jie Tang. 2022. Cogvideo: Large-scale pretraining for text-to-video generation via transformers. arXiv preprint arXiv:2205.15868 (2022)."},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680600"},{"key":"e_1_3_2_1_31_1","volume-title":"Free-bloom: Zero-shot text-to-video generator with llm director and ldm animator. arXiv preprint arXiv:2309.14494","author":"Huang Hanzhuo","year":"2023","unstructured":"Hanzhuo Huang, Yufan Feng, Cheng Shi, Lan Xu, Jingyi Yu, and Sibei Yang. 2023. Free-bloom: Zero-shot text-to-video generator with llm director and ldm animator. arXiv preprint arXiv:2309.14494 (2023)."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02060"},{"key":"e_1_3_2_1_33_1","volume-title":"Geon Yeong Park, and Jong Chul Ye","author":"Jeong Hyeonho","year":"2023","unstructured":"Hyeonho Jeong, Geon Yeong Park, and Jong Chul Ye. 2023. VMC: Video Motion Customization using Temporal Attention Adaption for Text-to-Video Diffusion Models. arXiv preprint arXiv:2312.00845 (2023)."},{"key":"e_1_3_2_1_34_1","volume-title":"Ground-A-Video: Zero-shot Grounded Video Editing using Text-to-image Diffusion Models. arXiv preprint arXiv:2310.01107","author":"Jeong Hyeonho","year":"2023","unstructured":"Hyeonho Jeong and Jong Chul Ye. 2023. Ground-A-Video: Zero-shot Grounded Video Editing using Text-to-image Diffusion Models. arXiv preprint arXiv:2310.01107 (2023)."},{"key":"e_1_3_2_1_35_1","volume-title":"Chen Change Loy, and Ziwei Liu","author":"Jiang Yuming","year":"2023","unstructured":"Yuming Jiang, Tianxing Wu, Shuai Yang, Chenyang Si, Dahua Lin, Yu Qiao, Chen Change Loy, and Ziwei Liu. 2023. VideoBooth: Diffusion-based Video Generation with Image Prompts. arXiv preprint arXiv:2312.00777 (2023)."},{"key":"e_1_3_2_1_36_1","volume-title":"Dreampose: Fashion image-to-video synthesis via stable diffusion. arXiv preprint arXiv:2304.06025","author":"Karras Johanna","year":"2023","unstructured":"Johanna Karras, Aleksander Holynski, Ting-Chun Wang, and Ira Kemelmacher-Shlizerman. 2023. Dreampose: Fashion image-to-video synthesis via stable diffusion. arXiv preprint arXiv:2304.06025 (2023)."},{"key":"e_1_3_2_1_37_1","volume-title":"Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439","author":"Khachatryan Levon","year":"2023","unstructured":"Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. 2023. Text2video-zero: Text-to-image diffusion models are zero-shot video generators. arXiv preprint arXiv:2303.13439 (2023)."},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"crossref","unstructured":"Alexander Kirillov Eric Mintun Nikhila Ravi Hanzi Mao Chloe Rolland Laura Gustafson Tete Xiao Spencer Whitehead Alexander C Berg Wan-Yen Lo et al. 2023. Segment anything. 
arXiv preprint arXiv:2304.02643 (2023).","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"e_1_3_2_1_39_1","volume-title":"Anyv2v: A plug-and-play framework for any video-to-video editing tasks. arXiv preprint arXiv:2403.14468","author":"Ku Max","year":"2024","unstructured":"Max Ku, Cong Wei, Weiming Ren, Huan Yang, and Wenhu Chen. 2024. Anyv2v: A plug-and-play framework for any video-to-video editing tasks. arXiv preprint arXiv:2403.14468 (2024)."},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680718"},{"key":"e_1_3_2_1_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00715"},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00784"},{"key":"e_1_3_2_1_43_1","volume-title":"Video-p2p: Video editing with cross-attention control. arXiv:2303.04761","author":"Liu Shaoteng","year":"2023","unstructured":"Shaoteng Liu, Yuechen Zhang, Wenbo Li, Zhe Lin, and Jiaya Jia. 2023. Video-p2p: Video editing with cross-attention control. arXiv:2303.04761 (2023)."},{"key":"e_1_3_2_1_44_1","volume-title":"Customizing Motion in Text-to-Video Diffusion Models. arXiv preprint arXiv:2312.04966","author":"Materzynska Joanna","year":"2023","unstructured":"Joanna Materzynska, Josef Sivic, Eli Shechtman, Antonio Torralba, Richard Zhang, and Bryan Russell. 2023. Customizing Motion in Text-to-Video Diffusion Models. arXiv preprint arXiv:2312.04966 (2023)."},{"key":"e_1_3_2_1_45_1","volume-title":"SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations. In International Conference on Learning Representations.","author":"Meng Chenlin","year":"2022","unstructured":"Chenlin Meng, Yutong He, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, and Stefano Ermon. 2022. SDEdit: Guided Image Synthesis and Editing with Stochastic Differential Equations. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_46_1","volume-title":"Yossi Matias, Yael Pritch, Yaniv Leviathan, and Yedid Hoshen.","author":"Molad Eyal","year":"2023","unstructured":"Eyal Molad, Eliahu Horwitz, Dani Valevski, Alex Rav Acha, Yossi Matias, Yael Pritch, Yaniv Leviathan, and Yedid Hoshen. 2023. Dreamix: Video diffusion models are general video editors. arXiv preprint arXiv:2302.01329 (2023)."},{"key":"e_1_3_2_1_47_1","volume-title":"ReVideo: Remake a Video with Motion and Content Control. arXiv preprint arXiv:2405.13865","author":"Mou Chong","year":"2024","unstructured":"Chong Mou, Mingdeng Cao, Xintao Wang, Zhaoyang Zhang, Ying Shan, and Jian Zhang. 2024. ReVideo: Remake a Video with Motion and Content Control. arXiv preprint arXiv:2405.13865 (2024)."},{"key":"e_1_3_2_1_48_1","volume-title":"Codef: Content deformation fields for temporally consistent video processing. arXiv preprint arXiv:2308.07926","author":"Ouyang Hao","year":"2023","unstructured":"Hao Ouyang, Qiuyu Wang, Yuxi Xiao, Qingyan Bai, Juntao Zhang, Kecheng Zheng, Xiaowei Zhou, Qifeng Chen, and Yujun Shen. 2023. Codef: Content deformation fields for temporally consistent video processing. arXiv preprint arXiv:2308.07926 (2023)."},{"key":"e_1_3_2_1_49_1","volume-title":"I2VEdit: First-Frame-Guided Video Editing via Image-to-Video Diffusion Models. arXiv preprint arXiv:2405.16537","author":"Ouyang Wenqi","year":"2024","unstructured":"Wenqi Ouyang, Yi Dong, Lei Yang, Jianlou Si, and Xingang Pan. 2024. I2VEdit: First-Frame-Guided Video Editing via Image-to-Video Diffusion Models. 
arXiv preprint arXiv:2405.16537 (2024)."},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01460"},{"key":"e_1_3_2_1_51_1","volume-title":"Freenoise: Tuning-free longer video diffusion via noise rescheduling. arXiv preprint arXiv:2310.15169","author":"Qiu Haonan","year":"2023","unstructured":"Haonan Qiu, Menghan Xia, Yong Zhang, Yingqing He, Xintao Wang, Ying Shan, and Ziwei Liu. 2023. Freenoise: Tuning-free longer video diffusion via noise rescheduling. arXiv preprint arXiv:2310.15169 (2023)."},{"key":"e_1_3_2_1_52_1","volume-title":"Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125","author":"Ramesh Aditya","year":"2022","unstructured":"Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. 2022. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, Vol. 1, 2 (2022), 3."},{"key":"e_1_3_2_1_53_1","volume-title":"European Conference on Computer Vision.","author":"Ren Yixuan","year":"2024","unstructured":"Yixuan Ren, Yang Zhou, Jimei Yang, Jing Shi, Difan Liu, Feng Liu, Mingi Kwon, and Abhinav Shrivastava. 2024. Customize-a-video: One-shot motion customization of text-to-video diffusion models. In European Conference on Computer Vision."},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"e_1_3_2_1_55_1","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume":"35","author":"Saharia Chitwan","year":"2022","unstructured":"Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al., 2022. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, Vol. 35 (2022), 36479-36494.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.308"},{"key":"e_1_3_2_1_57_1","volume-title":"ACM SIGGRAPH 2024 Conference Papers. 1-11","author":"Qin Hongwei","year":"2024","unstructured":"Xiaoyu Shi, Zhaoyang Huang, Fu-Yun Wang, Weikang Bian, Dasong Li, Yi Zhang, Manyuan Zhang, Ka Chun Cheung, Simon See, Hongwei Qin, et al., 2024. Motion-i2v: Consistent and controllable image-to-video generation with explicit motion modeling. In ACM SIGGRAPH 2024 Conference Papers. 1-11."},{"key":"e_1_3_2_1_58_1","volume-title":"Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792","author":"Singer Uriel","year":"2022","unstructured":"Uriel Singer, Adam Polyak, Thomas Hayes, Xi Yin, Jie An, Songyang Zhang, Qiyuan Hu, et al., 2022. Make-a-video: Text-to-video generation without text-video data. arXiv preprint arXiv:2209.14792 (2022)."},{"key":"e_1_3_2_1_59_1","volume-title":"International Conference on Learning Representations (ICLR).","author":"Song Jiaming","year":"2021","unstructured":"Jiaming Song, Chenlin Meng, and Stefano Ermon. 2021. Denoising diffusion implicit models. In International Conference on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_60_1","volume-title":"Drag-A-Video: Non-rigid Video Editing with Point-based Interaction. arXiv preprint arXiv:2312.02936","author":"Teng Yao","year":"2023","unstructured":"Yao Teng, Enze Xie, Yue Wu, Haoyu Han, Zhenguo Li, and Xihui Liu. 2023. 
Drag-A-Video: Non-rigid Video Editing with Point-based Interaction. arXiv preprint arXiv:2312.02936 (2023)."},{"key":"e_1_3_2_1_61_1","volume-title":"MotionEditor","author":"Tu Shuyuan","year":"1883","unstructured":"Shuyuan Tu, Qi Dai, Zhi-Qi Cheng, Han Hu, Xintong Han, Zuxuan Wu, and Yu-Gang Jiang. 2023. MotionEditor: Editing Video Motion via Content-Aware Diffusion. arXiv preprint arXiv:2311.18830 (2023)."},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00191"},{"key":"e_1_3_2_1_63_1","volume-title":"Generating videos with scene dynamics. Advances in neural information processing systems","author":"Vondrick Carl","year":"2016","unstructured":"Carl Vondrick, Hamed Pirsiavash, and Antonio Torralba. 2016. Generating videos with scene dynamics. Advances in neural information processing systems, Vol. 29 (2016)."},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"publisher","DOI":"10.1145\/3640543.3645143"},{"key":"e_1_3_2_1_65_1","volume-title":"DreamVideo: High-Fidelity Image-to-Video Generation with Image Retention and Text Guidance. arXiv preprint arXiv:2312.03018","author":"Wang Cong","year":"2023","unstructured":"Cong Wang, Jiaxi Gu, Panwen Hu, Songcen Xu, Hang Xu, and Xiaodan Liang. 2023b. DreamVideo: High-Fidelity Image-to-Video Generation with Image Retention and Text Guidance. arXiv preprint arXiv:2312.03018 (2023)."},{"key":"e_1_3_2_1_66_1","volume-title":"COVE: Unleashing the Diffusion Feature Correspondence for Consistent Video Editing. arXiv preprint arXiv:2406.08850","author":"Wang Jiangshan","year":"2024","unstructured":"Jiangshan Wang, Yue Ma, Jiayi Guo, Yicheng Xiao, Gao Huang, and Xiu Li. 2024b. COVE: Unleashing the Diffusion Feature Correspondence for Consistent Video Editing. arXiv preprint arXiv:2406.08850 (2024)."},{"key":"e_1_3_2_1_67_1","volume-title":"Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571","author":"Wang Jiuniu","year":"2023","unstructured":"Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. 2023d. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571 (2023)."},{"key":"e_1_3_2_1_68_1","volume-title":"Advances in Neural Information Processing Systems","volume":"32","author":"Wang Ting-Chun","year":"2019","unstructured":"Ting-Chun Wang, Ming-Yu Liu, Andrew Tao, Guilin Liu, Bryan Catanzaro, and Jan Kautz. 2019. Few-shot Video-to-Video Synthesis. Advances in Neural Information Processing Systems, Vol. 32 (2019)."},{"key":"e_1_3_2_1_69_1","volume-title":"Zero-shot video editing using off-the-shelf image diffusion models. arXiv preprint arXiv:2303.17599","author":"Wang Wen","year":"2023","unstructured":"Wen Wang, Yan Jiang, Kangyang Xie, Zide Liu, Hao Chen, Yue Cao, Xinlong Wang, and Chunhua Shen. 2023c. Zero-shot video editing using off-the-shelf image diffusion models. arXiv preprint arXiv:2303.17599 (2023)."},{"key":"e_1_3_2_1_70_1","volume-title":"Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103","author":"Wang Yaohui","year":"2023","unstructured":"Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al., 2023a. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103 (2023)."},{"key":"e_1_3_2_1_71_1","volume-title":"2023 e. MotionCtrl: A Unified and Flexible Motion Controller for Video Generation. 
arXiv preprint arXiv:2312.03641","author":"Wang Zhouxia","year":"2023","unstructured":"Zhouxia Wang, Ziyang Yuan, Xintao Wang, Tianshui Chen, Menghan Xia, Ping Luo, and Ying Shan. 2023 e. MotionCtrl: A Unified and Flexible Motion Controller for Video Generation. arXiv preprint arXiv:2312.03641 (2023)."},{"key":"e_1_3_2_1_72_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00789"},{"key":"e_1_3_2_1_73_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00701"},{"key":"e_1_3_2_1_74_1","volume-title":"CVPR 2023 Text Guided Video Editing Competition. arXiv:2310","author":"Wu Jay Zhangjie","year":"2023","unstructured":"Jay Zhangjie Wu, Xiuyu Li, Difei Gao, Zhen Dong, Jinbin Bai, Aishani Singh, Xiaoyu Xiang, Youzeng Li, Zuwei Huang, Yuanxi Sun, Rui He, Feng Hu, Junhua Hu, Hai Huang, Hanyu Zhu, Xu Cheng, Jie Tang, Mike Zheng Shou, Kurt Keutzer, and Forrest Iandola. 2023b. CVPR 2023 Text Guided Video Editing Competition. arXiv:2310.16003 [cs.CV]"},{"key":"e_1_3_2_1_75_1","unstructured":"Jinbo Xing Menghan Xia Yuxin Liu Yuechen Zhang Yong Zhang Yingqing He Hanyuan Liu Haoxin Chen Xiaodong Cun Xintao Wang et al. 2023. Make-Your-Video: Customized Video Generation Using Textual and Structural Guidance. arXiv preprint arXiv:2306.00943 (2023)."},{"key":"e_1_3_2_1_76_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00748"},{"key":"e_1_3_2_1_77_1","first-page":"77132","article-title":"Hoi-swap: Swapping objects in videos with hand-object interaction awareness","volume":"37","author":"Xue Zihui Sherry","year":"2024","unstructured":"Zihui Sherry Xue, Romy Luo, Changan Chen, and Kristen Grauman. 2024. Hoi-swap: Swapping objects in videos with hand-object interaction awareness. Advances in Neural Information Processing Systems, Vol. 37 (2024), 77132-77164.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_78_1","volume-title":"Motion-Conditioned Image Animation for Video Editing. arXiv preprint arXiv:2311.18827","author":"Yan Wilson","year":"2023","unstructured":"Wilson Yan, Andrew Brown, Pieter Abbeel, Rohit Girdhar, and Samaneh Azadi. 2023. Motion-Conditioned Image Animation for Video Editing. arXiv preprint arXiv:2311.18827 (2023)."},{"key":"e_1_3_2_1_79_1","volume-title":"Videogpt: Video generation using vq-vae and transformers. arXiv:2104.10157","author":"Yan Wilson","year":"2021","unstructured":"Wilson Yan, Yunzhi Zhang, Pieter Abbeel, and Aravind Srinivas. 2021. Videogpt: Video generation using vq-vae and transformers. arXiv:2104.10157 (2021)."},{"key":"e_1_3_2_1_80_1","volume-title":"Rerender A Video: Zero-Shot Text-Guided Video-to-Video Translation. In ACM SIGGRAPH Asia 2023 Conference Proceedings.","author":"Yang Shuai","year":"2023","unstructured":"Shuai Yang, Yifan Zhou, Ziwei Liu, and Chen Change Loy. 2023. Rerender A Video: Zero-Shot Text-Guided Video-to-Video Translation. In ACM SIGGRAPH Asia 2023 Conference Proceedings."},{"key":"e_1_3_2_1_81_1","volume-title":"Cogvideox: Text-to-video diffusion models with an expert transformer. arXiv preprint arXiv:2408.06072","author":"Yang Zhuoyi","year":"2024","unstructured":"Zhuoyi Yang, Jiayan Teng, Wendi Zheng, Ming Ding, Shiyu Huang, Jiazheng Xu, Yuanming Yang, Wenyi Hong, Xiaohan Zhang, Guanyu Feng, et al., 2024. Cogvideox: Text-to-video diffusion models with an expert transformer. 
arXiv preprint arXiv:2408.06072 (2024)."},{"key":"e_1_3_2_1_82_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01008"},{"key":"e_1_3_2_1_83_1","volume-title":"Caiming Xiong, and Doyen Sahoo.","author":"Zhang David Junhao","year":"2024","unstructured":"David Junhao Zhang, Dongxu Li, Hung Le, Mike Zheng Shou, Caiming Xiong, and Doyen Sahoo. 2024. Moonshot: Towards controllable video generation and editing with multimodal conditions. arXiv preprint arXiv:2401.01827 (2024)."},{"key":"e_1_3_2_1_84_1","volume-title":"Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou.","author":"Zhang David Junhao","year":"2023","unstructured":"David Junhao Zhang, Jay Zhangjie Wu, Jia-Wei Liu, Rui Zhao, Lingmin Ran, Yuchao Gu, Difei Gao, and Mike Zheng Shou. 2023d. Show-1: Marrying pixel and latent diffusion models for text-to-video generation. arXiv:2309.15818 (2023)."},{"key":"e_1_3_2_1_85_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"e_1_3_2_1_86_1","volume-title":"MotionCrafter: One-Shot Motion Customization of Diffusion Models. arXiv preprint arXiv:2312.05288","author":"Zhang Yuxin","year":"2023","unstructured":"Yuxin Zhang, Fan Tang, Nisha Huang, Haibin Huang, Chongyang Ma, Weiming Dong, and Changsheng Xu. 2023b. MotionCrafter: One-Shot Motion Customization of Diffusion Models. arXiv preprint arXiv:2312.05288 (2023)."},{"key":"e_1_3_2_1_87_1","volume-title":"ControlVideo: Training-free Controllable Text-to-Video Generation. arXiv preprint arXiv:2305.13077","author":"Zhang Yabo","year":"2023","unstructured":"Yabo Zhang, Yuxiang Wei, Dongsheng Jiang, Xiaopeng Zhang, Wangmeng Zuo, and Qi Tian. 2023c. ControlVideo: Training-free Controllable Text-to-Video Generation. arXiv preprint arXiv:2305.13077 (2023)."}],"event":{"name":"MM '25: The 33rd ACM International Conference on Multimedia","location":"Dublin Ireland","acronym":"MM '25","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 33rd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746027.3755462","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,9]],"date-time":"2025-12-09T19:21:37Z","timestamp":1765308097000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746027.3755462"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,27]]},"references-count":87,"alternative-id":["10.1145\/3746027.3755462","10.1145\/3746027"],"URL":"https:\/\/doi.org\/10.1145\/3746027.3755462","relation":{},"subject":[],"published":{"date-parts":[[2025,10,27]]},"assertion":[{"value":"2025-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
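
For reference, a minimal sketch of how a record like the one above could be fetched and parsed, assuming the public Crossref REST API endpoint (https://api.crossref.org/works/{doi}) and the third-party requests library; the field names used below (message, title, author, reference-count) are the ones visible in this record, not an exhaustive schema:

import requests

# DOI taken from the record above (UniEdit, MM '25).
DOI = "10.1145/3746027.3755462"

# The Crossref REST API wraps the work in the same envelope shown above:
# {"status": "ok", "message-type": "work", ..., "message": {...}}.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# Pull out a few of the fields present in this record.
print(work["title"][0])  # paper title (Crossref stores titles as a list)
print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))
print("Published:", work["issued"]["date-parts"][0])  # e.g. [2025, 10, 27]
print("Deposited references:", work["reference-count"])  # 87 for this record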