{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T07:09:25Z","timestamp":1773299365259,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":68,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T00:00:00Z","timestamp":1730073600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["No. 62032006, 62172103"],"award-info":[{"award-number":["No. 62032006, 62172103"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,28]]},"DOI":"10.1145\/3664647.3681506","type":"proceedings-article","created":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T06:59:27Z","timestamp":1729925967000},"page":"7075-7084","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":4,"title":["FreeEnhance: Tuning-Free Image Enhancement via Content-Consistent Noising-and-Denoising Process"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-2223-2411","authenticated-orcid":false,"given":"Yang","family":"Luo","sequence":"first","affiliation":[{"name":"School of Computer Science, Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1940-6137","authenticated-orcid":false,"given":"Yiheng","family":"Zhang","sequence":"additional","affiliation":[{"name":"HiDream.ai Inc., Beijing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7485-9198","authenticated-orcid":false,"given":"Zhaofan","family":"Qiu","sequence":"additional","affiliation":[{"name":"HiDream.ai Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7587-101X","authenticated-orcid":false,"given":"Ting","family":"Yao","sequence":"additional","affiliation":[{"name":"HiDream.ai Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1543-6889","authenticated-orcid":false,"given":"Zhineng","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Computer Science, Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1907-8567","authenticated-orcid":false,"given":"Yu-Gang","family":"Jiang","sequence":"additional","affiliation":[{"name":"School of Computer Science, Fudan University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5990-7307","authenticated-orcid":false,"given":"Tao","family":"Mei","sequence":"additional","affiliation":[{"name":"HiDream.ai Inc., Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2024,10,28]]},"reference":[{"key":"e_1_3_2_2_1_1","unstructured":"2024. Dreamshaper xl. https:\/\/civitai.com\/models\/112902\/dreamshaper-xl"},{"key":"e_1_3_2_2_2_1","unstructured":"2024. Fooocus. https:\/\/github.com\/lllyasviel\/Fooocus"},{"key":"e_1_3_2_2_3_1","volume-title":"Kyong Hwan Jin, and Seungryong Kim.","author":"Ahn Donghoon","year":"2024","unstructured":"Donghoon Ahn, Hyoungwon Cho, Jaewon Min, Wooseok Jang, Jungwoo Kim, SeonHwa Kim, Hyun Hee Park, Kyong Hwan Jin, and Seungryong Kim. 2024. Self-Rectifying Diffusion Sampling with Perturbed-Attention Guidance. arXiv preprint arXiv:2403.17377 (2024)."},{"key":"e_1_3_2_2_4_1","volume-title":"Score Distillation Sampling with Learned Manifold Corrective. arXiv preprint arXiv:2401.05293","author":"Alldieck Thiemo","year":"2024","unstructured":"Thiemo Alldieck, Nikos Kolotouros, and Cristian Sminchisescu. 2024. 
Score Distillation Sampling with Learned Manifold Corrective. arXiv preprint arXiv:2401.05293 (2024)."},{"key":"e_1_3_2_2_5_1","unstructured":"Jacob Austin Daniel D Johnson Jonathan Ho Daniel Tarlow and Rianne Van Den Berg. 2021. Structured denoising diffusion models in discrete state-spaces. In NeurIPS."},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"crossref","unstructured":"Arpit Bansal Hong-Min Chu Avi Schwarzschild Soumyadip Sengupta Micah Goldblum Jonas Geiping and Tom Goldstein. 2024. Universal Guidance for Diffusion Models. In ICLR.","DOI":"10.1109\/CVPRW59228.2023.00091"},{"key":"e_1_3_2_2_7_1","unstructured":"Omer Bar-Tal Lior Yariv Yaron Lipman and Tali Dekel. 2023. MultiDiffusion: Fusing Diffusion Paths for Controlled Image Generation. In ICML."},{"key":"e_1_3_2_2_8_1","unstructured":"James Betker Gabriel Goh Improving Image Generation with Better Captions. https:\/\/api.semanticscholar.org\/CorpusID:264403242"},{"key":"e_1_3_2_2_9_1","unstructured":"Andreas Blattmann Tim Dockhorn Sumith Kulal Daniel Mendelevitch Maciej Kilian Dominik Lorenz Yam Levi Zion English Vikram Voleti Adam Letts et al. 2023. Stable video diffusion: Scaling latent video diffusion models to large datasets. arXiv preprint arXiv:2311.15127 (2023)."},{"key":"e_1_3_2_2_10_1","volume-title":"Controlstyle: Text-driven stylized image generation using diffusion priors. In ACM MM. 7540--7548.","author":"Chen Jingwen","year":"2023","unstructured":"Jingwen Chen, Yingwei Pan, Ting Yao, and Tao Mei. 2023. Controlstyle: Text-driven stylized image generation using diffusion priors. In ACM MM. 7540--7548."},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"crossref","unstructured":"Yang Chen Yingwei Pan Yehao Li Ting Yao and Tao Mei. 2023. Control3d: Towards controllable text-to-3d generation. In ACM MM.","DOI":"10.1145\/3581783.3612489"},{"key":"e_1_3_2_2_12_1","doi-asserted-by":"crossref","unstructured":"Yang Chen Yingwei Pan Haibo Yang Ting Yao and Tao Mei. 2024. 
Vp3d: Unleashing 2d visual prompt for text-to-3d generation. In CVPR.","DOI":"10.1109\/CVPR52733.2024.00468"},{"key":"e_1_3_2_2_13_1","unstructured":"Hyungjin Chung Jeongsol Kim Michael T Mccann Marc L Klasky and Jong Chul Ye. 2023. Diffusion posterior sampling for general noisy inverse problems. In ICLR."},{"key":"e_1_3_2_2_14_1","unstructured":"Hyungjin Chung Byeongsu Sim Dohoon Ryu and Jong Chul Ye. 2022. Improving diffusion models for inverse problems using manifold constraints. In NeurIPS."},{"key":"e_1_3_2_2_15_1","volume":"202","author":"Clark Kevin","unstructured":"Kevin Clark, Paul Vicol, Kevin Swersky, and David J Fleet. 2024. Directly finetuning diffusion models on differentiable rewards. In ICLR.","journal-title":"David J Fleet."},{"key":"e_1_3_2_2_16_1","unstructured":"Chengliang Dai Shuo Wang Yuanhan Mo Kaichen Zhou Elsa Angelini Yike Guo and Wenjia Bai. 2020. Suggestive annotation of brain tumour images with gradient-guided sampling. In MICCAI."},{"key":"e_1_3_2_2_17_1","volume-title":"Emu: Enhancing image generation models using photogenic needles in a haystack. arXiv preprint arXiv:2309.15807","author":"Dai Xiaoliang","year":"2023","unstructured":"Xiaoliang Dai, Ji Hou, Chih-Yao Ma, Sam Tsai, Jialiang Wang, Rui Wang, Peizhao Zhang, Simon Vandenhende, Xiaofang Wang, Abhimanyu Dubey, et al. 2023. Emu: Enhancing image generation models using photogenic needles in a haystack. arXiv preprint arXiv:2309.15807 (2023)."},{"key":"e_1_3_2_2_18_1","unstructured":"Prafulla Dhariwal and Alexander Nichol. 2021. Diffusion models beat gans on image synthesis. In NeurIPS."},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"crossref","unstructured":"Wenkai Dong Song Xue Xiaoyue Duan and Shumin Han. 2023. Prompt tuning inversion for text-driven image editing using diffusion models. In ICCV.","DOI":"10.1109\/ICCV51070.2023.00683"},{"key":"e_1_3_2_2_20_1","unstructured":"Ruoyi Du Dongliang Chang Timothy Hospedales Yi-Zhe Song and Zhanyu Ma. 2024. 
DemoFusion: Democratising High-Resolution Image Generation With No $$$. In CVPR."},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"crossref","unstructured":"Deepanway Ghosal Navonil Majumder Ambuj Mehrish and Soujanya Poria. 2023. Text-to-Audio Generation using Instruction Guided Latent Diffusion Model. In ACM MM.","DOI":"10.1145\/3581783.3612348"},{"key":"e_1_3_2_2_22_1","unstructured":"Shuyang Gu Dong Chen Jianmin Bao Fang Wen Bo Zhang Dongdong Chen Lu Yuan and Baining Guo. 2022. Vector quantized diffusion model for text-to-image synthesis. In CVPR."},{"key":"e_1_3_2_2_23_1","unstructured":"Yingqing He Shaoshu Yang Haoxin Chen Xiaodong Cun Menghan Xia Yong Zhang Xintao Wang Ran He Qifeng Chen and Ying Shan. 2024. ScaleCrafter: Tuning-free Higher-Resolution Visual Generation with Diffusion Models. In ICLR."},{"key":"e_1_3_2_2_24_1","unstructured":"Jonathan Ho Ajay Jain and Pieter Abbeel. 2020. Denoising diffusion probabilistic models. In NeurIPS."},{"key":"e_1_3_2_2_25_1","unstructured":"Jonathan Ho and Tim Salimans. 2021. Classifier-free diffusion guidance. In NeurIPS."},{"key":"e_1_3_2_2_26_1","volume":"202","author":"Ho Jonathan","unstructured":"Jonathan Ho, Tim Salimans, Alexey Gritsenko, William Chan, Mohammad Norouzi, and David J Fleet. 2022. Video diffusion models. In NeurIPS.","journal-title":"David J Fleet."},{"key":"e_1_3_2_2_27_1","doi-asserted-by":"crossref","unstructured":"Susung Hong Gyuseong Lee Wooseok Jang and Seungryong Kim. 2023. Improving sample quality of diffusion models using self-attention guidance. In ICCV.","DOI":"10.1109\/ICCV51070.2023.00686"},{"key":"e_1_3_2_2_28_1","volume-title":"Imagic: Text-based real image editing with diffusion models. In CVPR.","author":"Kawar Bahjat","year":"2023","unstructured":"Bahjat Kawar, Shiran Zada, Oran Lang, Omer Tov, Huiwen Chang, Tali Dekel, Inbar Mosseri, and Michal Irani. 2023. Imagic: Text-based real image editing with diffusion models. 
In CVPR."},{"key":"e_1_3_2_2_29_1","volume-title":"Musiq: Multi-scale image quality transformer. In ICCV.","author":"Ke Junjie","year":"2021","unstructured":"Junjie Ke, Qifei Wang, Yilin Wang, Peyman Milanfar, and Feng Yang. 2021. Musiq: Multi-scale image quality transformer. In ICCV."},{"key":"e_1_3_2_2_30_1","volume-title":"Evaluating Text-to-Visual Generation with Image-to-Text Generation. arXiv preprint arXiv:2404.01291","author":"Lin Zhiqiu","year":"2024","unstructured":"Zhiqiu Lin, Deepak Pathak, Baiqi Li, Jiayao Li, Xide Xia, Graham Neubig, Pengchuan Zhang, and Deva Ramanan. 2024. Evaluating Text-to-Visual Generation with Image-to-Text Generation. arXiv preprint arXiv:2404.01291 (2024)."},{"key":"e_1_3_2_2_31_1","volume-title":"European Conference on Computer Vision. Springer, 423--439","author":"Liu Nan","year":"2022","unstructured":"Nan Liu, Shuang Li, Yilun Du, Antonio Torralba, and Joshua B Tenenbaum. 2022. Compositional visual generation with composable diffusion models. In European Conference on Computer Vision. Springer, 423--439."},{"key":"e_1_3_2_2_32_1","volume-title":"Videodrafter: Content-consistent multi-scene video generation with llm. arXiv preprint arXiv:2401.01256","author":"Long Fuchen","year":"2024","unstructured":"Fuchen Long, Zhaofan Qiu, Ting Yao, and Tao Mei. 2024. Videodrafter: Content-consistent multi-scene video generation with llm. arXiv preprint arXiv:2401.01256 (2024)."},{"key":"e_1_3_2_2_33_1","volume-title":"Repaint: Inpainting using denoising diffusion probabilistic models. In CVPR.","author":"Lugmayr Andreas","year":"2022","unstructured":"Andreas Lugmayr, Martin Danelljan, Andres Romero, Fisher Yu, Radu Timofte, and Luc Van Gool. 2022. Repaint: Inpainting using denoising diffusion probabilistic models. In CVPR."},{"key":"e_1_3_2_2_34_1","volume-title":"Readout Guidance: Learning Control from Diffusion Features. 
In CVPR.","author":"Luo Grace","year":"2024","unstructured":"Grace Luo, Trevor Darrell, Oliver Wang, Dan B Goldman, and Aleksander Holynski. 2024. Readout Guidance: Learning Control from Diffusion Features. In CVPR."},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"crossref","unstructured":"Henri Ma\u00eetre. 2015. Image Quality.","DOI":"10.1002\/9781119238447.ch6"},{"key":"e_1_3_2_2_36_1","volume-title":"Sdedit: Image synthesis and editing with stochastic differential equations. In ICLR.","author":"Meng Chenlin","year":"2022","unstructured":"Chenlin Meng, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, and Stefano Ermon. 2022. Sdedit: Image synthesis and editing with stochastic differential equations. In ICLR."},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"crossref","unstructured":"Ron Mokady Amir Hertz Kfir Aberman Yael Pritch and Daniel Cohen-Or. 2023. Null-text inversion for editing real images using guided diffusion models. In CVPR.","DOI":"10.1109\/CVPR52729.2023.00585"},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"crossref","unstructured":"Chong Mou Xintao Wang Liangbin Xie Yanze Wu Jian Zhang Zhongang Qi Ying Shan and Xiaohu Qie. 2023. T2I-Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models. arXiv:2302.08453 [cs.CV]","DOI":"10.1609\/aaai.v38i5.28226"},{"key":"e_1_3_2_2_39_1","volume-title":"Glide: Towards photorealistic image generation and editing with text-guided diffusion models.","author":"Nichol Alex","year":"2021","unstructured":"Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. 2021. Glide: Towards photorealistic image generation and editing with text-guided diffusion models. (2021)."},{"key":"e_1_3_2_2_40_1","unstructured":"Alexander Quinn Nichol and Prafulla Dhariwal. 2021. Improved denoising diffusion probabilistic models. In ICML."},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"crossref","unstructured":"William Peebles and Saining Xie. 2023. 
Scalable diffusion models with transformers. In ICCV.","DOI":"10.1109\/ICCV51070.2023.00387"},{"key":"e_1_3_2_2_42_1","volume-title":"SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis. In ICLR.","author":"Podell Dustin","year":"2024","unstructured":"Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas M\u00fcller, Joe Penna, and Robin Rombach. 2024. SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis. In ICLR."},{"key":"e_1_3_2_2_43_1","volume-title":"Dreamfusion: Text-to-3d using 2d diffusion. In ICLR.","author":"Poole Ben","year":"2023","unstructured":"Ben Poole, Ajay Jain, Jonathan T Barron, and Ben Mildenhall. 2023. Dreamfusion: Text-to-3d using 2d diffusion. In ICLR."},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"crossref","unstructured":"Yurui Qian Qi Cai Yingwei Pan Yehao Li Ting Yao Qibin Sun and Tao Mei. 2024. Boosting Diffusion Models with Moving Average Sampling in Frequency Domain. In CVPR.","DOI":"10.1109\/CVPR52733.2024.00851"},{"key":"e_1_3_2_2_45_1","volume-title":"Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al.","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In ICML."},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"crossref","unstructured":"Robin Rombach Andreas Blattmann Dominik Lorenz Patrick Esser and Bj\u00f6rn Ommer. 2022. High-resolution image synthesis with latent diffusion models. 
In CVPR.","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"e_1_3_2_2_47_1","volume-title":"Burcu Karagol Ayan, Tim Salimans, et al.","author":"Saharia Chitwan","year":"2022","unstructured":"Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. 2022. Photorealistic text-to-image diffusion models with deep language understanding. In NeurIPS."},{"key":"e_1_3_2_2_48_1","doi-asserted-by":"crossref","unstructured":"Cem Sazara. 2023. Diffusion Models in Generative AI. In ACM MM.","DOI":"10.1145\/3581783.3613857"},{"key":"e_1_3_2_2_49_1","unstructured":"Christoph Schuhmann Romain Beaumont Richard Vencu Cade Gordon Ross Wightman Mehdi Cherti Theo Coombes Aarush Katta Clayton Mullis Mitchell Wortsman Patrick Schramowski Srivatsa Kundurthy Katherine Crowson Ludwig Schmidt Robert Kaczmarczyk and Jenia Jitsev. 2022. LAION-5B: An open large-scale dataset for training next generation image-text models. In NeurIPS."},{"key":"e_1_3_2_2_50_1","unstructured":"Yan Shu Weichao Zeng Zhenhang Li Fangmin Zhao and Yu Zhou. 2024. Visual Text Meets Low-level Vision: A Comprehensive Survey on Visual Text Processing. arXiv:2402.03082 [cs.CV] https:\/\/arxiv.org\/abs\/2402.03082"},{"key":"e_1_3_2_2_51_1","volume-title":"Freeu: Free lunch in diffusion u-net. In CVPR.","author":"Si Chenyang","year":"2024","unstructured":"Chenyang Si, Ziqi Huang, Yuming Jiang, and Ziwei Liu. 2024. Freeu: Free lunch in diffusion u-net. In CVPR."},{"key":"e_1_3_2_2_52_1","unstructured":"Jascha Sohl-Dickstein Eric A.Weiss Niru Maheswaranathan and Surya Ganguli. 2015. Deep Unsupervised Learning using Nonequilibrium Thermodynamics."},{"key":"e_1_3_2_2_53_1","unstructured":"Jiaming Song Chenlin Meng and Stefano Ermon. 2021. Denoising Diffusion Implicit Models. In ICLR."},{"key":"e_1_3_2_2_54_1","unstructured":"Yang Song Jascha Sohl-Dickstein Diederik P Kingma Abhishek Kumar Stefano Ermon and Ben Poole. 2020. 
Score-based generative modeling through stochastic differential equations. In ICLR."},{"key":"e_1_3_2_2_55_1","volume-title":"Diffusers: State-of-the-art diffusion models. https:\/\/github.com\/huggingface\/diffusers.","author":"von Platen Patrick","year":"2022","unstructured":"Patrick von Platen, Suraj Patil, Anton Lozhkov, Pedro Cuenca, Nathan Lambert, Kashif Rasul, Mishig Davaadorj, Dhruv Nair, Sayak Paul, William Berman, Yiyi Xu, Steven Liu, and Thomas Wolf. 2022. Diffusers: State-of-the-art diffusion models. https:\/\/github.com\/huggingface\/diffusers."},{"key":"e_1_3_2_2_56_1","volume-title":"Kelvin CK Chan, and Chen Change Loy","author":"Wang Jianyi","year":"2023","unstructured":"Jianyi Wang, Kelvin CK Chan, and Chen Change Loy. 2023. Exploring clip for assessing the look and feel of images. In AAAI."},{"key":"e_1_3_2_2_57_1","doi-asserted-by":"crossref","unstructured":"Zhixin Wang Xiaoyun Zhang Ziying Zhang Huangjie Zheng Mingyuan Zhou Ya Zhang and Yanfeng Wang. 2023. DR2: Diffusion-based Robust Degradation Remover for Blind Face Restoration. In CVPR.","DOI":"10.1109\/CVPR52729.2023.00170"},{"key":"e_1_3_2_2_58_1","volume-title":"Stylediffusion: Controllable disentangled style transfer via diffusion models. In ICCV.","author":"Wang Zhizhong","year":"2023","unstructured":"Zhizhong Wang, Lei Zhao, and Wei Xing. 2023. Stylediffusion: Controllable disentangled style transfer via diffusion models. In ICCV."},{"key":"e_1_3_2_2_59_1","volume-title":"Contrastive Prompts Improve Disentanglement in Text-to-Image Diffusion Models. arXiv preprint arXiv:2402.13490","author":"Wu Chen","year":"2024","unstructured":"Chen Wu and Fernando De la Torre. 2024. Contrastive Prompts Improve Disentanglement in Text-to-Image Diffusion Models. arXiv preprint arXiv:2402.13490 (2024)."},{"key":"e_1_3_2_2_60_1","volume-title":"Human preference score v2: A solid benchmark for evaluating human preferences of text-to-image synthesis. 
arXiv preprint arXiv:2306.09341","author":"Wu Xiaoshi","year":"2023","unstructured":"Xiaoshi Wu, Yiming Hao, Keqiang Sun, Yixiong Chen, Feng Zhu, Rui Zhao, and Hongsheng Li. 2023. Human preference score v2: A solid benchmark for evaluating human preferences of text-to-image synthesis. arXiv preprint arXiv:2306.09341 (2023)."},{"key":"e_1_3_2_2_61_1","volume-title":"Smartbrush: Text and shape guided object inpainting with diffusion model. In CVPR.","author":"Xie Shaoan","year":"2023","unstructured":"Shaoan Xie, Zhifei Zhang, Zhe Lin, Tobias Hinz, and Kun Zhang. 2023. Smartbrush: Text and shape guided object inpainting with diffusion model. In CVPR."},{"key":"e_1_3_2_2_62_1","volume-title":"A Survey on Video Diffusion Models. ArXiv abs\/2310.10647","author":"Xing Zhen","year":"2023","unstructured":"Zhen Xing, Qijun Feng, Haoran Chen, Qi Dai, Hang-Rui Hu, Hang Xu, Zuxuan Wu, and Yu-Gang Jiang. 2023. A Survey on Video Diffusion Models. ArXiv abs\/2310.10647 (2023)."},{"key":"e_1_3_2_2_63_1","doi-asserted-by":"crossref","unstructured":"Haibo Yang Yang Chen Yingwei Pan Ting Yao Zhineng Chen and Tao Mei. 2023. 3dstyle-diffusion: Pursuing fine-grained text-driven 3d stylization with 2d diffusion models. In ACM MM. 6860--6868.","DOI":"10.1145\/3581783.3612363"},{"key":"e_1_3_2_2_64_1","volume-title":"Maniqa: Multi-dimension attention network for no-reference image quality assessment. In CVPR.","author":"Yang Sidi","year":"2022","unstructured":"Sidi Yang, Tianhe Wu, Shuwei Shi, Shanshan Lao, Yuan Gong, Mingdeng Cao, Jiahao Wang, and Yujiu Yang. 2022. Maniqa: Multi-dimension attention network for no-reference image quality assessment. In CVPR."},{"key":"e_1_3_2_2_65_1","doi-asserted-by":"crossref","unstructured":"Lvmin Zhang Anyi Rao and Maneesh Agrawala. 2023. Adding conditional control to text-to-image diffusion models. 
In ICCV.","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"e_1_3_2_2_66_1","doi-asserted-by":"crossref","unstructured":"Yuxin Zhang Nisha Huang Fan Tang Haibin Huang Chongyang Ma Weiming Dong and Changsheng Xu. 2023. Inversion-based style transfer with diffusion models. In CVPR.","DOI":"10.1109\/CVPR52729.2023.00978"},{"key":"e_1_3_2_2_67_1","volume-title":"TRIP: Temporal Residual Learning with Image Noise Prior for Image-to-Video Diffusion Models. In CVPR.","author":"Zhang Zhongwei","year":"2024","unstructured":"Zhongwei Zhang, Fuchen Long, Yingwei Pan, Zhaofan Qiu, Ting Yao, Yang Cao, and Tao Mei. 2024. TRIP: Temporal Residual Learning with Image Noise Prior for Image-to-Video Diffusion Models. In CVPR."},{"key":"e_1_3_2_2_68_1","volume-title":"Sd-dit: Unleashing the power of self-supervised discrimination in diffusion transformer. In CVPR.","author":"Zhu Rui","year":"2024","unstructured":"Rui Zhu, Yingwei Pan, Yehao Li, Ting Yao, Zhenglong Sun, Tao Mei, and Chang Wen Chen. 2024. Sd-dit: Unleashing the power of self-supervised discrimination in diffusion transformer. 
In CVPR."}],"event":{"name":"MM '24: The 32nd ACM International Conference on Multimedia","location":"Melbourne VIC Australia","acronym":"MM '24","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 32nd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3681506","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3664647.3681506","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:57:48Z","timestamp":1750294668000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3681506"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,28]]},"references-count":68,"alternative-id":["10.1145\/3664647.3681506","10.1145\/3664647"],"URL":"https:\/\/doi.org\/10.1145\/3664647.3681506","relation":{},"subject":[],"published":{"date-parts":[[2024,10,28]]},"assertion":[{"value":"2024-10-28","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}