{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,20]],"date-time":"2026-02-20T18:40:04Z","timestamp":1771612804259,"version":"3.50.1"},"reference-count":96,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,16]],"date-time":"2024-06-16T00:00:00Z","timestamp":1718496000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,16]],"date-time":"2024-06-16T00:00:00Z","timestamp":1718496000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"NSF","doi-asserted-by":"publisher","award":["IIS-239076"],"award-info":[{"award-number":["IIS-239076"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,16]]},"DOI":"10.1109\/cvpr52733.2024.00695","type":"proceedings-article","created":{"date-parts":[[2024,9,16]],"date-time":"2024-09-16T17:34:53Z","timestamp":1726508093000},"page":"7277-7288","source":"Crossref","is-referenced-by-count":19,"title":["On the Content Bias in Fr\u00e9chet Video Distance"],"prefix":"10.1109","author":[{"given":"Songwei","family":"Ge","sequence":"first","affiliation":[{"name":"University of Maryland, College Park"}]},{"given":"Aniruddha","family":"Mahapatra","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]},{"given":"Gaurav","family":"Parmar","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]},{"given":"Jun-Yan","family":"Zhu","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]},{"given":"Jia-Bin","family":"Huang","sequence":"additional","affiliation":[{"name":"University of Maryland, College Park"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19790-1_2"},{"key":"ref2","author":"An","year":"2023","journal-title":"Latent-shift: Latent diffu-sion with temporal shift for efficient text-to-video generation."},{"key":"ref3","article-title":"Stochastic variational video prediction","author":"Babaeizadeh","year":"2018","journal-title":"ICLR"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref5","author":"Bar-Tal","year":"2024","journal-title":"Lumiere: A space-time diffusion model for video generation"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2102.05095"},{"key":"ref7","article-title":"Demystifying mmd gans","author":"Binkowski","year":"2018","journal-title":"ICLR"},{"key":"ref8","author":"Blattmann","year":"2023","journal-title":"Stable video diffusion: Scaling latent video diffusion models to large datasets"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02161"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2021.103329"},{"key":"ref11","first-page":"31769","article-title":"Generating long videos of dynamic scenes","volume":"35","author":"Brooks","year":"2022","journal-title":"NeurIPS"},{"key":"ref12","author":"Brooks","year":"2024","journal-title":"Video generation models as world 
simulators"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00770"},{"key":"ref16","article-title":"Why cant i dance in the mall? learning to mitigate scene bias in action recognition","author":"Choi","year":"2019","journal-title":"NeurIPS"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref18","first-page":"1174","article-title":"Stochastic video generation with a learned prior","volume-title":"ICML","author":"Denton","year":"2018"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/0047-259X(82)90077-X"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19790-1_7"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02096"},{"key":"ref22","author":"Girdhar","year":"2023","journal-title":"Emu video: Factorizing text-to-video generation by explicit image conditioning"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.622"},{"key":"ref24","author":"Gu","year":"2023","journal-title":"Reuse and diffuse: Iterative denoising for text-to-video generation"},{"key":"ref25","author":"Gupta","year":"2023","journal-title":"Photorealistic video generation with diffusion models"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref27","article-title":"Benchmarking neural network robustness to common corruptions and perturbations","author":"Hendrycks","year":"2019","journal-title":"ICLR"},{"key":"ref28","article-title":"Gans trained by a two time-scale update rule converge to a local nash equilibrium","author":"Heusel","year":"2017","journal-title":"NeurIPS"},{"key":"ref29","article-title":"Denoising diffu-sion probabilistic models","author":"Ho","year":"2020","journal-title":"NeurIPS"},{"key":"ref30","author":"Ho","year":"2022","journal-title":"Imagen video: High definition video generation with diffusion models."},{"key":"ref31","author":"Ho","year":"2022","journal-title":"Video diffusion models."},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00769"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02060"},{"key":"ref34","first-page":"2225","article-title":"Video prediction with appearance and motion conditions","author":"Jang","year":"2018","journal-title":"ICML"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01462"},{"key":"ref37","author":"Kondratyuk","year":"2024","journal-title":"Videopoet: A large language model for zero-shot video gen-eration"},{"key":"ref38","article-title":"Improved precision and recall metric for assessing generative models","author":"Kynkaanniemi","year":"2019","journal-title":"NeurIPS"},{"key":"ref39","article-title":"The role of imagenet classes in frechet inception distance","author":"Kynkaanniemi","year":"2022","journal-title":"ICLR"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3351028"},{"key":"ref41","author":"Li","year":"2022","journal-title":"Uniformerv2: Spatiotemporal learning by arming image vits with video 
uniformer"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01231-1_32"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01465"},{"key":"ref44","author":"Liu","year":"2023","journal-title":"Evalcrafter: Benchmarking and evalu-ating large video generation models"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00984"},{"key":"ref46","article-title":"On self-supervised image representations for gan evaluation","author":"Morozov","year":"2020","journal-title":"ICLR"},{"key":"ref47","first-page":"7176","article-title":"Reliable fidelity and diversity metrics for generative models","volume-title":"ICML","author":"Naeem","year":"2020"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01112"},{"issue":"2","key":"ref49","first-page":"3","volume":"1","author":"Ramesh","year":"2022","journal-title":"Hierarchical text-conditional image generation with clip latents."},{"key":"ref50","author":"Ranzato","year":"2014","journal-title":"Video (language) modeling: a baseline for generative models of natural videos."},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref52","article-title":"FaceForen-sics++: Learning to detect manipulated facial images","author":"Rossler","year":"2019","journal-title":"ICCV"},{"key":"ref53","article-title":"Pho-torealistic text-to-image diffusion models with deep language understanding","author":"Saharia","year":"2022","journal-title":"NeurIPS"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.308"},{"key":"ref55","article-title":"Assessing generative models via precision and recall","author":"Sajjadi","year":"2018","journal-title":"NeurIPS"},{"key":"ref56","article-title":"Improved techniques for training gans","author":"Salimans","year":"2016","journal-title":"NeurIPS"},{"key":"ref57","article-title":"LAION-5b: An open large-scale dataset for training next generation image-text models","volume-title":"Thirty-sixth Conference on Neural Information Processing Systems Datasets and Benchmarks Track","author":"Schuhmann","year":"2022"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2010.2042111"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00058"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00547"},{"key":"ref61","article-title":"First order motion model for image animation","author":"Siarohin","year":"2019","journal-title":"NeurIPS"},{"key":"ref62","author":"Singer","year":"2022","journal-title":"Make-a-video: Text-to-video generation without text-video data."},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2869673"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00361"},{"key":"ref65","article-title":"Denoising diffusion implicit models","author":"Song","year":"2021","journal-title":"ICLR"},{"key":"ref66","author":"Soomro","year":"2012","journal-title":"U cf1 0 1: A dataset of 101 human actions classes from videos in the wild."},{"key":"ref67","article-title":"Unsupervised learning of video representations using lstms","author":"Srivastava","year":"2015","journal-title":"ICML"},{"key":"ref68","article-title":"Rethinking the inception archi-tecture for computer vision","author":"Szegedy","year":"2016","journal-title":"CVP R"},{"key":"ref69","article-title":"A good image generator is what you need for high-resolution video 
synthesis","author":"Tian","year":"2021","journal-title":"ICLR"},{"key":"ref70","first-page":"10078","article-title":"Video-mae: Masked autoencoders are data-efficient learners for self-supervised video pre-training","volume":"35","author":"Tong","year":"2022","journal-title":"NeurIPS"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00165"},{"key":"ref72","author":"Unterthiner","year":"2018","journal-title":"To-wards accurate generative models of video: A new metric & challenges."},{"key":"ref73","article-title":"Decomposing motion and content for natural video sequence prediction","author":"Villegas","year":"2017","journal-title":"ICLR"},{"key":"ref74","article-title":"Gen-erating videos with scene dynamics","author":"Vondrick","year":"2016","journal-title":"NeurIPS"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01398"},{"key":"ref77","author":"Wang","year":"2023","journal-title":"Videofactory: Swap attention in spatiotemporal diffusions for text-to-video generation."},{"key":"ref78","author":"Wang","year":"2023","journal-title":"Videolcm: Video latent consistency model"},{"key":"ref79","article-title":"G 3an: Disentangling appearance and motion for video generation","author":"Wang","year":"2020","journal-title":"CVPR"},{"key":"ref80","author":"Wang","year":"2023","journal-title":"Lavie: High-quality video gener-ation with cascaded latent diffusion models."},{"issue":"2","key":"ref81","first-page":"121","article-title":"Video quality as-sessment based on structural distortion measurement","volume":"19","author":"Wang","year":"2004","journal-title":"Signal processing: Image communication"},{"key":"ref82","article-title":"Scaling autoregressive video models","author":"Weissenborn","year":"2019","journal-title":"ICLR"},{"key":"ref83","author":"Wu","year":"2021","journal-title":"Godiva: Generating open-domain videos from natural descriptions."},{"key":"ref84","author":"Xing","year":"2023","journal-title":"Dynamicrafter: Animating open-domain images with video diffusion priors"},{"key":"ref85","author":"Xing","year":"2023","journal-title":"Simda: Simple diffusion adapter for efficient video generation"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00251"},{"key":"ref87","author":"Yan","year":"2021","journal-title":"Videogpt: Video generation using vq-vae and transform-ers."},{"key":"ref88","author":"Yan","year":"2023","journal-title":"Temporally consistent transformers for video generation"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01008"},{"key":"ref90","author":"Yu","year":"2023","journal-title":"Language model beats diffusion-tokenizer is key to visual gen-eration"},{"key":"ref91","article-title":"Generating videos with dynamics-aware implicit generative adversarial networks","author":"Yu","year":"2022","journal-title":"ICLR"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01770"},{"key":"ref93","author":"Zhang","year":"2023","journal-title":"Show-I: Marrying pixel and latent diffusion models for text-to-video generation"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00068"},{"key":"ref95","author":"Zhou","journal-title":"Magicvideo: Efficient video generation with latent diffusion models."},{"key":"ref96","article-title":"Hype: A bench-mark for human eye perceptual evaluation of generative 
mod-els","author":"Zhou","year":"2019","journal-title":"NeurIPS"}],"event":{"name":"2024 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","location":"Seattle, WA, USA","start":{"date-parts":[[2024,6,16]]},"end":{"date-parts":[[2024,6,22]]}},"container-title":["2024 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10654794\/10654797\/10655312.pdf?arnumber=10655312","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,21]],"date-time":"2024-09-21T05:17:16Z","timestamp":1726895836000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10655312\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,16]]},"references-count":96,"URL":"https:\/\/doi.org\/10.1109\/cvpr52733.2024.00695","relation":{},"subject":[],"published":{"date-parts":[[2024,6,16]]}}}