{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,4]],"date-time":"2026-05-04T10:19:40Z","timestamp":1777889980803,"version":"3.51.4"},"reference-count":75,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iccv51701.2025.01605","type":"proceedings-article","created":{"date-parts":[[2026,4,29]],"date-time":"2026-04-29T19:45:49Z","timestamp":1777491949000},"page":"17281-17291","source":"Crossref","is-referenced-by-count":0,"title":["Long Context Tuning for Video Generation"],"prefix":"10.1109","author":[{"given":"YuweiGuo","family":"Guo","sequence":"first","affiliation":[{"name":"The Chinese University of Hong Kong"}]},{"given":"Ceyuan","family":"Yang","sequence":"additional","affiliation":[{"name":"ByteDance Seed"}]},{"given":"Ziyan","family":"Yang","sequence":"additional","affiliation":[{"name":"ByteDance Seed"}]},{"given":"Zhibei","family":"Ma","sequence":"additional","affiliation":[{"name":"ByteDance Seed"}]},{"given":"Zhijie","family":"Lin","sequence":"additional","affiliation":[{"name":"ByteDance Seed"}]},{"given":"Zhenheng","family":"Yang","sequence":"additional","affiliation":[{"name":"ByteDance"}]},{"given":"Dahua","family":"Lin","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong"}]},{"given":"Lu","family":"Jiang","sequence":"additional","affiliation":[{"name":"ByteDance Seed"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Multi-shot 
character consistency for text-to-video generation","author":"Atzmon","year":"2024","journal-title":"arXiv preprint"},{"key":"ref2","article-title":"Talc: Time-aligned captions for multi-scene text-to-video generation","author":"Bansal","year":"2024","journal-title":"arXiv preprint"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3680528.3687614"},{"key":"ref4","volume-title":"Flux","year":"2024"},{"key":"ref5","article-title":"Stable video diffusion: Scaling latent video diffusion models to large datasets","author":"Blattmann","year":"2023","journal-title":"arXiv preprint"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02161"},{"key":"ref7","volume-title":"Video generation models as world simulators","author":"Brooks","year":"2024"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.00727"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2008.2008924"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.52202\/079017-0759"},{"key":"ref11","article-title":"Videocrafter1: Open diffusion models for high-quality video generation","author":"Chen","year":"2023","journal-title":"arXiv preprint"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00698"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3386569.3392457"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.52202\/075280-0106"},{"key":"ref15","article-title":"Scaling rectified flow transformers for high-resolution image synthesis","volume-title":"Forty-first International Conference on Machine Learning","author":"Esser","year":"2024"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02096"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73033-7_12"},{"key":"ref18","article-title":"Animatediff: Animate your personalized text-to-image diffusion models without specific tuning","author":"Guo","year":"2023","journal-title":"arXiv 
preprint"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72946-1_19"},{"key":"ref20","article-title":"Cameractrl: Enabling camera control for text-to-video generation","author":"He","year":"2024","journal-title":"arXiv preprint"},{"key":"ref21","article-title":"Latent video diffusion models for high-fidelity long video generation","author":"He","year":"2022","journal-title":"arXiv preprint"},{"key":"ref22","article-title":"Animate-a-story: Storytelling with retrieval-augmented video generation","author":"He","year":"2023","journal-title":"arXiv preprint"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.00245"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.52202\/068431-0628"},{"key":"ref25","article-title":"Storyagent: Customized storytelling video generation via multi-agent collaboration","author":"Hu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref26","article-title":"In-context lora for diffusion transformers","author":"Huang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref27","article-title":"Conceptmaster: Multi-concept video customization on diffusion transformer models without test-time tuning","author":"Huang","year":"2025","journal-title":"arXiv preprint"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02060"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00639"},{"key":"ref30","article-title":"Videopoet: A large language model for zero-shot video generation","author":"Kondratyuk","year":"2023","journal-title":"arXiv preprint"},{"key":"ref31","article-title":"Hunyuanvideo: A systematic framework for large video generative models","author":"Kong","year":"2024","journal-title":"arXiv preprint"},{"key":"ref32","volume-title":"Kling video model","year":"2024"},{"key":"ref33","article-title":"Flow matching for generative modeling","author":"Lipman","year":"2022","journal-title":"arXiv 
preprint"},{"key":"ref34","article-title":"Flow straight and fast: Learning to generate and transfer data with rectified flow","author":"Liu","year":"2022","journal-title":"arXiv preprint"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73027-6_27"},{"key":"ref36","first-page":"131434","article-title":"Freelong: Training-free long video generation with spectralblend temporal attention","volume":"37","author":"Lu","year":"2025","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref37","article-title":"Videofusion: Decomposed diffusion models for high-quality video generation","author":"Luo","year":"2023","journal-title":"arXiv preprint"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72775-7_23"},{"key":"ref39","volume-title":"o3-mini","year":"2024"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00387"},{"key":"ref41","article-title":"Freenoise: Tuning-free longer video diffusion via noise rescheduling","author":"Qiu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01016"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2003.1211489"},{"key":"ref44","article-title":"Seaweed-7b: Cost-effective training of video generation foundation model","author":"Seawead","year":"2025","journal-title":"arXiv preprint"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00361"},{"key":"ref46","article-title":"History-guided video diffusion","author":"Song","year":"2025","journal-title":"arXiv preprint"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.127063"},{"key":"ref48","article-title":"Video-infinity: Distributed long video generation","author":"Tan","year":"2024","journal-title":"arXiv preprint"},{"key":"ref49","article-title":"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of 
context","author":"Team","year":"2024","journal-title":"arXiv preprint"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00165"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref52","article-title":"Phenaki: Variable length video generation from open domain textual description","author":"Villegas","year":"2022","journal-title":"arXiv preprint"},{"key":"ref53","article-title":"Wan: Open and advanced large-scale video generative models","author":"Wan","year":"2025","journal-title":"arXiv preprint"},{"key":"ref54","article-title":"Gen-l-video: Multi-text to long video generation via temporal co-denoising","author":"Wang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref55","article-title":"Qwen2-vl: Enhancing vision-language model\u2019s perception of the world at any resolution","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093492"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-024-02295-1"},{"key":"ref58","article-title":"Dreamrunner: Fine-grained storytelling video generation with retrieval-augmented motion adaptation","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.02234"},{"key":"ref60","article-title":"Dreamfactory: Pioneering multi-scene long video generation with a multi-agent framework","author":"Xie","year":"2024","journal-title":"arXiv preprint"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72952-2_23"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.513"},{"key":"ref63","article-title":"Cogvideox: Text-to-video diffusion models with an expert transformer","author":"Yang","year":"2024","journal-title":"arXiv 
preprint"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.73"},{"key":"ref65","article-title":"From slow bidirectional to fast causal video generators","author":"Yin","year":"2024","journal-title":"arXiv preprint"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01008"},{"key":"ref67","article-title":"Language model beats diffusion-tokenizer is key to visual generation","author":"Yu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.00845"},{"key":"ref69","first-page":"1","article-title":"Show-1: Marrying pixel and latent diffusion models for text-to-video generation","author":"Zhang","year":"2024","journal-title":"International Journal of Computer Vision"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"ref71","article-title":"I2vgen-xl: High-quality image-to-video synthesis via cascaded diffusion models","author":"Zhang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref72","article-title":"Moviedreamer: Hierarchical generation for coherent long visual sequence","author":"Zhao","year":"2024","journal-title":"arXiv preprint"},{"key":"ref73","article-title":"Videogen-of-thought: A collaborative framework for multi-shot video generation","author":"Zheng","year":"2024","journal-title":"arXiv preprint"},{"key":"ref74","article-title":"Magicvideo: Efficient video generation with latent diffusion models","author":"Zhou","year":"2022","journal-title":"arXiv preprint"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.52202\/079017-3501"}],"event":{"name":"2025 IEEE\/CVF International Conference on Computer Vision (ICCV)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/CVF International Conference on Computer Vision 
(ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11443115\/11443287\/11445350.pdf?arnumber=11445350","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T05:25:53Z","timestamp":1777613153000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11445350\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":75,"URL":"https:\/\/doi.org\/10.1109\/iccv51701.2025.01605","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}