{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T06:45:38Z","timestamp":1767077138756,"version":"3.48.0"},"reference-count":34,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T00:00:00Z","timestamp":1764720000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T00:00:00Z","timestamp":1764720000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,3]]},"DOI":"10.1109\/dicta68720.2025.11302466","type":"proceedings-article","created":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T18:36:22Z","timestamp":1767033382000},"page":"1-9","source":"Crossref","is-referenced-by-count":0,"title":["Streaming Video Diffusion: Online Video Editing with Diffusion Models"],"prefix":"10.1109","author":[{"given":"Feng","family":"Chen","sequence":"first","affiliation":[{"name":"The University of Adelaide"}]},{"given":"Bohan","family":"Zhuang","sequence":"additional","affiliation":[{"name":"Monash University"}]},{"given":"Qi","family":"Wu","sequence":"additional","affiliation":[{"name":"The University of Adelaide"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Class-attention video transformer for engagement intensity prediction","author":"Ai","year":"2022","journal-title":"arXiv preprint"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref3","article-title":"Longformer: The long-document transformer","author":"Beltagy","year":"2020","journal-title":"arXiv preprint"},{"key":"ref4","first-page":"11079","article-title":"Recurrent memory transformer","volume":"35","author":"Bulatov","year":"2022","journal-title":"NeurIPS"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.02121"},{"key":"ref6","article-title":"Videocrafter1: Open diffusion models for high-quality video generation","author":"Chen","year":"2023","journal-title":"arXiv preprint"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/IROS55552.2023.10341827"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1285"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref10","first-page":"27953","article-title":"Flexible diffusion modeling of long videos","volume":"35","author":"Harvey","year":"2022","journal-title":"NeurIPS"},{"key":"ref11","article-title":"Latent video diffusion models for high-fidelity video generation with arbitrary lengths","author":"He","year":"2022","journal-title":"arXiv preprint"},{"key":"ref12","article-title":"Prompt-to-prompt image editing with cross attention control","author":"Hertz","year":"2023","journal-title":"ICLR"},{"key":"ref13","article-title":"Imagen video: High definition video generation with diffusion models","author":"Ho","year":"2022","journal-title":"arXiv preprint"},{"key":"ref14","article-title":"Classifier-free diffusion guidance","author":"Ho","year":"2022","journal-title":"arXiv preprint"},{"key":"ref15","first-page":"5156","article-title":"Transformers are rnns: Fast autoregressive transformers with linear attention","author":"Katharopoulos","year":"2020","journal-title":"ICML"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01462"},{"key":"ref17","article-title":"Streamdiffusion: A pipeline-level solution for real-time interactive generation","author":"Kodaira","year":"2023","journal-title":"arXiv preprint"},{"key":"ref18","article-title":"Visual instruction tuning","author":"Liu","year":"2024","journal-title":"Advances in neural information processing systems"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.00821"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19781-9_19"},{"key":"ref21","article-title":"Latent consistency models: Synthesizing high-resolution images with few-step inference","author":"Luo","year":"2023","journal-title":"arXiv preprint"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i5.28206"},{"key":"ref23","article-title":"Dreamix: Video diffusion models are general video editors","author":"Molad","year":"2023","journal-title":"arXiv"},{"key":"ref24","article-title":"Sdxl: Improving latent diffusion models for high-resolution image synthesis","volume-title":"arXiv preprint","author":"Podell","year":"2023"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01460"},{"key":"ref26","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021","journal-title":"ICML"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref28","article-title":"Make-A-Video: Text-to-video generation without text-video data","author":"Singer","year":"2023","journal-title":"ICLR"},{"key":"ref29","article-title":"Gen-l-video: Multi-text to long video generation via temporal co-denoising","author":"Wang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref30","article-title":"Video-to-video synthesis","volume-title":"NeurIPS","author":"Wang","year":"2018"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/iccv51070.2023.00701"},{"journal-title":"Cvpr 2023 text guided video editing competition","year":"2023","author":"Wu","key":"ref32"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00498"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01403"}],"event":{"name":"2025 International Conference on Digital Image Computing: Techniques and Applications (DICTA)","start":{"date-parts":[[2025,12,3]]},"location":"Adelaide, Australia","end":{"date-parts":[[2025,12,5]]}},"container-title":["2025 International Conference on Digital Image Computing: Techniques and Applications (DICTA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11302408\/11302416\/11302466.pdf?arnumber=11302466","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T06:40:56Z","timestamp":1767076856000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11302466\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,3]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/dicta68720.2025.11302466","relation":{},"subject":[],"published":{"date-parts":[[2025,12,3]]}}}