{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T01:36:59Z","timestamp":1772501819587,"version":"3.50.1"},"reference-count":92,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T00:00:00Z","timestamp":1763078400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T00:00:00Z","timestamp":1763078400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Big Data"],"DOI":"10.1186\/s40537-025-01314-3","type":"journal-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T15:44:59Z","timestamp":1763135099000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Text-to-video generators: a comprehensive survey"],"prefix":"10.1186","volume":"12","author":[{"given":"Muhammad Tanveer","family":"Jan","sequence":"first","affiliation":[]},{"given":"Mohammed G.","family":"Al-Jassani","sequence":"additional","affiliation":[]},{"given":"Martinraj","family":"Nadar","sequence":"additional","affiliation":[]},{"given":"Emmanuel Melchizedek","family":"Vunnava","sequence":"additional","affiliation":[]},{"given":"Vangmai","family":"Chakrapani","sequence":"additional","affiliation":[]},{"given":"Hayat","family":"Ullah","sequence":"additional","affiliation":[]},{"given":"Abbas","family":"Khan","sequence":"additional","affiliation":[]},{"given":"Sardar Ali","family":"Abbas","sequence":"additional","affiliation":[]},{"given":"Borko","family":"Furht","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,14]]},"reference":[{"issue":"7","key":"1314_CR1","doi-asserted-by":"publisher","first-page":"176","DOI":"10.1145\/3626314","volume":"56","author":"J Perez-Cerrolaza","year":"2024","unstructured":"Perez-Cerrolaza J, Abella J, Borg M, Donzella C, Cerquides J, Cazorla FJ, et al. Artificial intelligence for safety-critical systems in industrial and transportation domains A survey. 2024;56(7):176\u2013117640. https:\/\/doi.org\/10.1145\/3626314.(Accessed 2024-09-10).","journal-title":"Artificial intelligence for safety-critical systems in industrial and transportation domains A survey"},{"key":"1314_CR2","doi-asserted-by":"publisher","DOI":"10.1016\/j.treng.2021.100083","volume":"5","author":"LS Iyer","year":"2021","unstructured":"Iyer LS. AI enabled applications towards intelligent transportation. Transportation Engineering. 2021;5:100083. https:\/\/doi.org\/10.1016\/j.treng.2021.100083.","journal-title":"Transportation Engineering"},{"issue":"16","key":"1314_CR3","doi-asserted-by":"publisher","first-page":"49121","DOI":"10.1007\/s11042-023-17067-1","volume":"83","author":"M Alsaidi","year":"2024","unstructured":"Alsaidi M, Jan MT, Altaher A, Zhuang H, Zhu X. Tackling the class imbalanced dermoscopic image classification using data augmentation and GAN. 2024;83(16):49121\u201347. https:\/\/doi.org\/10.1007\/s11042-023-17067-1. (Accessed 2024-09-10).","journal-title":"Tackling the class imbalanced dermoscopic image classification using data augmentation and GAN"},{"key":"1314_CR4","unstructured":"Whig P, Velu A, Nadikattu RR, Alkali YJ. 
Role of AI and IoT in intelligent transportation. In: Artificial Intelligence for Future Intelligent Transportation. Apple Academic Press. Num Pages: 22"},{"key":"1314_CR5","doi-asserted-by":"publisher","DOI":"10.1016\/j.conb.2023.102816","volume":"84","author":"GW Lindsay","year":"2024","unstructured":"Lindsay GW. Grounding neuroscience in behavioral changes using artificial neural networks. Curr Opin Neurobiol. 2024;84:102816. https:\/\/doi.org\/10.1016\/j.conb.2023.102816.","journal-title":"Curr Opin Neurobiol"},{"issue":"1","key":"1314_CR6","doi-asserted-by":"publisher","first-page":"15","DOI":"10.1016\/j.aac.2022.10.001","volume":"2","author":"M Javaid","year":"2024","unstructured":"Javaid M, Haleem A, Khan IH, Suman R. Understanding the potential applications of artificial intelligence in agriculture sector. 2024;2(1):15\u201330. https:\/\/doi.org\/10.1016\/j.aac.2022.10.001. (Accessed 2024-09-10).","journal-title":"Understanding the potential applications of artificial intelligence in agriculture sector"},{"key":"1314_CR7","unstructured":"South African Manufacturing Industry in the Aeon of Artificial Intelligence. https:\/\/ieeexplore.ieee.org\/abstract\/document\/10577688 Accessed 2024-09-10"},{"key":"1314_CR8","unstructured":"Scenario Engineering for Autonomous Transportation: A New Stage in Open-Pit Mines. https:\/\/ieeexplore.ieee.org\/abstract\/document\/10460138 Accessed 2024-09-10"},{"key":"1314_CR9","unstructured":"Zia T, Arif S, Murtaza S, Ullah MA. Text-to-Image Generation with Attention Based Recurrent Neural Networks. arXiv:2001.06658 Accessed 2024-09-10"},{"key":"1314_CR10","unstructured":"Reed S, Akata Z, Yan X, Logeswaran L, Schiele B, Lee H. Generative Adversarial Text to Image Synthesis. arXiv:1605.05396. Accessed 2024-09-10"},{"key":"1314_CR11","unstructured":"Fotedar NA, Wang JH. Bumblebee: Text-to-image generation with transformers. In: Proceedings of the 2019 IEEE International Conference on Image Processing (ICIP), 3465\u20133469."},{"key":"1314_CR12","unstructured":"Chang H, Zhang H, Barber J, Maschinot AJ, Lezama J, Jiang L, Yang M-H, Murphy K, Freeman WT, Rubinstein M. Muse: Text-to-image generation via masked generative transformers. Accessed 2024-09-10"},{"key":"1314_CR13","doi-asserted-by":"crossref","unstructured":"Lee T, Kwon S, Kim T. Grid Diffusion Models for Text-to-Video Generation. arXiv:2404.00234 Accessed 2024-09-10","DOI":"10.1109\/CVPR52733.2024.00834"},{"key":"1314_CR14","unstructured":"Yu Y, Zhang W, Deng Y. Frechet inception distance (FID) for evaluating GANs."},{"key":"1314_CR15","unstructured":"Liu Y, Zhang K, Li Y, Yan Z, Gao C, Chen R, Yuan Z, Huang Y, Sun H, Gao J, He L, Sun L. Sora: A Review on Background, Technology, Limitations, and Opportunities of Large Vision Models. arXiv:2402.17177 2024. Accessed 2025-08-07"},{"key":"1314_CR16","unstructured":"Sora: Creating Video from Text. https:\/\/openai.com\/index\/sora\/ Accessed 2025-04-21"},{"key":"1314_CR17","unstructured":"Sun R, Zhang Y, Shah T, Sun J, Zhang S, Li W, Duan H, Wei B, Ranjan R. From Sora What We Can See: A Survey of Text-to-Video Generation. arXiv:2405.10674 Accessed 2024-09-10"},{"key":"1314_CR18","doi-asserted-by":"crossref","unstructured":"Deng K, Fei T, Huang X, Peng Y. IRC-GAN: Introspective recurrent convolutional GAN for text-to-video generation, 2216\u20132222. Accessed 2025-04-04","DOI":"10.24963\/ijcai.2019\/307"},{"key":"1314_CR19","unstructured":"Pan Y, Qiu Z, Yao T, Li H, Mei T. To Create What You Tell: Generating Videos from Captions. 
arXiv:1804.08264 Accessed 2025-04-04"},{"key":"1314_CR20","unstructured":"Conditional GAN with Discriminative Filter Generation for Text-to-video Synthesis | Proceedings of the 28th International Joint Conference on Artificial Intelligence. https:\/\/dl.acm.org\/doi\/10.5555\/3367243.3367316 Accessed 2025-04-04"},{"key":"1314_CR21","unstructured":"Vondrick C, Pirsiavash H, Torralba A. Generating videos with scene dynamics. In: Advances in Neural Information Processing Systems, vol. 29. Curran Associates, Inc. https:\/\/proceedings.neurips.cc\/paper\/2016\/hash\/04025959b191f8f9de3f924f0940515f-Abstract.html Accessed 2025-04-16"},{"key":"1314_CR22","doi-asserted-by":"crossref","unstructured":"Saito M, Matsumoto E, Saito S. Temporal generative adversarial nets with singular value clipping, pp. 2830\u20132839. https:\/\/openaccess.thecvf.com\/content_iccv_2017\/html\/Saito_Temporal_Generative_Adversarial_ICCV_2017_paper.html Accessed 2025-04-16","DOI":"10.1109\/ICCV.2017.308"},{"key":"1314_CR23","unstructured":"Denton EL, Chintala S, szlam a, Fergus R. Deep generative image models using a laplacian pyramid of adversarial networks. In: Advances in Neural Information Processing Systems, vol. 28. Curran Associates, Inc. https:\/\/proceedings.neurips.cc\/paper\/2015\/hash\/aa169b49b583a2b5af89203c2b78c67c-Abstract.html Accessed 2025-04-16"},{"key":"1314_CR24","unstructured":"Odena A, Olah C, Shlens J. Conditional image synthesis with auxiliary classifier GANs. In: Proceedings of the 34th International Conference on Machine Learning, pp. 2642\u20132651. PMLR. https:\/\/proceedings.mlr.press\/v70\/odena17a.html Accessed 2025-04-16"},{"key":"1314_CR25","unstructured":"Radford A, Metz L, Chintala S. Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks. arXiv:1511.06434. Accessed 2025-04-16"},{"key":"1314_CR26","doi-asserted-by":"crossref","unstructured":"Tulyakov S, Liu M-Y, Yang X, Kautz J. MoCoGAN: Decomposing motion and content for video generation, 1526\u20131535. https:\/\/openaccess.thecvf.com\/content_cvpr_2018\/html\/Tulyakov_MoCoGAN_Decomposing_Motion_CVPR_2018_paper.html Accessed 2024-09-10","DOI":"10.1109\/CVPR.2018.00165"},{"key":"1314_CR27","unstructured":"Babaeizadeh M, Saffar MT, Nair S, Levine S, Finn C, Erhan D. FitVid: Overfitting in Pixel-Level Video Prediction. arXiv:2106.13195. Accessed 2024-10-31"},{"key":"1314_CR28","unstructured":"Jia W, Liu M, Rehg JM. Generative Adversarial Network for Future Hand Segmentation from Egocentric Video. arXiv:2203.11305 Accessed 2024-10-31"},{"key":"1314_CR29","unstructured":"Li Y, Gan Z, Shen Y, Liu J, Cheng Y, Wu Y, Carin L, Carlson D, Gao J. StoryGAN: A Sequential Conditional GAN for Story Visualization. arXiv:1812.02784 Accessed 2024-10-31"},{"key":"1314_CR30","unstructured":"Yi Z, Zhang H, Tan P, Gong M. DualGAN: Unsupervised Dual Learning for Image-to-Image Translation. arXiv:1704.02510 Accessed 2024-10-31"},{"key":"1314_CR31","unstructured":"Hong W, Ding M, Zheng W, Liu X, Tang J. CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers. arXiv:2205.15868 Accessed 2024-12-03"},{"key":"1314_CR32","doi-asserted-by":"crossref","unstructured":"Khachatryan L, Movsisyan A, Tadevosyan V, Henschel R, Wang Z, Navasardyan S, Shi H. Text2video-zero: Text-to-image diffusion models are zero-shot video generators, 15954\u201315964. 
https:\/\/openaccess.thecvf.com\/content\/ICCV2023\/html\/Khachatryan_Text2Video-Zero_Text-to-Image_Diffusion_Models_are_Zero-Shot_Video_Generators_ICCV_2023_paper.html Accessed 2024-12-03","DOI":"10.1109\/ICCV51070.2023.01462"},{"key":"1314_CR33","doi-asserted-by":"crossref","unstructured":"Menapace W, Siarohin A, Skorokhodov I, Deyneka E, Chen T-S, Kag A, Fang Y, Stoliar A, Ricci E, Ren J, Tulyakov S. Snap Video: Scaled Spatiotemporal Transformers for Text-to-Video Synthesis. arXiv:2402.14797 Accessed 2024-11-15","DOI":"10.1109\/CVPR52733.2024.00672"},{"key":"1314_CR34","doi-asserted-by":"crossref","unstructured":"Ma W-DK, Lewis JP, Kleijn WB. TrailBlazer: Trajectory Control for Diffusion-Based Video Generation. arXiv:2401.00896. Accessed 2024-10-22","DOI":"10.1145\/3680528.3687652"},{"key":"1314_CR35","doi-asserted-by":"crossref","unstructured":"Oh G, Jeong J, Kim S, Byeon W, Kim J, Kim S, Kim S. MEVG: Multi-event Video Generation with Text-to-Video Models. arXiv:2312.04086 Accessed 2024-10-22","DOI":"10.1007\/978-3-031-72775-7_23"},{"key":"1314_CR36","unstructured":"Lu Y, Zhu L, Fan H, Yang Y. FlowZero: Zero-Shot Text-to-Video Synthesis with LLM-Driven Dynamic Scene Syntax. arXiv:2311.15813 Accessed 2024-10-31"},{"key":"1314_CR37","doi-asserted-by":"crossref","unstructured":"Qing Z, Zhang S, Wang J, Wang X, Wei Y, Zhang Y, Gao C, Sang N. Hierarchical Spatio-temporal Decoupling for Text-to-Video Generation. arXiv:2312.04483 Accessed 2024-11-15","DOI":"10.1109\/CVPR52733.2024.00634"},{"key":"1314_CR38","unstructured":"Wang X, Zhang S, Zhang H, Liu Y, Zhang Y, Gao C, Sang N. VideoLCM: Video Latent Consistency Model. arXiv:2312.09109 Accessed 2024-12-10"},{"key":"1314_CR39","unstructured":"Ruan L, Tian L, Huang C, Zhang X, Xiao X. UniVG: Towards UNIfied-modal Video Generation. arXiv:2401.09084. Accessed 2024-12-10"},{"key":"1314_CR40","unstructured":"Yin S, Wu C, Yang H, Wang J, Wang X, Ni M, Yang Z, Li L, Liu S, Yang F, Fu J, Ming G, Wang L, Liu Z, Li H, Duan N. NUWA-XL: Diffusion over Diffusion for eXtremely Long Video Generation. arXiv:2303.12346. Accessed 2024-12-03"},{"key":"1314_CR41","doi-asserted-by":"crossref","unstructured":"Wu JZ, Ge Y, Wang X, Lei SW, Gu Y, Shi Y, Hsu W, Shan Y, Qie X, Shou MZ. Tune-a-video: One-shot tuning of image diffusion models for text-to-video generation, 7623\u20137633. https:\/\/openaccess.thecvf.com\/content\/ICCV2023\/html\/Wu_Tune-A-Video_One-Shot_Tuning_of_Image_Diffusion_Models_for_Text-to-Video_Generation_ICCV_2023_paper.html Accessed 2024-12-03","DOI":"10.1109\/ICCV51070.2023.00701"},{"key":"1314_CR42","unstructured":"Ho J, Chan W, Saharia C, Whang J, Gao R, Gritsenko A, Kingma DP, Poole B, Norouzi M, Fleet DJ, Salimans T. Imagen Video: High Definition Video Generation with Diffusion Models. arXiv:2210.02303. Accessed 2024-12-03"},{"key":"1314_CR43","first-page":"8633","volume":"35","author":"J Ho","year":"2022","unstructured":"Ho J, Salimans T, Gritsenko A, Chan W, Norouzi M, Fleet DJ. Video diffusion models. 2022;35:8633\u201346 (Accessed 2024-12-03).","journal-title":"Video diffusion models"},{"key":"1314_CR44","unstructured":"Zhou D, Wang W, Yan H, Lv W, Zhu Y, Feng J. MagicVideo: Efficient Video Generation With Latent Diffusion Models. arXiv:2211.11018. Accessed 2024-12-03"},{"key":"1314_CR45","unstructured":"Chen X, Xia T, Xu S. UniCtrl: Improving the Spatiotemporal Consistency of Text-to-Video Diffusion Models via Training-Free Unified Attention Control. arXiv:2403.02332. 
Accessed 2024-11-08"},{"key":"1314_CR46","unstructured":"Zhang Y, Kang Y, Zhang Z, Ding X, Zhao S, Yue X. InteractiveVideo: User-Centric Controllable Video Generation with Synergistic Multimodal Instructions. arXiv:2402.03040. Accessed 2024-11-08"},{"key":"1314_CR47","unstructured":"Pan B, Xu Z, Huang C-HP, Singh KK, Zhou Y, Guibas LJ, Yang J. ActAnywhere: Subject-Aware Video Background Generation. arXiv:2401.10822. Accessed 2024-11-08"},{"key":"1314_CR48","doi-asserted-by":"crossref","unstructured":"Wang Z, Wang L, Zhao Z, Wu M, Lyu C, Li H, Cai D, Zhou L, Shi S, Tu Z. GPT4Video: A Unified Multimodal Large Language Model for lnstruction-Followed Understanding and Safety-Aware Generation. arXiv:2311.16511. Accessed 2024-11-08","DOI":"10.1145\/3664647.3681464"},{"key":"1314_CR49","unstructured":"Touvron H, Lavril T, Izacard G, Martinet X, Lachaux M-A, Lacroix T, Rozi\u00e8re B, Goyal N, Hambro E, Azhar F, Rodriguez A, Joulin A, Grave E, Lample G. LLaMA: Open and Efficient Foundation Language Models. arXiv:2302.13971. Accessed 2024-11-12"},{"key":"1314_CR50","unstructured":"Radford A, Kim JW, Hallacy C, Ramesh A, Goh G, Agarwal S, Sastry G, Askell A, Mishkin P, Clark J, Krueger G, Sutskever I. Learning Transferable Visual Models From Natural Language Supervision. arXiv:2103.00020. Accessed 2024-11-12"},{"key":"1314_CR51","unstructured":"Hu EJ, Shen Y, Wallis P, Allen-Zhu Z, Li Y, Wng S, Wang L, Chen W. LoRA: Low-Rank Adaptation of Large Language Models. arXiv:2106.09685. Accessed 2024-11-12"},{"key":"1314_CR52","doi-asserted-by":"publisher","unstructured":"Li Y, Min M, Shen D, Carlson D, Carin L. Video generation from text 32(1) https:\/\/doi.org\/10.1609\/aaai.v32i1.12233 . Number: 1. Accessed 2024-12-03","DOI":"10.1609\/aaai.v32i1.12233"},{"key":"1314_CR53","doi-asserted-by":"publisher","first-page":"153113","DOI":"10.1109\/ACCESS.2020.3017881","volume":"8","author":"D Kim","year":"2020","unstructured":"Kim D, Joo D, Kim J. TiVGAN: text to image to video generation with step-by-step evolutionary generator. IEEE Access. 2020;8:153113\u201322. https:\/\/doi.org\/10.1109\/ACCESS.2020.3017881.","journal-title":"IEEE Access"},{"key":"1314_CR54","doi-asserted-by":"publisher","unstructured":"Xu J, Mei T, Yao T, Rui Y. MSR-VTT: A large video description dataset for bridging video and language. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 5288\u20135296. IEEE. https:\/\/doi.org\/10.1109\/CVPR.2016.571 . Accessed 2024-11-08","DOI":"10.1109\/CVPR.2016.571"},{"key":"1314_CR55","unstructured":"Krishna R, Hata K, Ren F, Fei-Fei L, Niebles JC. Dense-Captioning Events in Videos. arXiv:L1705.00754. Accessed 2024-11-09"},{"key":"1314_CR56","unstructured":"Miech A, Zhukov D, Alayrac J-B, Tapaswi M, Laptev I, Sivic J. HowTo100M: Learning a Text-Video Embedding by Watching Hundred Million Narrated Video Clips. arXiv:1906.03327. Accessed 2024-11-11"},{"key":"1314_CR57","unstructured":"Kay W, Carreira J, Simonyan K, Zhang B, Hillier C, Vijayanarasimhan S, Viola F, Green T, Back T, Natsev P, Suleyman M, Zisserman A. The Kinetics Human Action Video Dataset. arXiv:1705.06950. Accessed 2024-11-11"},{"key":"1314_CR58","unstructured":"Soomro K. UCF101: A dataset of 101 human actions classes from videos in the wild. Accessed 2024-12-10"},{"issue":"5","key":"1314_CR59","doi-asserted-by":"publisher","first-page":"971","DOI":"10.1007\/s00138-012-0450-4","volume":"24","author":"KK Reddy","year":"2013","unstructured":"Reddy KK, Shah M. Recognizing 50 human action categories of web videos. 
Mach Vis Appl. 2013;24(5):971\u201381. https:\/\/doi.org\/10.1007\/s00138-012-0450-4.","journal-title":"Mach Vis Appl"},{"key":"1314_CR60","unstructured":"Carreira J, Noland E, Banki-Horvath A, Hillier C, Zisserman A. A Short Note about Kinetics-600. arXiv:1808.01340. Accessed 2024-11-11"},{"key":"1314_CR61","unstructured":"Smaira L, Carreira J, Noland E, Clancy E, Wu A, Zisserman A. A Short Note on the Kinetics-700-2020 Human Action Dataset. arXiv:2010.10864. Accessed 2024-11-11"},{"key":"1314_CR62","unstructured":"Hendricks LA, Wang O, Shechtman E, Sivic J, Darrell T, Russell B. Localizing Moments in Video with Natural Language. arXiv:1708.01641. Accessed 2025-04-16"},{"key":"1314_CR63","unstructured":"Zellers R, Lu X, Hessel J, Yu Y, Park JS, Cao J, Farhadi A, Choi Y. MERLOT: Multimodal Neural Script Knowledge Models. arXiv:2106.02636. Accessed 2025-04-16"},{"key":"1314_CR64","unstructured":"Bain M, Nagrani A, Varol G, Zisserman A. Frozen in Time: A Joint Video and Image Encoder for End-to-End Retrieval. arXiv:2104.00650. Accessed 2025-04-16"},{"key":"1314_CR65","unstructured":"Xu H, Ye Q, Wu X, Yan M, Miao Y, Ye J, Xu G, Hu A, Shi Y, Xu G, Li C, Qian Q, Que M, Zhang J, Zeng X, Huang F. Youku-mPLUG: A 10 Million Large-scale Chinese Video-Language Dataset for Pre-training and Benchmarks. arXiv:2306.04362. Accessed 2025-04-16"},{"key":"1314_CR66","unstructured":"Chen S, Li H, Wang Q, Zhao Z, Sun M, Zhu X, Liu J. VAST: A Vision-Audio-Subtitle-Text Omni-Modality Foundation Model and Dataset. arXiv:2305.18500. Accessed 2025-04-16"},{"key":"1314_CR67","unstructured":"Yuan S, Huang J, Xu Y, Liu Y, Zhang S, Shi Y, Zhu R, Cheng X, Luo J, Yuan L. ChronoMagic-Bench: A Benchmark for Metamorphic Evaluation of Text-to-Time-lapse Video Generation. arXiv:2406.18522. Accessed 2025-04-16"},{"key":"1314_CR68","unstructured":"Sigurdsson GA, Varol G, Wang X, Farhadi A, Laptev I, Gupta A. Hollywood in Homes: Crowdsourcing Data Collection for Activity Understanding. arXiv:1604.01753. Accessed 2025-04-16"},{"key":"1314_CR69","unstructured":"Wang Y, He Y, Li Y, Li K, Yu J, Ma X, Li X, Chen G, Chen X, Wang Y, He C, Luo P, Liu Z, Wang Y, Wang L, Qiao Y. InternVid: A Large-scale Video-Text Dataset for Multimodal Understanding and Generation. arXiv:2307.06942. Accessed 2025-04-16"},{"key":"1314_CR70","unstructured":"Xue H, Hang T, Zeng Y, Sun Y, Liu B, Yang H, Fu J, Guo B. Advancing High-Resolution Video-Language Representation with Large-Scale Video Transcriptions. arXiv:2111.10337. Accessed 2025-04-16"},{"key":"1314_CR71","doi-asserted-by":"crossref","unstructured":"Wang W, Yang H, Tuo Z, He H, Zhu J, Fu J, Liu J. Swap Attention in Spatiotemporal Diffusions for Text-to-Video Generation. arXiv:2305.10874 Accessed 2025-04-16","DOI":"10.1007\/s11263-025-02349-y"},{"key":"1314_CR72","doi-asserted-by":"crossref","unstructured":"Chen T-S, Siarohin A, Menapace W, Deyneka E, Chao H-w, Jeon BE, Fang Y, Lee H-Y, Ren J, Yang M-H, Tulyakov S. Panda-70M: Captioning 70M Videos with Multiple Cross-Modality Teachers. arXiv:2402.19479 Accessed 2025-04-16","DOI":"10.1109\/CVPR52733.2024.01265"},{"key":"1314_CR73","unstructured":"Barratt S, Sharma R. A Note on the Inception Score (2018). https:\/\/arxiv.org\/abs\/1801.01973v2 Accessed 2025-08-07"},{"key":"1314_CR74","unstructured":"Jung S, Keuper M. Internalized Biases in Fr\u00e9chet Inception Distance. (2021). 
https:\/\/openreview.net\/forum?id=mLG96UpmbYz Accessed 2025-08-07"},{"key":"1314_CR75","doi-asserted-by":"publisher","unstructured":"Bossard L, Guillaumin M, Van\u00a0Gool L. Food-101 \u2013 Mining Discriminative Components with Random Forests. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) Computer Vision \u2013 ECCV 2014:446\u2013461. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-319-10599-4_29","DOI":"10.1007\/978-3-319-10599-4_29"},{"key":"1314_CR76","unstructured":"Shao J-J, Shi J-X, Yang X-W, Guo L-Z, Li Y-F. Investigating the Limitation of CLIP Models: The Worst-Performing Categories. arXiv:2310.03324 (2023). Accessed 2025-08-07"},{"key":"1314_CR77","unstructured":"Liu X, Xiang X, Li Z, Wang Y, Li Z, Liu Z, Zhang W, Ye W, Zhang J. A Survey of AI-Generated Video Evaluation. arXiv:2410.19884 2024. Accessed 2025-08-07"},{"key":"1314_CR78","doi-asserted-by":"crossref","unstructured":"Liu Y, Cun X, Liu X, Wang X, Zhang Y, Chen H, Liu Y, Zeng T, Chan R, Shan Y. EvalCrafter: Benchmarking and Evaluating Large Video Generation Models. arXiv:2310.11440 2024. Accessed 2025-08-07","DOI":"10.1109\/CVPR52733.2024.02090"},{"key":"1314_CR79","unstructured":"OpenAI R. Gpt-4 technical report. arxiv:2303.08774. View in Article 2023;2(5):1"},{"key":"1314_CR80","unstructured":"Huang Z, He Y, Yu J, Zhang F, Si C, Jiang Y, Zhang Y, Wu T, Jin Q, Chanpaisit N, Wang Y, Chen X, Wang L, Lin D, Qiao Y, Liu Z. VBench: Comprehensive Benchmark Suite for Video Generative Models. arXiv:2311.17982 2023. Accessed 2025-08-07"},{"key":"1314_CR81","unstructured":"Miao Y, Zhu Y, Dong Y, Yu L, Zhu J, Gao X-S. T2VSafetyBench: Evaluating the Safety of Text-to-Video Generative Models. arXiv:2407.05965 2024. Accessed 2025-08-07"},{"key":"1314_CR82","doi-asserted-by":"crossref","unstructured":"Feng R, Weng W, Wang Y, Yuan Y, Bao J, Luo C, Chen Z, Guo B. CCEdit: Creative and Controllable Video Editing via Diffusion Models. arXiv:2309.16496 2024. Accessed 2025-08-07","DOI":"10.1109\/CVPR52733.2024.00641"},{"key":"1314_CR83","unstructured":"Xing J, Xia M, Liu Y, Zhang Y, Zhang Y, He Y, Liu H, Chen H, Cun X, Wang X, Shan Y, Wong T-T. Make-Your-Video: Customized Video Generation Using Textual and Structural Guidance. arXiv:2306.00943 2023. Accessed 2025-08-07"},{"key":"1314_CR84","unstructured":"Guo Y, Yang C, Rao A, Liang Z, Wang Y, Qiao Y, Agrawala M, Lin D, Dai B. AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning. arXiv:2307.04725 2024. Accessed 2025-08-07"},{"key":"1314_CR85","unstructured":"He Y, Xia M, Chen H, Cun X, Gong Y, Xing J, Zhang Y, Wang X, Weng C, Shan Y, Chen Q. Animate-A-Story: Storytelling with Retrieval-Augmented Video Generation 2023. arxiv:2307.06940v1 Accessed 2025-08-07"},{"key":"1314_CR86","unstructured":"Mei K, Patel VM. VIDM: Video Implicit Diffusion Models. arXiv:2212.00235 2022. Accessed 2025-08-07"},{"key":"1314_CR87","unstructured":"Yu S, Sohn K, Kim S, Shin J. Video Probabilistic Diffusion Models in Projected Latent Space. arXiv:2302.07685 2023. Accessed 2025-08-07"},{"key":"1314_CR88","doi-asserted-by":"crossref","unstructured":"Su K, Qian K, Shlizerman E, Torralba A, Gan C. Physics-Driven Diffusion Models for Impact Sound Synthesis from Videos. arXiv:2303.16897 2023. Accessed 2025-08-07","DOI":"10.1109\/CVPR52729.2023.00940"},{"key":"1314_CR89","unstructured":"Li, S., Dong, W., Zhang, Y., Tang, F., Ma, C., Deussen, O., Lee, T.-Y., Xu, C.: Dance-to-Music Generation with Encoder-based Textual Inversion (2024). 
arxiv:2401.17800v2 Accessed 2025-08-07"},{"key":"1314_CR90","doi-asserted-by":"publisher","DOI":"10.1101\/2023.11.16.567461","author":"A Awasthi","year":"2023","unstructured":"Awasthi A, Nizam J, Zare S, Ahmad S, Montalvo M, Varadarajan N, et al. Video Diffusion Models for the Apoptosis Forcasting. 2023. https:\/\/doi.org\/10.1101\/2023.11.16.567461.","journal-title":"Video Diffusion Models for the Apoptosis Forcasting"},{"key":"1314_CR91","doi-asserted-by":"publisher","unstructured":"Bozorgpour A, Sadegheih Y, Kazerouni A, Azad R, Merhof D. DermoSegDiff: A Boundary-Aware Segmentation Diffusion Model for Skin Lesion Delineation. In: Rekik I, Adeli E, Park SH, Cintas C, Zamzmi G, editors. Predictive Intelligence in Medicine. Cham: Springer; 2023. p. 146\u201358. https:\/\/doi.org\/10.1007\/978-3-031-46005-0_13.","DOI":"10.1007\/978-3-031-46005-0_13"},{"key":"1314_CR92","doi-asserted-by":"crossref","unstructured":"Flaborea A, Collorone L, D\u2019Amely G, D\u2019Arrigo S, Prenkaj B, Galasso F. Multimodal Motion Conditioned Diffusion Model for Skeleton-based Video Anomaly Detection. arXiv:2307.07205 2023. Accessed 2025-08-07","DOI":"10.1109\/ICCV51070.2023.00947"}],"container-title":["Journal of Big Data"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1186\/s40537-025-01314-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1186\/s40537-025-01314-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1186\/s40537-025-01314-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T15:45:12Z","timestamp":1763135112000},"score":1,"resource":{"primary":{"URL":"https:\/\/journalofbigdata.springeropen.com\/articles\/10.1186\/s40537-025-01314-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,14]]},"references-count":92,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2025,12]]}},"alternative-id":["1314"],"URL":"https:\/\/doi.org\/10.1186\/s40537-025-01314-3","relation":{},"ISSN":["2196-1115"],"issn-type":[{"value":"2196-1115","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,14]]},"assertion":[{"value":"4 May 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 October 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 November 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"253"}}
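The record above is a Crossref REST API "work" object for this survey. As a minimal, hypothetical sketch only (assuming network access and that the public api.crossref.org endpoint is reachable; it returns a JSON body shaped exactly like the record above, with the payload under "message"), the same metadata can be retrieved and a few fields read like this:

import requests

# DOI taken from the record above.
DOI = "10.1186/s40537-025-01314-3"

# Fetch the work record from the public Crossref REST API (assumption: endpoint reachable).
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # payload sits under the "message" key, as in the record above

print(work["title"][0])                       # article title
print(work["container-title"][0])             # journal name ("Journal of Big Data")
print(work["DOI"], work["type"])              # DOI and record type ("journal-article")
print(len(work.get("reference", [])), "references")  # 92 in this record
for author in work["author"]:
    print(author.get("given", ""), author["family"])

This is a sketch of reading the record, not part of the record itself; field names used ("title", "container-title", "reference", "author") are exactly those present in the JSON above.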