{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T16:32:24Z","timestamp":1762878744052,"version":"3.45.0"},"reference-count":21,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T00:00:00Z","timestamp":1762819200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T00:00:00Z","timestamp":1762819200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Discov Internet Things"],"DOI":"10.1007\/s43926-025-00233-2","type":"journal-article","created":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T15:57:50Z","timestamp":1762876670000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["StyleGAN based path analysis from image to video generation in multimedia generation"],"prefix":"10.1007","volume":"5","author":[{"given":"Xiao","family":"Zhang","sequence":"first","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,11]]},"reference":[{"issue":"07","key":"233_CR1","doi-asserted-by":"publisher","first-page":"10435","DOI":"10.5281\/zenodo.6788660","volume":"10","author":"DAS George","year":"2021","unstructured":"George DAS, George AH. The evolution of content delivery network: how it enhances video services, streaming, games, ecommerce, and advertising. Int J Adv Res Electr Electron Instrum Eng (IJAREEIE). 2021;10(07):10435\u201342. https:\/\/doi.org\/10.5281\/zenodo.6788660.","journal-title":"Int J Adv Res Electr Electron Instrum Eng (IJAREEIE)"},{"issue":"12","key":"233_CR2","doi-asserted-by":"publisher","first-page":"109","DOI":"10.25236\/IJFS.2021.031214","volume":"3","author":"F Huang","year":"2021","unstructured":"Huang F. Research on innovation and development of multimedia industry agglomeration mode under the background of big data. Int J Front Sociol. 2021;3(12):109\u201317. https:\/\/doi.org\/10.25236\/IJFS.2021.031214.","journal-title":"Int J Front Sociol"},{"issue":"11","key":"233_CR3","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3689641","volume":"20","author":"G Bansal","year":"2024","unstructured":"Bansal G, Nawal A, Chamola V, Herencsar N. Revolutionizing visuals: the role of generative AI in modern image generation. ACM Trans Multimedia Comput Commun Appl. 2024;20(11):1\u201322. https:\/\/doi.org\/10.1145\/3689641.","journal-title":"ACM Trans Multimedia Comput Commun Appl"},{"key":"233_CR4","doi-asserted-by":"publisher","unstructured":"Liu MY, Huang X, Yu J, Wang TC, Mallya A. (2021). Generative adversarial networks for image and video synthesis: Algorithms and applications. Proceedings of the IEEE, 109(5), pp.839\u2013862.https:\/\/doi.org\/10.1109\/JPROC.2021.3049196","DOI":"10.1109\/JPROC.2021.3049196"},{"key":"233_CR5","doi-asserted-by":"publisher","first-page":"2963","DOI":"10.1109\/TMM.2021.3091847","volume":"24","author":"H Tang","year":"2021","unstructured":"Tang H, Sebe N. Total generate: cycle in cycle generative adversarial networks for generating human faces, hands, bodies, and natural scenes. IEEE Trans Multimedia. 2021;24:2963\u201374. https:\/\/doi.org\/10.1109\/TMM.2021.3091847.","journal-title":"IEEE Trans Multimedia"},{"key":"233_CR6","doi-asserted-by":"publisher","first-page":"124847","DOI":"10.1109\/ACCESS.2021.3110798","volume":"9","author":"W Zhang","year":"2021","unstructured":"Zhang W, Wang G, Huang M, Wang H, Wen S. Generative adversarial networks for abnormal event detection in videos based on self-attention mechanism. IEEE Access. 2021;9:124847\u201360. https:\/\/doi.org\/10.1109\/ACCESS.2021.3110798.","journal-title":"IEEE Access"},{"issue":"5","key":"233_CR7","doi-asserted-by":"publisher","first-page":"e0323304","DOI":"10.1371\/journal.pone.0323304","volume":"20","author":"Z Lin","year":"2025","unstructured":"Lin Z, Feng K. Improved generative adversarial networks model for movie dance generation. PLoS ONE. 2025;20(5):e0323304.","journal-title":"PLoS ONE"},{"issue":"23","key":"233_CR8","doi-asserted-by":"publisher","first-page":"13153","DOI":"10.1007\/s00500-022-07014-x","volume":"26","author":"B Natarajan","year":"2022","unstructured":"Natarajan B, Elakkiya R. Dynamic GAN for high-quality sign Language video generation from skeletal poses using generative adversarial networks. Soft Comput. 2022;26(23):13153\u201375. https:\/\/doi.org\/10.1007\/s00500-022-07014-x.","journal-title":"Soft Comput"},{"issue":"10","key":"233_CR9","doi-asserted-by":"publisher","first-page":"96","DOI":"10.1109\/MCOM.003.2300645","volume":"62","author":"M Xu","year":"2024","unstructured":"Xu M, Niyato D, Kang J, Xiong Z, Guo S, Fang Y, Kim DI. Generative AI-Enabled mobile tactical multimedia networks: Distribution, Generation, and perception. IEEE Commun Mag. 2024;62(10):96\u2013102. https:\/\/doi.org\/10.1109\/MCOM.003.2300645.","journal-title":"IEEE Commun Mag"},{"issue":"4","key":"233_CR10","doi-asserted-by":"publisher","first-page":"1805","DOI":"10.1109\/TCSVT.2021.3083257","volume":"32","author":"X Tu","year":"2021","unstructured":"Tu X, Zou Y, Zhao J, Ai W, Dong J, Yao Y, Wang Z, Guo G, Li Z, Liu W, Feng J. Image-to-video generation via 3D facial dynamics. IEEE Trans Circuits Syst Video Technol. 2021;32(4):1805\u201319. https:\/\/doi.org\/10.1109\/TCSVT.2021.3083257.","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"233_CR11","doi-asserted-by":"publisher","unstructured":"Iyengar SS, Nabavirazavi S, Hariprasad Y, HB P, and, Mohan CK. (2025). Temporal Deepfake Generation and Detection in Video Sequences Using Recurrent Neural Networks (RNNs). Artificial Intelligence in Practice: Theory and Application for Cyber Security and Forensics, pp.309\u2013334. https:\/\/doi.org\/10.1007\/978-3-031-89327-8_11","DOI":"10.1007\/978-3-031-89327-8_11"},{"key":"233_CR12","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2506.17201","author":"J Li","year":"2025","unstructured":"Li J, Tang J, Xu Z, Wu L, Zhou Y, Shao S, Yu T, Cao Z, Lu Q. Hunyuan-GameCraft: High-dynamic interactive game video generation with hybrid history condition. ArXiv Preprint. 2025. https:\/\/doi.org\/10.48550\/arXiv.2506.17201. arXiv:2506.17201.","journal-title":"ArXiv Preprint"},{"key":"233_CR13","doi-asserted-by":"publisher","unstructured":"Liu M. Exploring autonomous content creation in digital media using generative adversarial networks: A moral and aesthetic evaluation framework. J Comput Methods Sci Eng. 2025;14727978251352135. https:\/\/doi.org\/10.1177\/14727978251352135.","DOI":"10.1177\/14727978251352135"},{"issue":"1","key":"233_CR14","doi-asserted-by":"publisher","first-page":"41","DOI":"10.1007\/s12559-024-10389-8","volume":"17","author":"A Joshi","year":"2025","unstructured":"Joshi A, Diwakar M. I2V-CMGAN: generative adversarial Cross-Modal Network-Based Image-to-Video person Re-identification. Cogn Comput. 2025;17(1):41. https:\/\/doi.org\/10.1007\/s12559-024-10389-8.","journal-title":"Cogn Comput"},{"key":"233_CR15","doi-asserted-by":"publisher","first-page":"1281944","DOI":"10.3389\/fnbot.2023.1281944","volume":"17","author":"S Lu","year":"2023","unstructured":"Lu S, Wang P. Multi-dimensional fusion: transformer and GANs-based multimodal audiovisual perception robot for musical performance Art. Front Neurorobotics. 2023;17:1281944. https:\/\/doi.org\/10.3389\/fnbot.2023.1281944.","journal-title":"Front Neurorobotics"},{"key":"233_CR16","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2506.03191","author":"M Islam","year":"2025","unstructured":"Islam M, Huang T, Ahn E, Naseem U. Multimodal generative AI with autoregressive LLMs for human motion Understanding and generation: A way forward. ArXiv Preprint. 2025. https:\/\/doi.org\/10.48550\/arXiv.2506.03191. arXiv:2506.03191.","journal-title":"ArXiv Preprint"},{"issue":"1","key":"233_CR17","doi-asserted-by":"publisher","first-page":"221","DOI":"10.1007\/s41870-023-01468-4","volume":"16","author":"R Mehmood","year":"2024","unstructured":"Mehmood R, Bashir R, Giri KJ. VTM-GAN: video-text matcher based generative adversarial network for generating videos from textual description. Int J Inform Technol. 2024;16(1):221\u201336. https:\/\/doi.org\/10.1007\/s41870-023-01468-4.","journal-title":"Int J Inform Technol"},{"key":"233_CR18","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2411.16657","author":"Z Wang","year":"2024","unstructured":"Wang Z, Li J, Lin H, Yoon J, Bansal M. DreamRunner: Fine-Grained storytelling video generation with Retrieval-Augmented motion adaptation. ArXiv Preprint. 2024. https:\/\/doi.org\/10.48550\/arXiv.2411.16657. arXiv:2411.16657.","journal-title":"ArXiv Preprint"},{"key":"233_CR19","doi-asserted-by":"publisher","first-page":"2323","DOI":"10.1109\/TMM.2022.3146010","volume":"25","author":"H Yan","year":"2022","unstructured":"Yan H, Zhang H, Liu L, Zhou D, Xu X, Zhang Z, Yan S. Toward intelligent design: an AI-based fashion designer using generative adversarial networks aided by sketch and rendering generators. IEEE Trans Multimedia. 2022;25:2323\u201338. https:\/\/doi.org\/10.1109\/TMM.2022.3146010.","journal-title":"IEEE Trans Multimedia"},{"key":"233_CR20","doi-asserted-by":"publisher","unstructured":"Shi X, Huang Z, Wang FY, Bian W, Li D, Zhang Y, Zhang M, Cheung KC, See S, Qin H, Da J. (2024). Motion-I2V: Consistent and Controllable Image-to-Video Generation with Explicit Motion Modeling. arXiv e-prints, arXiv:2401.15977. https:\/\/doi.org\/10.48550\/arXiv.2401.15977","DOI":"10.48550\/arXiv.2401.15977"},{"key":"233_CR21","doi-asserted-by":"publisher","unstructured":"Liang J, Fan Y, Zhang K, Timofte R, Van Gool L, Ranjan R. (2023). MoVideo: Motion-Aware Video Generation with Diffusion Models. arXiv e-prints, arXiv:2311. https:\/\/doi.org\/10.1007\/978-3-031-72784-9_4","DOI":"10.1007\/978-3-031-72784-9_4"}],"container-title":["Discover Internet of Things"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s43926-025-00233-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s43926-025-00233-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s43926-025-00233-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T15:57:51Z","timestamp":1762876671000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s43926-025-00233-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,11]]},"references-count":21,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2025,12]]}},"alternative-id":["233"],"URL":"https:\/\/doi.org\/10.1007\/s43926-025-00233-2","relation":{},"ISSN":["2730-7239"],"issn-type":[{"value":"2730-7239","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,11]]},"assertion":[{"value":"14 July 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 October 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 November 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval and consent to participate"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"The authors declare no competing interests.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"131"}}