{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T14:14:49Z","timestamp":1767017689426,"version":"3.48.0"},"reference-count":19,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T00:00:00Z","timestamp":1766966400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T00:00:00Z","timestamp":1766966400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Discov Artif Intell"],"DOI":"10.1007\/s44163-025-00501-8","type":"journal-article","created":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T14:11:09Z","timestamp":1767017469000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Enhancing character animation realism with generative adversarial networks (GANs): a comparative method study"],"prefix":"10.1007","volume":"5","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-2823-3070","authenticated-orcid":false,"given":"Mars Caroline","family":"Wibowo","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7430-8740","authenticated-orcid":false,"given":"Daniel","family":"Manongga","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7387-2622","authenticated-orcid":false,"given":"Hendry","family":"Hendry","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0003-9754-2809","authenticated-orcid":false,"given":"Teguh Indra","family":"Bayu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,12,29]]},"reference":[{"issue":"1","key":"501_CR1","doi-asserted-by":"publisher","first-page":"011001","DOI":"10.1088\/2632-2153\/AD1F77","volume":"5","author":"T Chakraborty","year":"2024","unstructured":"Chakraborty T, Reddy K S U, M. Naik S, Panja M, Manvitha B. Ten years of generative adversarial nets (GANs): a survey of the state-of-the-art. Mach Learn Sci Technol. 2024;5(1):011001. https:\/\/doi.org\/10.1088\/2632-2153\/AD1F77.","journal-title":"Mach Learn Sci Technol"},{"key":"501_CR2","doi-asserted-by":"publisher","DOI":"10.1145\/3613904.3641927","author":"Q Zhou","year":"2024","unstructured":"Zhou Q, Ledo D, Fitzmaurice G, Anderson F. TimeTunnel: integrating spatial and temporal motion editing for character animation in virtual reality. Conf Human Fact Comput Syst\u2014Proc. 2024. https:\/\/doi.org\/10.1145\/3613904.3641927.","journal-title":"Conf Human Fact Comput Syst\u2014Proc"},{"key":"501_CR3","doi-asserted-by":"publisher","first-page":"179","DOI":"10.1007\/978-981-99-9018-4_13","volume":"382","author":"RK Rai","year":"2024","unstructured":"Rai RK, Bansal R, Jha SS, Narava R. Assessing the utility of GAN-generated 3D virtual desert terrain: a user-centric evaluation of immersion and realism. Smart Innov, Syst, Technol. 2024;382:179\u201391. https:\/\/doi.org\/10.1007\/978-981-99-9018-4_13.","journal-title":"Smart Innov, Syst, Technol"},{"key":"501_CR4","unstructured":"Devi YS and Kumar SP. DR-DCGAN: a deep convolutional generative adversarial network (DC-GAN) for diabetic retinopathy image synthesis. Webology, vol. 19, no. 2; 2022. 
Accessed: Oct. 02, 2024. [Online]. Available: http:\/\/www.webology.org."},{"issue":"5","key":"501_CR5","doi-asserted-by":"publisher","first-page":"831","DOI":"10.1007\/s11633-022-1411-7","volume":"21","author":"A Mumuni","year":"2024","unstructured":"Mumuni A, Mumuni F, Gerrar NK. A survey of synthetic data augmentation methods in computer vision. Mach Intell Res. 2024;21(5):831\u201369. https:\/\/doi.org\/10.1007\/s11633-022-1411-7.","journal-title":"Mach Intell Res"},{"key":"501_CR6","doi-asserted-by":"publisher","DOI":"10.1007\/S12559-024-10342-9","author":"V Chamola","year":"2024","unstructured":"Chamola V, et al. A comprehensive survey on generative AI for Metaverse: enabling immersive experience. Cognit Comput. 2024. https:\/\/doi.org\/10.1007\/S12559-024-10342-9.","journal-title":"Cognit Comput"},{"key":"501_CR7","doi-asserted-by":"publisher","DOI":"10.3389\/FRAI.2021.604234","author":"RT Hughes","year":"2021","unstructured":"Hughes RT, Zhu L, Bednarz T. Generative adversarial networks-enabled human\u2013artificial intelligence collaborative applications for creative and design industries: a systematic review of current approaches and trends. Front Artif Intell. 2021. https:\/\/doi.org\/10.3389\/FRAI.2021.604234.","journal-title":"Front Artif Intell"},{"issue":"1","key":"501_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/S44196-024-00491-Y\/TABLES\/1","volume":"17","author":"R Vasant Bidwe","year":"2024","unstructured":"Vasant Bidwe R, Mishra S, Kamini Bajaj S, Kotecha K. Attention-focused eye gaze analysis to predict autistic traits using transfer learning. Int J Comput Intell Syst. 2024;17(1):1\u201333. https:\/\/doi.org\/10.1007\/S44196-024-00491-Y\/TABLES\/1.","journal-title":"Int J Comput Intell Syst"},{"key":"501_CR9","doi-asserted-by":"publisher","unstructured":"Patil A and Venkatesh, DCGAN: Deep convolutional GAN with attention module for remote view classification. In International Conference on Forensics, Analytics, Big Data, Security, FABS 2021, Institute of Electrical and Electronics Engineers Inc.; 2021. https:\/\/doi.org\/10.1109\/FABS52071.2021.9702655.","DOI":"10.1109\/FABS52071.2021.9702655"},{"key":"501_CR10","doi-asserted-by":"publisher","DOI":"10.1016\/j.infrared.2023.104822","volume":"133","author":"Y Chen","year":"2023","unstructured":"Chen Y, Zhan W, Jiang Y, Zhu D, Guo R, Xu X. Ddgan: dense residual module and dual-stream attention-guided generative adversarial network for colorizing near-infrared images. Infrared Phys Technol. 2023;133:104822. https:\/\/doi.org\/10.1016\/j.infrared.2023.104822.","journal-title":"Infrared Phys Technol"},{"issue":"10","key":"501_CR11","doi-asserted-by":"publisher","first-page":"2481","DOI":"10.1109\/TMM.2019.2960588","volume":"22","author":"D Avola","year":"2020","unstructured":"Avola D, Cascio M, Cinque L, Foresti GL, Massaroni C, Rodola E. 2-d skeleton-based action recognition via two-branch stacked LSTM-RNNs. IEEE Trans Multimedia. 2020;22(10):2481\u201396. https:\/\/doi.org\/10.1109\/TMM.2019.2960588.","journal-title":"IEEE Trans Multimedia"},{"key":"501_CR12","doi-asserted-by":"publisher","unstructured":"Stiuca RE and Mocanu I. Character animation using LSTM networks. In 2023 46th international conference on telecommunications and signal processing (TSP), IEEE; 2023, pp. 284\u2013287. https:\/\/doi.org\/10.1109\/TSP59544.2023.10197710.","DOI":"10.1109\/TSP59544.2023.10197710"},{"key":"501_CR13","doi-asserted-by":"crossref","unstructured":"Liu X and Hsieh C-J. 
Rob-GAN: generator, discriminator, and adversarial attacker. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition; 2019. pp. 11234\u201311243.","DOI":"10.1109\/CVPR.2019.01149"},{"key":"501_CR14","doi-asserted-by":"publisher","unstructured":"Sauer A, Schwarz K, and Geiger A. StyleGAN-XL: scaling StyleGAN to large diverse datasets. In ACM SIGGRAPH 2022 conference proceedings, Association for Computing Machinery (ACM); 2022, pp. 1\u201310. https:\/\/doi.org\/10.1145\/3528233.3530738.","DOI":"10.1145\/3528233.3530738"},{"key":"501_CR15","unstructured":"Tasnim S, Mostafa AM, Morshed A, and Shaiyaz N. Normalizing images in various weather and lighting conditions using Pix2Pix GAN; 2024, Accessed: Oct. 02, 2024. [Online]. Available: https:\/\/dspace.bracu.ac.bd:8443\/xmlui\/handle\/10361\/23637."},{"issue":"1","key":"501_CR16","doi-asserted-by":"publisher","first-page":"181","DOI":"10.32604\/cmc.2023.041479","volume":"77","author":"X Zhao","year":"2023","unstructured":"Zhao X, Yu H, Bian H, Author C. Image to image translation based on differential image Pix2Pix model. Comput Mater Continua. 2023;77(1):181\u20139. https:\/\/doi.org\/10.32604\/cmc.2023.041479.","journal-title":"Comput Mater Continua"},{"key":"501_CR17","doi-asserted-by":"publisher","first-page":"1","DOI":"10.7717\/PEERJ-CS.1889\/SUPP-1","volume":"10","author":"A Yang","year":"2024","unstructured":"Yang A, Hanif MK. Visual resource extraction and artistic communication model design based on improved CycleGAN algorithm. PeerJ Comput Sci. 2024;10:1\u201318. https:\/\/doi.org\/10.7717\/PEERJ-CS.1889\/SUPP-1.","journal-title":"PeerJ Comput Sci"},{"key":"501_CR18","unstructured":"Goodfellow IJ et al. Generative adversarial nets. In Adv Neural Inf Process Syst, vol. 27; 2014. Accessed: Oct. 31, 2024. [Online]. Available: https:\/\/proceedings.neurips.cc\/paper\/5423-generative-adversarial-nets."},{"key":"501_CR19","doi-asserted-by":"crossref","unstructured":"Ionescu C, Papava D, Olaru V, and Sminchisescu C. Human3.6M: large scale datasets and predictive methods for 3D human sensing in natural environments. In IEEE Trans Pattern Anal Mach Intell; 2014. Accessed: Oct. 31, 2024. [Online]. 
Available: http:\/\/vision.imar.ro\/human3.6m.","DOI":"10.1109\/TPAMI.2013.248"}],"container-title":["Discover Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s44163-025-00501-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s44163-025-00501-8","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s44163-025-00501-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T14:11:10Z","timestamp":1767017470000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s44163-025-00501-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,29]]},"references-count":19,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2025,12]]}},"alternative-id":["501"],"URL":"https:\/\/doi.org\/10.1007\/s44163-025-00501-8","relation":{},"ISSN":["2731-0809"],"issn-type":[{"type":"electronic","value":"2731-0809"}],"subject":[],"published":{"date-parts":[[2025,12,29]]},"assertion":[{"value":"2 February 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 August 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 December 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"We declare that ethical approval, consent to participate, and consent to publish are not applicable to this study.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval and consent to participate"}},{"value":"The authors declare no competing interests.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"398"}}
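The block above is a Crossref REST API work record (message-type "work", source "Crossref") for DOI 10.1007/s44163-025-00501-8, the article "Enhancing character animation realism with generative adversarial networks (GANs): a comparative method study" in Discover Artificial Intelligence. As a minimal sketch, assuming network access and the public Crossref endpoint https://api.crossref.org/works/{DOI}, the same record can be retrieved and its main fields read back out; every key used below ("message", "title", "container-title", "author", "license", "reference-count", "published") appears verbatim in the record above.

import json
import urllib.request

# DOI taken from the record above; the URL follows the standard
# Crossref REST API pattern https://api.crossref.org/works/{doi}.
DOI = "10.1007/s44163-025-00501-8"
URL = "https://api.crossref.org/works/" + DOI

with urllib.request.urlopen(URL) as resp:
    payload = json.load(resp)

# The substantive metadata sits under "message", as in the record above.
work = payload["message"]

print("Title:    ", work["title"][0])
print("Journal:  ", work["container-title"][0])
print("DOI:      ", work["DOI"])
print("Published:", "-".join(str(p) for p in work["published"]["date-parts"][0]))
print("License:  ", work["license"][0]["URL"])
print("Deposited references:", work["reference-count"])

# Authors: "given"/"family" pairs; the first author is flagged by "sequence".
for a in work["author"]:
    marker = "*" if a.get("sequence") == "first" else "-"
    print(marker, a.get("given", ""), a.get("family", ""), a.get("ORCID", ""))

Run under a stock Python 3 interpreter; only the standard library is used. Crossref asks heavy users to identify themselves (for example via a mailto address in the User-Agent header), which this sketch omits for brevity.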