{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T22:33:37Z","timestamp":1768343617372,"version":"3.49.0"},"reference-count":39,"publisher":"Tech Science Press","issue":"1","license":[{"start":{"date-parts":[[2025,3,30]],"date-time":"2025-03-30T00:00:00Z","timestamp":1743292800000},"content-version":"vor","delay-in-days":88,"URL":"https:\/\/doi.org\/10.32604\/TSP-CROSSMARKPOLICY"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["CMC"],"published-print":{"date-parts":[[2025]]},"DOI":"10.32604\/cmc.2025.061243","type":"journal-article","created":{"date-parts":[[2025,3,14]],"date-time":"2025-03-14T04:31:17Z","timestamp":1741926677000},"page":"335-355","update-policy":"https:\/\/doi.org\/10.32604\/tsp-crossmarkpolicy","source":"Crossref","is-referenced-by-count":1,"title":["Optimizing 2D Image Quality in CartoonGAN: A Novel Approach Using Enhanced Pixel Integration"],"prefix":"10.32604","volume":"83","author":[{"given":"Stellar","family":"Choi","sequence":"first","affiliation":[]},{"given":"HeeAe","family":"Ko","sequence":"additional","affiliation":[]},{"given":"KyungRok","family":"Bae","sequence":"additional","affiliation":[]},{"given":"HyunSook","family":"Lee","sequence":"additional","affiliation":[]},{"given":"HaeJong","family":"Joo","sequence":"additional","affiliation":[]},{"given":"Woong","family":"Choi","sequence":"additional","affiliation":[]}],"member":"17807","published-online":{"date-parts":[[2025]]},"reference":[{"key":"ref1","author":"Goodfellow","journal-title":"Advances in neural information processing systems (NeurIPS)"},{"key":"ref2","unstructured":"Karras T, Timo A, Samuli L, Jaakko L. Progressive growing of GANs for improved quality, stability, and variation. arXiv:1710.10196. 2017."},{"key":"ref3","author":"Szeliski","year":"2010","journal-title":"Computer vision: algorithms and applications"},{"key":"ref4","unstructured":"Elgammal A, Liu B, Elhoseiny M, Mazzone M. CAN: creative adversarial networks: generating art by learning about styles and deviating from style norms. arXiv:1706.07068. 2017."},{"key":"ref5","series-title":"7th International Conference on Trends in Electronics and Informatics (ICOEI)","first-page":"265","article-title":"Photo-to-cartoon translation with generative adversarial network","author":"Ahmed","year":"2023"},{"key":"ref6","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"1125","article-title":"Image-to-image translation with conditional adversarial networks","author":"Isola","year":"2017"},{"key":"ref7","unstructured":"Maerten AS, Soydaner D. From paintbrush to pixel: a review of deep neural networks in AI-generated art. arXiv:2302.10913. 2023."},{"key":"ref8","series-title":"Proceedings of the IEEE International Conference on Computer Vision (ICCV)","article-title":"Arbitrary style transfer in real-time with adaptive instance normalization","author":"Huang","year":"2017"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"e2248","DOI":"10.1002\/cav.2248","article-title":"GAN-based multi-decomposition photo cartoonization","volume":"35","author":"Zhao","year":"2024","journal-title":"Comput Anim Virtual Worlds"},{"key":"ref10","unstructured":"Brock A, Donahue J, Simonyan K. Large scale GAN training for high fidelity natural image synthesis. arXiv:1809.11096. 2018."},{"key":"ref11","author":"Forsyth","year":"2002","journal-title":"Computer vision: a modern approach"},{"key":"ref12","unstructured":"OpenCV documentation and tutorials, The open source computer vision library. [cited 2025 Jan 1]. Available from: https:\/\/docs.opencv.org\/4.x\/d9\/df8\/tutorial_root.html."},{"key":"ref13","doi-asserted-by":"crossref","unstructured":"Karras T, Laine S, Aila T. A style-based generator architecture for generative adversarial networks. arXiv:1812.04948. 2019.","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref14","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"9465","article-title":"CartoonGAN: generative adversarial networks for photo cartoonization","author":"Yang","year":"2018"},{"key":"ref15","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"586","article-title":"The unreasonable effectiveness of deep features as a perceptual metric","author":"Zhang","year":"2018"},{"key":"ref16","series-title":"Proceedings of the European Conference on Computer Vision (ECCV)","first-page":"694","article-title":"Perceptual losses for real-time style transfer and super-resolution","author":"Johnson","year":"2016"},{"key":"ref17","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"8090","article-title":"Learning to cartoonize using white-box cartoon representations","author":"Wang","year":"2020"},{"key":"ref18","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"4681","article-title":"Photo-realistic single image super-resolution using a generative adversarial network","author":"Ledig","year":"2017"},{"key":"ref19","doi-asserted-by":"crossref","unstructured":"Zhu J, Park T, Isola P, Efros AA. Unpaired image-to-image translation using cycle-consistent adversarial networks. arXiv: 1703.10593. 2017.","DOI":"10.1109\/ICCV.2017.244"},{"key":"ref20","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J. Deep residual learning for image recognition. arXiv: 1512.03385. 2016.","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref21","author":"Gonzalez","year":"2016","journal-title":"Digital image processing"},{"key":"ref22","doi-asserted-by":"crossref","first-page":"12","DOI":"10.1145\/965139.807361","article-title":"Color gamut transform pairs","volume":"12","author":"Smith","year":"1978","journal-title":"ACM SIGGRAPH Comput Graph"},{"key":"ref23","doi-asserted-by":"crossref","first-page":"901","DOI":"10.1109\/83.597268","article-title":"Digital color imaging","volume":"6","author":"Sharma","year":"1997","journal-title":"IEEE Trans Image Process"},{"key":"ref24","doi-asserted-by":"crossref","first-page":"340","DOI":"10.1002\/col.1049","article-title":"The development of the CIEDE2000 colour-difference formula","volume":"26","author":"Luo","year":"2001","journal-title":"Color Res Appl"},{"key":"ref25","first-page":"1315","article-title":"A categorization of multiscale-decomposition-based image fusion schemes with a performance study for a digital camera application","volume":"87","author":"Zhang","year":"1999","journal-title":"IEEE Trans Image Process"},{"key":"ref26","unstructured":"Xu B, Wang N, Chen T, Li M. Empirical evaluation of rectified activations in convolutional network. arXiv:1505.00853. 2015."},{"key":"ref27","unstructured":"Redmon J, Farhadi A. YOLOv3: an incremental improvement. arXiv:1804.02767. 2018."},{"key":"ref28","unstructured":"Simonyan K, Zisserman A. Very deep convolutional networks for large-scale image recognition. arXiv:1409.1556. 2014."},{"key":"ref29","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","article-title":"Feature pyramid networks for object detection","author":"Lin"},{"key":"ref30","author":"Poynton","year":"2012","journal-title":"Digital video and HD: algorithms and interfaces"},{"key":"ref31","doi-asserted-by":"crossref","first-page":"600","DOI":"10.1109\/TIP.2003.819861","article-title":"Image quality assessment: from error visibility to structural similarity","volume":"13","author":"Wang","year":"2004","journal-title":"IEEE Trans Image Process"},{"key":"ref32","series-title":"20th International Conference on Pattern Recognition (ICPR)","first-page":"2366","article-title":"Image quality metrics: PSNR vs. SSIM","author":"Hor\u00e9","year":"2010"},{"key":"ref33","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1109\/TG.2019.2896986","article-title":"Deep learning for video game playing","volume":"12","author":"Justesen","year":"2020","journal-title":"IEEE Trans Games"},{"key":"ref34","first-page":"1","article-title":"Neural style transfer: a paradigm shift for image-based artistic rendering?","author":"Semmo","year":"2017","journal-title":"Proceedings of the Symposium on Non-Photorealistic Animation and Rendering"},{"key":"ref35","series-title":"Proceedings of the 41st IEEE\/ACM International Conference on Computer-Aided Design (ICCAD)","first-page":"1","article-title":"RT-NeRF: real-time on-device neural radiance fields towards immersive AR\/VR rendering","author":"Li","year":"2022"},{"key":"ref36","doi-asserted-by":"crossref","first-page":"34","DOI":"10.1109\/38.963459","article-title":"Recent advances in augmented reality","volume":"21","author":"Azuma","year":"2002","journal-title":"IEEE Comput Graph Appl"},{"key":"ref37","unstructured":"Han S, Mao H, Dally WJ. Deep compression: compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv:1510.00149. 2015."},{"key":"ref38","unstructured":"Howard AG, Zhu M, Chen B, Kalenichenko D, Wang W, Weyand T, et al. MobileNets: efficient convolutional neural networks for mobile vision applications. arXiv:1704.04861. 2017."},{"key":"ref39","series-title":"German Conference on Pattern Recognition (GCPR)","first-page":"26","article-title":"Artistic style transfer for videos","author":"Ruder","year":"2016"}],"container-title":["Computers, Materials &amp; Continua"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/cdn.techscience.cn\/files\/cmc\/2025\/TSP_CMC-83-1\/TSP_CMC_61243\/TSP_CMC_61243.pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T06:39:39Z","timestamp":1763102379000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.techscience.com\/cmc\/v83n1\/60113"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":39,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2025]]},"published-print":{"date-parts":[[2025]]}},"URL":"https:\/\/doi.org\/10.32604\/cmc.2025.061243","relation":{},"ISSN":["1546-2226"],"issn-type":[{"value":"1546-2226","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"2024-11-20","order":0,"name":"received","label":"Received","group":{"name":"publication_history","label":"Publication History"}},{"value":"2025-02-19","order":1,"name":"accepted","label":"Accepted","group":{"name":"publication_history","label":"Publication History"}},{"value":"2025-03-26","order":2,"name":"published","label":"Published Online","group":{"name":"publication_history","label":"Publication History"}}]}}