{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T06:34:11Z","timestamp":1773210851016,"version":"3.50.1"},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100004052","name":"King Abdullah University of Science and Technology","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004052","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Visual Computing Center"},{"DOI":"10.13039\/501100004663","name":"Ministry of Science and Technology, Taiwan","doi-asserted-by":"publisher","award":["MOST 110-2634-F-007-015"],"award-info":[{"award-number":["MOST 110-2634-F-007-015"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Multimedia"],"published-print":{"date-parts":[[2022]]},"DOI":"10.1109\/tmm.2021.3113786","type":"journal-article","created":{"date-parts":[[2021,9,20]],"date-time":"2021-09-20T20:16:47Z","timestamp":1632169007000},"page":"4077-4091","source":"Crossref","is-referenced-by-count":59,"title":["AniGAN: Style-Guided Generative Adversarial Networks for Unsupervised Anime Face Generation"],"prefix":"10.1109","volume":"24","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9465-8142","authenticated-orcid":false,"given":"Bing","family":"Li","sequence":"first","affiliation":[{"name":"Visual Computing Center, King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1745-4626","authenticated-orcid":false,"given":"Yuanlue","family":"Zhu","sequence":"additional","affiliation":[{"name":"ByteDance, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7559-4152","authenticated-orcid":false,"given":"Yitong","family":"Wang","sequence":"additional","affiliation":[{"name":"ByteDance, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9097-2318","authenticated-orcid":false,"given":"Chia-Wen","family":"Lin","sequence":"additional","affiliation":[{"name":"Department of Electrical Engineering and the Institute of Communications Engineering, National Tsing Hua University, Hsinchu, Taiwan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5534-587X","authenticated-orcid":false,"given":"Bernard","family":"Ghanem","sequence":"additional","affiliation":[{"name":"Visual Computing Center, King Abdullah University of Science and Technology (KAUST), Thuwal, Saudi Arabia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1420-0815","authenticated-orcid":false,"given":"Linlin","family":"Shen","sequence":"additional","affiliation":[{"name":"Computer Vision Institute, College of Computer Science and Software Engineering, Shenzhen University, Shenzhen, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"2672","volume-title":"Generative Adversarial Nets","author":"Goodfellow","year":"2014"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.244"},{"key":"ref3","article-title":"U-GAT-IT: Unsupervised generative attentional networks with adaptive layer-instance normalization for image-to-image translation","volume-title":"Proc. Int. Conf. Learn. Rep","author":"Kim","year":"2020"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.632"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00917"},{"key":"ref6","first-page":"700","article-title":"Unsupervised image-to-image translation networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst","author":"Liu","year":"2017"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-019-01284-z"},{"key":"ref8","article-title":"Exemplar guided unsupervised image-to-image translation with semantic consistency","volume-title":"Proc. Int. Conf. Learn. Rep.","author":"Ma","year":"2019"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3182157"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240618"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01028"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00524"},{"key":"ref13","article-title":"Conditional generative adversarial nets","author":"Mirza","year":"2014"},{"key":"ref14","article-title":"Progressive growing of GANs for improved quality, stability, and variation","volume-title":"Proc. Int. Conf. Learn. Rep","author":"Karras","year":"2018"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref16","first-page":"7354","article-title":"Self-attention generative adversarial networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Zhang","year":"2019"},{"key":"ref17","article-title":"Large scale GAN training for high fidelity natural image synthesis","volume-title":"Proc. Int. Conf. Learn. Rep","author":"Brock","year":"2019"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2914583"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01216-8_1"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2907052"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.3015015"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2897897"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01219-9_11"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.01065"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00916"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00596"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.167"},{"key":"ref28","first-page":"574","article-title":"Generative semantic manipulation with contrasting GAN","volume-title":"Proc. Eur. Conf. Comput. Vis","author":"Liang","year":"2018"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1167\/16.12.326"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.265"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46475-6_43"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00986"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240655"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01100"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01102"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3306346.3322984"},{"key":"ref37","article-title":"Layer normalization","author":"Ba","year":"2016"},{"key":"ref38","article-title":"Instance normalization: The missing ingredient for fast stylization","author":"Ulyanov","year":"2016"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00244"},{"key":"ref40","article-title":"Anonymous, Danbooru community, and gwern branwen, danbooru2019: A large-scale crowdsourced and tagged anime illustration dataset"},{"key":"ref41","article-title":"nagadomi, Animeface 2009"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.425"},{"key":"ref43","first-page":"8024","article-title":"Pytorch: An imperative style, highperformance deep learning library","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Paszke","year":"2019"},{"key":"ref44","first-page":"6626","article-title":"Gans trained by a two time-scale update rule converge to a local nash equilibrium,","volume-title":"Proc. Adv. Neural Inf. Process. Syst","author":"Heusel","year":"2017"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00821"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58580-8_3"}],"container-title":["IEEE Transactions on Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6046\/9687854\/09541089.pdf?arnumber=9541089","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,11]],"date-time":"2024-01-11T23:07:02Z","timestamp":1705014422000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9541089\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/tmm.2021.3113786","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"value":"1520-9210","type":"print"},{"value":"1941-0077","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]}}}