{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,12]],"date-time":"2025-06-12T04:48:43Z","timestamp":1749703723287,"version":"3.37.3"},"reference-count":59,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"ARC Centre of Excellence for Robotics Vision","award":["CE140100016","DP 190102261","190100080"],"award-info":[{"award-number":["CE140100016","DP 190102261","190100080"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Multimedia"],"published-print":{"date-parts":[[2022]]},"DOI":"10.1109\/tmm.2021.3064273","type":"journal-article","created":{"date-parts":[[2021,3,8]],"date-time":"2021-03-08T21:21:16Z","timestamp":1615238476000},"page":"1378-1388","source":"Crossref","is-referenced-by-count":4,"title":["Disentangled Feature Networks for Facial Portrait and Caricature Generation"],"prefix":"10.1109","volume":"24","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4317-660X","authenticated-orcid":false,"given":"Kaihao","family":"Zhang","sequence":"first","affiliation":[{"name":"College of Engineering, and Computer Science, the Australian National University, Canberra, ACT, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5697-4168","authenticated-orcid":false,"given":"Wenhan","family":"Luo","sequence":"additional","affiliation":[{"name":"Tencent, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7331-6132","authenticated-orcid":false,"given":"Lin","family":"Ma","sequence":"additional","affiliation":[{"name":"Meituan, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5481-653X","authenticated-orcid":false,"given":"Wenqi","family":"Ren","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Information Security, Institute of Information Engineering, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4125-1554","authenticated-orcid":false,"given":"Hongdong","family":"Li","sequence":"additional","affiliation":[{"name":"College of Engineering, and Computer Science, the Australian National University, Canberra, ACT, Australia"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1145\/259081.259231"},{"key":"ref2","first-page":"1","article-title":"Making extreme caricatures with a new interactive 2 d deformation technique with simplicial complexes","volume-title":"Proc. Vis.","author":"Akleman","year":"2000"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1145\/641007.641040"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1145\/966131.966133"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.1109\/PCCGA.2002.1167882"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1145\/1186223.1186294"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1162\/leon.2007.40.4.392"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.1016\/j.cag.2011.03.005"},{"doi-asserted-by":"publisher","key":"ref9","DOI":"10.1007\/978-3-030-01219-9_11"},{"key":"ref10","first-page":"1857","article-title":"Learning to discover cross-domain relations with generative adversarial networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kim","year":"2017"},{"key":"ref11","article-title":"Unsupervised image-to-image translation networks","volume-title":"Adv. Neural Inf. Process. Syst.","author":"Liu","year":"2017"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/ICCV.2017.310"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1109\/TMM.2019.2897897"},{"key":"ref14","first-page":"1","article-title":"WebCaricature: A benchmark for caricature face recognition","volume-title":"Proc. Brit. Mach. Vis. Conf.","author":"Huo","year":"2018"},{"doi-asserted-by":"publisher","key":"ref15","DOI":"10.1109\/CVPR.2017.632"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1109\/ICCV.2017.244"},{"key":"ref17","first-page":"1","article-title":"Conditional generative adversarial nets","volume-title":"Adv. Neural Inf. Process. Syst.","author":"Mirza","year":"2014"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.1145\/1180639.1180783"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1007\/978-3-540-76386-4_29"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1109\/CVPR.2012.6247759"},{"key":"ref21","first-page":"1","article-title":"Spatial transformer networks","volume-title":"Adv. Neural Inf. Process. Syst.","author":"Jaderberg","year":"2015"},{"doi-asserted-by":"publisher","key":"ref22","DOI":"10.1109\/CVPR.2018.00985"},{"doi-asserted-by":"publisher","key":"ref23","DOI":"10.1007\/978-3-319-46475-6_20"},{"doi-asserted-by":"publisher","key":"ref24","DOI":"10.1145\/3272127.3275046"},{"doi-asserted-by":"publisher","key":"ref25","DOI":"10.1016\/j.neunet.2020.08.011"},{"doi-asserted-by":"publisher","key":"ref26","DOI":"10.1109\/CVPR.2019.01102"},{"year":"2019","author":"Wu","article-title":"Landmark assisted cyclegan for cartoon face generation","key":"ref27"},{"doi-asserted-by":"publisher","key":"ref28","DOI":"10.1145\/383259.383296"},{"doi-asserted-by":"publisher","key":"ref29","DOI":"10.1109\/ICIP.1995.537718"},{"doi-asserted-by":"publisher","key":"ref30","DOI":"10.1109\/CVPR.2016.265"},{"doi-asserted-by":"publisher","key":"ref31","DOI":"10.1109\/CVPR.2017.397"},{"doi-asserted-by":"publisher","key":"ref32","DOI":"10.1109\/ICCV.2017.167"},{"doi-asserted-by":"publisher","key":"ref33","DOI":"10.1109\/CVPR.2016.272"},{"doi-asserted-by":"publisher","key":"ref34","DOI":"10.1007\/978-3-319-45886-1_3"},{"doi-asserted-by":"publisher","key":"ref35","DOI":"10.24963\/ijcai.2017\/310"},{"doi-asserted-by":"publisher","key":"ref36","DOI":"10.1109\/CVPR.2017.745"},{"doi-asserted-by":"publisher","key":"ref37","DOI":"10.1109\/ICCV.2017.126"},{"doi-asserted-by":"publisher","key":"ref38","DOI":"10.3156\/jsoft.29.5_177_2"},{"doi-asserted-by":"publisher","key":"ref39","DOI":"10.1109\/TMM.2019.2908352"},{"doi-asserted-by":"publisher","key":"ref40","DOI":"10.1109\/TMM.2019.2895292"},{"key":"ref41","first-page":"1","article-title":"Toward multimodal image-to-image translation","volume-title":"Adv. Neural Inf. Process. Syst.","author":"Zhu","year":"2017"},{"key":"ref42","first-page":"1","article-title":"Generating videos with scene dynamics","volume-title":"Adv. Neural Inf. Process. Syst.","author":"Vondrick","year":"2016"},{"key":"ref43","first-page":"1","article-title":"Unsupervised Cross-Domain Image Generation","volume-title":"Int. Conf. Learn. Representation","author":"Taigman","year":"2017"},{"doi-asserted-by":"publisher","key":"ref44","DOI":"10.1109\/ICCV.2017.629"},{"key":"ref45","first-page":"1","article-title":"Deep generative image models using a laplacian pyramid of adversarial networks","volume-title":"Adv. Neural Inf. Process. Syst.","author":"Denton","year":"2015"},{"doi-asserted-by":"publisher","key":"ref46","DOI":"10.1109\/CVPR.2017.319"},{"doi-asserted-by":"publisher","key":"ref47","DOI":"10.1109\/CVPR.2018.00251"},{"doi-asserted-by":"publisher","key":"ref48","DOI":"10.1109\/TIP.2018.2867733"},{"doi-asserted-by":"publisher","key":"ref49","DOI":"10.1007\/978-3-319-24574-4_28"},{"doi-asserted-by":"publisher","key":"ref50","DOI":"10.1109\/CVPR.2016.90"},{"doi-asserted-by":"publisher","key":"ref51","DOI":"10.1007\/978-3-319-46493-038"},{"key":"ref52","first-page":"730","article-title":"Very deep convolutional networks for large-scale image recognition","volume-title":"in Proc. Int. Conf. Learn. Representations","author":"Simonyan","year":"2015"},{"key":"ref53","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","volume-title":"Int. Conf. Mach. Learn.","author":"Ioffe","year":"2015"},{"key":"ref54","article-title":"Unsupervised representation learning with deep convolutional generative adversarial networks","volume-title":"The Int. Conf. Learn. Representations","author":"Radford","year":"2015"},{"doi-asserted-by":"publisher","key":"ref55","DOI":"10.1109\/CVPR.2017.437"},{"doi-asserted-by":"publisher","key":"ref56","DOI":"10.1007\/978-3-319-46475-6_43"},{"key":"ref57","first-page":"1","article-title":"Fitnets: Hints for thin deep nets","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Romero","year":"2015"},{"key":"ref58","article-title":"Pose guided person image generation","volume-title":"Adv. Neural Inf. Process. Syst.","author":"Ma","year":"2017"},{"doi-asserted-by":"publisher","key":"ref59","DOI":"10.1109\/ICCV.2015.425"}],"container-title":["IEEE Transactions on Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6046\/9687854\/09372827.pdf?arnumber=9372827","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,9]],"date-time":"2024-01-09T23:24:16Z","timestamp":1704842656000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9372827\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"references-count":59,"URL":"https:\/\/doi.org\/10.1109\/tmm.2021.3064273","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"type":"print","value":"1520-9210"},{"type":"electronic","value":"1941-0077"}],"subject":[],"published":{"date-parts":[[2022]]}}}