{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T15:27:42Z","timestamp":1759332462884,"version":"3.37.3"},"reference-count":51,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2020YFB1313002","2020AAA0107400"],"award-info":[{"award-number":["2020YFB1313002","2020AAA0107400"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U20A20222"],"award-info":[{"award-number":["U20A20222"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004731","name":"Zhejiang Provincial Natural Science Foundation of China","doi-asserted-by":"publisher","award":["LR19F020004"],"award-info":[{"award-number":["LR19F020004"]}],"id":[{"id":"10.13039\/501100004731","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key Scientific Technological Innovation Research Project by Ministry of Education"},{"DOI":"10.13039\/501100004773","name":"Henan University Scientific and Technological Innovation Team Support Program","doi-asserted-by":"publisher","award":["19IRTSTHN012"],"award-info":[{"award-number":["19IRTSTHN012"]}],"id":[{"id":"10.13039\/501100004773","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Image Process."],"published-print":{"date-parts":[[2021]]},"DOI":"10.1109\/tip.2021.3058566","type":"journal-article","created":{"date-parts":[[2021,2,22]],"date-time":"2021-02-22T23:28:36Z","timestamp":1614036516000},"page":"3154-3166","source":"Crossref","is-referenced-by-count":29,"title":["Efficient Style-Corpus Constrained Learning for Photorealistic Style Transfer"],"prefix":"10.1109","volume":"30","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6227-614X","authenticated-orcid":false,"given":"Yingxu","family":"Qiao","sequence":"first","affiliation":[]},{"given":"Jiabao","family":"Cui","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8873-1729","authenticated-orcid":false,"given":"Fuxian","family":"Huang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9834-4087","authenticated-orcid":false,"given":"Hongmin","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Cuizhu","family":"Bao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3023-1662","authenticated-orcid":false,"given":"Xi","family":"Li","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00143"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2016.2567393"},{"key":"ref33","first-page":"1","article-title":"Fitnets: Hints for thin deep nets","volume":"abs 1412 6550","author":"romero","year":"2014","journal-title":"CoRR"},{"key":"ref32","article-title":"Distilling the knowledge in a neural network","author":"hinton","year":"2015","journal-title":"ArXiv 1503 02531"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00271"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00110"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00409"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00302"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.754"},{"key":"ref34","first-page":"1","article-title":"Paying more attention to attention: Improving the performance of convolutional neural networks via attention transfer","author":"zagoruyko","year":"2017","journal-title":"Proc ICLR"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00128"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00044"},{"key":"ref29","article-title":"Distilling knowledge from a deep pose regressor network","author":"risqi u saputra","year":"2019","journal-title":"arXiv 1908 00858"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01219-9_28"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.740"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.244"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00593"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240618"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2019.8756542"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240612"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00159"},{"key":"ref25","first-page":"1","article-title":"Exemplar guided unsupervised image-to-image translation with semantic consistency","author":"ma","year":"2019","journal-title":"Proc ICLR"},{"journal-title":"Painter by numbers wikiart","year":"2016","author":"nicho","key":"ref50"},{"key":"ref51","first-page":"694","article-title":"Perceptual losses for real-time style transfer and super-resolution","author":"johnson","year":"2016","journal-title":"Proc Eur Conf Comput Vis (ECCV)"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00860"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00841"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00145"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00665"},{"article-title":"Learning and investigating a style-free representation for fast, flexible, and high-quality neural style transfer","year":"2019","author":"zhang","key":"ref13"},{"key":"ref14","first-page":"1521","article-title":"Learning affinity via spatial propagation networks","author":"liu","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref15","article-title":"Fast universal style transfer for artistic and photorealistic rendering","author":"an","year":"2019","journal-title":"arXiv 1907 03118"},{"key":"ref16","article-title":"StyleNAS: An empirical study of neural architecture search to uncover surprisingly fast end-to-end universal style transfer networks","author":"an","year":"2019","journal-title":"arXiv 1906 02470"},{"key":"ref17","first-page":"2672","article-title":"Generative adversarial nets","author":"goodfellow","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.632"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00917"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00913"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00393"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.265"},{"journal-title":"Art History and its Methods A critical anthology","year":"1995","author":"fernie","key":"ref5"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00858"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01264-9_47"},{"key":"ref49","first-page":"740","article-title":"Microsoft COCO: Common objects in context","author":"lin","year":"2014","journal-title":"Proc Eur Conf Comput Vis (ECCV)"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01029"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.481"},{"key":"ref45","first-page":"1","article-title":"Disentangling content and style via unsupervised geometry distillation","author":"wu","year":"2019","journal-title":"Proc ICLR"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01237-3_43"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.167"},{"key":"ref42","first-page":"775","article-title":"KDGAN: Knowledge distillation with generative adversarial networks","author":"wang","year":"2018","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref41","first-page":"1","article-title":"Training shallow and thin networks for acceleration via knowledge distillation with conditional adversarial networks","author":"xu","year":"2018","journal-title":"Proc ICLRW"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8851980"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00361"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/83\/9263394\/09360460.pdf?arnumber=9360460","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T14:50:03Z","timestamp":1652194203000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9360460\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"references-count":51,"URL":"https:\/\/doi.org\/10.1109\/tip.2021.3058566","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"type":"print","value":"1057-7149"},{"type":"electronic","value":"1941-0042"}],"subject":[],"published":{"date-parts":[[2021]]}}}