{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T13:46:22Z","timestamp":1774964782796,"version":"3.50.1"},"publisher-location":"Cham","reference-count":34,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030585976","type":"print"},{"value":"9783030585983","type":"electronic"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-58598-3_34","type":"book-chapter","created":{"date-parts":[[2020,11,6]],"date-time":"2020-11-06T16:03:52Z","timestamp":1604678632000},"page":"573-589","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":38,"title":["Domain-Specific Mappings for Generative Adversarial Style Transfer"],"prefix":"10.1007","author":[{"given":"Hsin-Yu","family":"Chang","sequence":"first","affiliation":[]},{"given":"Zhixiang","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yung-Yu","family":"Chuang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,11,7]]},"reference":[{"key":"34_CR1","doi-asserted-by":"crossref","unstructured":"Chen, D., Yuan, L., Liao, J., Yu, N., Hua, G.: StyleBank: an explicit representation for neural image style transfer. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.296"},{"key":"34_CR2","doi-asserted-by":"crossref","unstructured":"Cho, W., Choi, S., Keetae Park, D., Shin, I., Choo, J.: Image-to-image translation via group-wise deep whitening-and-coloring transformation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.01089"},{"key":"34_CR3","doi-asserted-by":"crossref","unstructured":"Choi, Y., Choi, M., Kim, M., Ha, J.W., Kim, S., Choo, J.: StarGAN: unified generative adversarial networks for multi-domain image-to-image translation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00916"},{"key":"34_CR4","doi-asserted-by":"crossref","unstructured":"Gatys, L., Ecker, A.S., Bethge, M.: Texture synthesis using convolutional neural networks. In: NeurIPS (2015)","DOI":"10.1109\/CVPR.2016.265"},{"key":"34_CR5","doi-asserted-by":"crossref","unstructured":"Gatys, L.A., Ecker, A.S., Bethge, M.: Image style transfer using convolutional neural networks. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.265"},{"key":"34_CR6","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local nash equilibrium. In: NeurIPS (2017)"},{"key":"34_CR7","doi-asserted-by":"crossref","unstructured":"Huang, X., Belongie, S.: Arbitrary style transfer in real-time with adaptive instance normalization. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.167"},{"key":"34_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"179","DOI":"10.1007\/978-3-030-01219-9_11","volume-title":"Computer Vision \u2013 ECCV 2018","author":"X Huang","year":"2018","unstructured":"Huang, X., Liu, M.-Y., Belongie, S., Kautz, J.: Multimodal unsupervised image-to-image translation. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11207, pp. 179\u2013196. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01219-9_11"},{"key":"34_CR9","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"34_CR10","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"694","DOI":"10.1007\/978-3-319-46475-6_43","volume-title":"Computer Vision \u2013 ECCV 2016","author":"J Johnson","year":"2016","unstructured":"Johnson, J., Alahi, A., Fei-Fei, L.: Perceptual losses for real-time style transfer and super-resolution. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9906, pp. 694\u2013711. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46475-6_43"},{"key":"34_CR11","unstructured":"Kim, J., Kim, M., Kang, H., Lee, K.: U-GAT-IT: unsupervised generative attentional networks with adaptive layer-instance normalization for image-to-image translation. arXiv preprint arXiv:1907.10830 (2019)"},{"key":"34_CR12","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114 (2013)"},{"key":"34_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"36","DOI":"10.1007\/978-3-030-01246-5_3","volume-title":"Computer Vision \u2013 ECCV 2018","author":"H-Y Lee","year":"2018","unstructured":"Lee, H.-Y., Tseng, H.-Y., Huang, J.-B., Singh, M., Yang, M.-H.: Diverse image-to-image translation via disentangled representations. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11205, pp. 36\u201352. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01246-5_3"},{"key":"34_CR14","unstructured":"Li, X., et al.: Attribute guided unpaired image-to-image translation with semi-supervised learning. arXiv preprint arXiv:1904.12428 (2019)"},{"key":"34_CR15","doi-asserted-by":"crossref","unstructured":"Li, X., Liu, S., Kautz, J., Yang, M.H.: Learning linear transformations for fast arbitrary style transfer. arXiv preprint arXiv:1808.04537 (2018)","DOI":"10.1109\/CVPR.2019.00393"},{"key":"34_CR16","unstructured":"Li, Y., Fang, C., Yang, J., Wang, Z., Lu, X., Yang, M.H.: Universal style transfer via feature transforms. In: NeurIPS (2017)"},{"key":"34_CR17","doi-asserted-by":"crossref","unstructured":"Liao, J., Yao, Y., Yuan, L., Hua, G., Kang, S.B.: Visual attribute transfer through deep image analogy. In: ACM SIGGRAPH (2017)","DOI":"10.1145\/3072959.3073683"},{"key":"34_CR18","doi-asserted-by":"crossref","unstructured":"Lin, J., Pang, Y., Xia, Y., Chen, Z., Luo, J.: TuiGAN: learning versatile image-to-image translation with two unpaired images. ECCV (2020)","DOI":"10.1007\/978-3-030-58548-8_2"},{"key":"34_CR19","unstructured":"Liu, M.Y., Breuel, T., Kautz, J.: Unsupervised image-to-image translation networks. In: NeurIPS (2017)"},{"key":"34_CR20","doi-asserted-by":"crossref","unstructured":"Liu, M.Y., et al.: Few-shot unsupervised image-to-image translation. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.01065"},{"key":"34_CR21","doi-asserted-by":"crossref","unstructured":"Lu, M., Zhao, H., Yao, A., Xu, F., Chen, Y., Zhang, L.: Decoder network over lightweight reconstructed feature for fast semantic style transfer. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.270"},{"key":"34_CR22","doi-asserted-by":"crossref","unstructured":"Luan, F., Paris, S., Shechtman, E., Bala, K.: Deep photo style transfer. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.740"},{"key":"34_CR23","doi-asserted-by":"crossref","unstructured":"Mao, Q., Lee, H.Y., Tseng, H.Y., Ma, S., Yang, M.H.: Mode seeking generative adversarial networks for diverse image synthesis. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00152"},{"key":"34_CR24","doi-asserted-by":"crossref","unstructured":"Mao, X., Li, Q., Xie, H., Lau, R.Y., Wang, Z., Paul Smolley, S.: Least squares generative adversarial networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.304"},{"key":"34_CR25","unstructured":"Mejjati, Y.A., Richardt, C., Tompkin, J., Cosker, D., Kim, K.I.: Unsupervised attention-guided image-to-image translation. In: NeurIPS (2018)"},{"key":"34_CR26","unstructured":"Mo, S., Cho, M., Shin, J.: InstaGAN: instance-aware image-to-image translation. arXiv preprint arXiv:1812.10889 (2018)"},{"key":"34_CR27","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"835","DOI":"10.1007\/978-3-030-01249-6_50","volume-title":"Computer Vision \u2013 ECCV 2018","author":"A Pumarola","year":"2018","unstructured":"Pumarola, A., Agudo, A., Martinez, A.M., Sanfeliu, A., Moreno-Noguer, F.: GANimation: anatomically-aware facial animation from a single image. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11214, pp. 835\u2013851. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01249-6_50"},{"key":"34_CR28","doi-asserted-by":"crossref","unstructured":"Wu, W., Cao, K., Li, C., Qian, C., Loy, C.C.: TransGaGa: geometry-aware unsupervised image-to-image translation. arXiv preprint arXiv:1904.09571 (2019)","DOI":"10.1109\/CVPR.2019.00820"},{"key":"34_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"172","DOI":"10.1007\/978-3-030-01249-6_11","volume-title":"Computer Vision \u2013 ECCV 2018","author":"T Xiao","year":"2018","unstructured":"Xiao, T., Hong, J., Ma, J.: ELEGANT: exchanging latent encodings with GAN for transferring multiple face attributes. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11214, pp. 172\u2013187. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01249-6_11"},{"key":"34_CR30","doi-asserted-by":"crossref","unstructured":"Yi, Z., Zhang, H., Tan, P., Gong, M.: DualGAN: unsupervised dual learning for image-to-image translation. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.310"},{"key":"34_CR31","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"34_CR32","unstructured":"Zheng, Z., Yu, Z., Zheng, H., Wu, Y., Zheng, B., Lin, P.: Generative adversarial network with multi-branch discriminator for cross-species image-to-image translation. arXiv preprint arXiv:1901.10895 (2019)"},{"key":"34_CR33","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.244"},{"key":"34_CR34","unstructured":"Zhu, J.Y., et al.: Toward multimodal image-to-image translation. In: NeurIPS (2017)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2020"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-58598-3_34","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,6]],"date-time":"2024-11-06T00:34:54Z","timestamp":1730853294000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-58598-3_34"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030585976","9783030585983"],"references-count":34,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-58598-3_34","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"7 November 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Glasgow","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 August 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2020.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OpenReview","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5025","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1360","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"27% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 pandemic.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}