{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T15:48:00Z","timestamp":1776181680999,"version":"3.50.1"},"publisher-location":"Cham","reference-count":84,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030012182","type":"print"},{"value":"9783030012199","type":"electronic"}],"license":[{"start":{"date-parts":[[2018,1,1]],"date-time":"2018-01-01T00:00:00Z","timestamp":1514764800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2018,1,1]],"date-time":"2018-01-01T00:00:00Z","timestamp":1514764800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2018]]},"DOI":"10.1007\/978-3-030-01219-9_11","type":"book-chapter","created":{"date-parts":[[2018,10,6]],"date-time":"2018-10-06T14:23:51Z","timestamp":1538835831000},"page":"179-196","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1569,"title":["Multimodal Unsupervised Image-to-Image Translation"],"prefix":"10.1007","author":[{"given":"Xun","family":"Huang","sequence":"first","affiliation":[]},{"given":"Ming-Yu","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Serge","family":"Belongie","sequence":"additional","affiliation":[]},{"given":"Jan","family":"Kautz","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2018,10,7]]},"reference":[{"key":"11_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"184","DOI":"10.1007\/978-3-319-10593-2_13","volume-title":"Computer Vision \u2013 ECCV 2014","author":"C Dong","year":"2014","unstructured":"Dong, C., Loy, C.C., He, K., Tang, X.: Learning a deep 
convolutional network for image super-resolution. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8692, pp. 184\u2013199. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10593-2_13"},{"key":"11_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"649","DOI":"10.1007\/978-3-319-46487-9_40","volume-title":"Computer Vision \u2013 ECCV 2016","author":"R Zhang","year":"2016","unstructured":"Zhang, R., Isola, P., Efros, A.A.: Colorful image colorization. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9907, pp. 649\u2013666. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46487-9_40"},{"key":"11_CR3","doi-asserted-by":"crossref","unstructured":"Pathak, D., Krahenbuhl, P., Donahue, J., Darrell, T., Efros, A.A.: Context encoders: feature learning by inpainting. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.278"},{"key":"11_CR4","first-page":"149","volume":"34","author":"PY Laffont","year":"2014","unstructured":"Laffont, P.Y., Ren, Z., Tao, X., Qian, C., Hays, J.: Transient attributes for high-level understanding and editing of outdoor scenes. TOG 34, 149 (2014)","journal-title":"TOG"},{"key":"11_CR5","doi-asserted-by":"crossref","unstructured":"Gatys, L.A., Ecker, A.S., Bethge, M.: Image style transfer using convolutional neural networks. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.265"},{"key":"11_CR6","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"11_CR7","doi-asserted-by":"crossref","unstructured":"Yi, Z., Zhang, H., Tan, P., Gong, M.: DualGAN: unsupervised dual learning for image-to-image translation. 
In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.310"},{"key":"11_CR8","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.244"},{"key":"11_CR9","unstructured":"Kim, T., Cha, M., Kim, H., Lee, J., Kim, J.: Learning to discover cross-domain relations with generative adversarial networks. In: ICML (2017)"},{"key":"11_CR10","unstructured":"Taigman, Y., Polyak, A., Wolf, L.: Unsupervised cross-domain image generation. In: ICLR (2017)"},{"key":"11_CR11","unstructured":"Zhu, J.Y., Zhang, R., Pathak, D., Darrell, T., Efros, A.A., Wang, O., Shechtman, E.: Toward multimodal image-to-image translation. In: NIPS (2017)"},{"key":"11_CR12","unstructured":"Liu, M.Y., Tuzel, O.: Coupled generative adversarial networks. In: NIPS (2016)"},{"key":"11_CR13","doi-asserted-by":"crossref","unstructured":"Chen, Q., Koltun, V.: Photographic image synthesis with cascaded refinement networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.168"},{"key":"11_CR14","unstructured":"Liang, X., Zhang, H., Xing, E.P.: Generative semantic manipulation with contrasting GAN. arXiv preprint arXiv:1708.00315 (2017)"},{"key":"11_CR15","unstructured":"Liu, M.Y., Breuel, T., Kautz, J.: Unsupervised image-to-image translation networks. In: NIPS (2017)"},{"key":"11_CR16","unstructured":"Benaim, S., Wolf, L.: One-sided unsupervised domain mapping. In: NIPS (2017)"},{"key":"11_CR17","unstructured":"Royer, A., et al.: XGAN: unsupervised image-to-image translation for many-to-many mappings. arXiv preprint arXiv:1711.05139 (2017)"},{"key":"11_CR18","unstructured":"Gan, Z., et al.: Triangle generative adversarial networks. In: NIPS, pp. 
5253\u20135262 (2017)"},{"key":"11_CR19","doi-asserted-by":"crossref","unstructured":"Choi, Y., Choi, M., Kim, M., Ha, J.W., Kim, S., Choo, J.: StarGAN: unified generative adversarial networks for multi-domain image-to-image translation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00916"},{"key":"11_CR20","doi-asserted-by":"crossref","unstructured":"Wang, T.C., Liu, M.Y., Zhu, J.Y., Tao, A., Kautz, J., Catanzaro, B.: High-resolution image synthesis and semantic manipulation with conditional GANs. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00917"},{"key":"11_CR21","doi-asserted-by":"crossref","unstructured":"Shrivastava, A., Pfister, T., Tuzel, O., Susskind, J., Wang, W., Webb, R.: Learning from simulated and unsupervised images through adversarial training. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.241"},{"key":"11_CR22","doi-asserted-by":"crossref","unstructured":"Bousmalis, K., Silberman, N., Dohan, D., Erhan, D., Krishnan, D.: Unsupervised pixel-level domain adaptation with generative adversarial networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.18"},{"key":"11_CR23","doi-asserted-by":"crossref","unstructured":"Wolf, L., Taigman, Y., Polyak, A.: Unsupervised creation of parameterized avatars. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.170"},{"key":"11_CR24","unstructured":"Tau, T.G., Wolf, L., Tau, S.B.: The role of minimal complexity functions in unsupervised learning of semantic mappings. In: ICLR (2018)"},{"key":"11_CR25","unstructured":"Hoshen, Y., Wolf, L.: Identifying analogies across domains. In: ICLR (2018)"},{"key":"11_CR26","unstructured":"Mathieu, M., Couprie, C., LeCun, Y.: Deep multi-scale video prediction beyond mean square error. In: ICLR (2016)"},{"key":"11_CR27","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: NIPS (2014)"},{"key":"11_CR28","unstructured":"Denton, E.L., Chintala, S., Fergus, R.: Deep generative image models using a Laplacian pyramid of adversarial networks. 
In: NIPS (2015)"},{"key":"11_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"318","DOI":"10.1007\/978-3-319-46493-0_20","volume-title":"Computer Vision \u2013 ECCV 2016","author":"X Wang","year":"2016","unstructured":"Wang, X., Gupta, A.: Generative image modeling using style and structure adversarial networks. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 318\u2013335. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_20"},{"key":"11_CR30","unstructured":"Yang, J., Kannan, A., Batra, D., Parikh, D.: LR-GAN: layered recursive generative adversarial networks for image generation. In: ICLR (2017)"},{"key":"11_CR31","doi-asserted-by":"crossref","unstructured":"Huang, X., Li, Y., Poursaeed, O., Hopcroft, J., Belongie, S.: Stacked generative adversarial networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.202"},{"key":"11_CR32","doi-asserted-by":"crossref","unstructured":"Zhang, H., et al.: StackGAN: text to photo-realistic image synthesis with stacked generative adversarial networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.629"},{"key":"11_CR33","unstructured":"Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of GANs for improved quality, stability, and variation. In: ICLR (2018)"},{"key":"11_CR34","unstructured":"Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X.: Improved techniques for training GANs. In: NIPS (2016)"},{"key":"11_CR35","unstructured":"Zhao, J., Mathieu, M., LeCun, Y.: Energy-based generative adversarial network. In: ICLR (2017)"},{"key":"11_CR36","unstructured":"Arjovsky, M., Chintala, S., Bottou, L.: Wasserstein generative adversarial networks. In: ICML (2017)"},{"key":"11_CR37","unstructured":"Berthelot, D., Schumm, T., Metz, L.: BEGAN: boundary equilibrium generative adversarial networks. 
arXiv preprint arXiv:1703.10717 (2017)"},{"key":"11_CR38","doi-asserted-by":"crossref","unstructured":"Mao, X., Li, Q., Xie, H., Lau, Y.R., Wang, Z., Smolley, S.P.: Least squares generative adversarial networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.304"},{"key":"11_CR39","unstructured":"Tolstikhin, I., Bousquet, O., Gelly, S., Schoelkopf, B.: Wasserstein auto-encoders. In: ICLR (2018)"},{"key":"11_CR40","unstructured":"Larsen, A.B.L., S\u00f8nderby, S.K., Larochelle, H., Winther, O.: Autoencoding beyond pixels using a learned similarity metric. In: ICML (2016)"},{"key":"11_CR41","unstructured":"Dosovitskiy, A., Brox, T.: Generating images with perceptual similarity metrics based on deep networks. In: NIPS (2016)"},{"key":"11_CR42","unstructured":"Rosca, M., Lakshminarayanan, B., Warde-Farley, D., Mohamed, S.: Variational approaches for auto-encoding generative adversarial networks. arXiv preprint arXiv:1706.04987 (2017)"},{"key":"11_CR43","unstructured":"Li, C., et al.: Alice: towards understanding adversarial learning for joint distribution matching. In: NIPS (2017)"},{"key":"11_CR44","unstructured":"Srivastava, A., Valkoz, L., Russell, C., Gutmann, M.U., Sutton, C.: VEEGAN: reducing mode collapse in gans using implicit variational learning. In: NIPS (2017)"},{"key":"11_CR45","doi-asserted-by":"crossref","unstructured":"Ghosh, A., Kulharia, V., Namboodiri, V., Torr, P.H., Dokania, P.K.: Multi-agent diverse generative adversarial networks. arXiv preprint arXiv:1704.02906 (2017)","DOI":"10.1109\/CVPR.2018.00888"},{"key":"11_CR46","unstructured":"Bansal, A., Sheikh, Y., Ramanan, D.: PixeLNN: example-based image synthesis. In: ICLR (2018)"},{"key":"11_CR47","unstructured":"Almahairi, A., Rajeswar, S., Sordoni, A., Bachman, P., Courville, A.: Augmented cycleGAN: learning many-to-many mappings from unpaired data. 
arXiv preprint arXiv:1802.10151 (2018)"},{"key":"11_CR48","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"36","DOI":"10.1007\/978-3-030-01246-5_3","volume-title":"ECCV 2018, Part I","author":"HY Lee","year":"2018","unstructured":"Lee, H.Y., Tseng, H.Y., Huang, J.B., Singh, M.K., Yang, M.H.: Diverse image-to-image translation via disentangled representation. In: Ferrari, V. (ed.) ECCV 2018, Part I. LNCS, vol. 11205, pp. 36\u201352. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01246-5_3"},{"key":"11_CR49","doi-asserted-by":"crossref","unstructured":"Anoosheh, A., Agustsson, E., Timofte, R., Van Gool, L.: ComboGAN: unrestrained scalability for image domain translation. arXiv preprint arXiv:1712.06909 (2017)","DOI":"10.1109\/CVPRW.2018.00122"},{"key":"11_CR50","doi-asserted-by":"crossref","unstructured":"Hui, L., Li, X., Chen, J., He, H., Yang, J., et al.: Unsupervised multi-domain image translation with domain-specific encoders\/decoders. arXiv preprint arXiv:1712.02050 (2017)","DOI":"10.1109\/ICPR.2018.8545169"},{"key":"11_CR51","doi-asserted-by":"crossref","unstructured":"Hertzmann, A., Jacobs, C.E., Oliver, N., Curless, B., Salesin, D.H.: Image analogies. In: SIGGRAPH (2001)","DOI":"10.1145\/383259.383295"},{"key":"11_CR52","doi-asserted-by":"crossref","unstructured":"Li, C., Wand, M.: Combining markov random fields and convolutional neural networks for image synthesis. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.272"},{"key":"11_CR53","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"694","DOI":"10.1007\/978-3-319-46475-6_43","volume-title":"Computer Vision \u2013 ECCV 2016","author":"J Johnson","year":"2016","unstructured":"Johnson, J., Alahi, A., Fei-Fei, L.: Perceptual losses for real-time style transfer and super-resolution. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9906, pp. 694\u2013711. Springer, Cham (2016). 
https:\/\/doi.org\/10.1007\/978-3-319-46475-6_43"},{"key":"11_CR54","doi-asserted-by":"crossref","unstructured":"Huang, X., Belongie, S.: Arbitrary style transfer in real-time with adaptive instance normalization. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.167"},{"key":"11_CR55","unstructured":"Li, Y., Fang, C., Yang, J., Wang, Z., Lu, X., Yang, M.H.: Universal style transfer via feature transforms. In: NIPS, pp. 385\u2013395 (2017)"},{"key":"11_CR56","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"469","DOI":"10.1007\/978-3-030-01219-9_28","volume-title":"ECCV 2018, Part III","author":"Y Li","year":"2018","unstructured":"Li, Y., Liu, M.Y., Li, X., Yang, M.H., Kautz, J.: A closed-form solution to photorealistic image stylization. In: Ferrari, V., et al. (eds.) ECCV 2018, Part III. LNCS, vol. 11207, pp. 469\u2013486. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01219-9_28"},{"key":"11_CR57","unstructured":"Chen, X., Duan, Y., Houthooft, R., Schulman, J., Sutskever, I., Abbeel, P.: InfoGAN: interpretable representation learning by information maximizing generative adversarial nets. In: NIPS (2016)"},{"key":"11_CR58","unstructured":"Higgins, I., et al.: beta-VAE: learning basic visual concepts with a constrained variational framework. In: ICLR (2017)"},{"key":"11_CR59","unstructured":"Tenenbaum, J.B., Freeman, W.T.: Separating style and content. In: NIPS (1997)"},{"key":"11_CR60","unstructured":"Bousmalis, K., Trigeorgis, G., Silberman, N., Krishnan, D., Erhan, D.: Domain separation networks. In: NIPS (2016)"},{"key":"11_CR61","unstructured":"Villegas, R., Yang, J., Hong, S., Lin, X., Lee, H.: Decomposing motion and content for natural video sequence prediction. In: ICLR (2017)"},{"key":"11_CR62","unstructured":"Mathieu, M.F., Zhao, J.J., Zhao, J., Ramesh, A., Sprechmann, P., LeCun, Y.: Disentangling factors of variation in deep representation using adversarial training. 
In: NIPS (2016)"},{"key":"11_CR63","unstructured":"Denton, E.L., et al.: Unsupervised learning of disentangled representations from video. In: NIPS (2017)"},{"key":"11_CR64","doi-asserted-by":"crossref","unstructured":"Tulyakov, S., Liu, M.Y., Yang, X., Kautz, J.: MocoGAN: decomposing motion and content for video generation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00165"},{"key":"11_CR65","unstructured":"Donahue, C., Balsubramani, A., McAuley, J., Lipton, Z.C.: Semantically decomposing the latent spaces of generative adversarial networks. In: ICLR (2018)"},{"key":"11_CR66","unstructured":"Shen, T., Lei, T., Barzilay, R., Jaakkola, T.: Style transfer from non-parallel text by cross-alignment. In: Advances in Neural Information Processing Systems, pp. 6833\u20136844 (2017)"},{"key":"11_CR67","unstructured":"Donahue, J., Kr\u00e4henb\u00fchl, P., Darrell, T.: Adversarial feature learning. In: ICLR (2017)"},{"key":"11_CR68","unstructured":"Dumoulin, V., et al.: Adversarially learned inference. In: ICLR (2017)"},{"key":"11_CR69","unstructured":"Automatic differentiation in PyTorch. In: NIPS Autodiff Workshop (2017)"},{"key":"11_CR70","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"11_CR71","doi-asserted-by":"crossref","unstructured":"Ulyanov, D., Vedaldi, A., Lempitsky, V.: Improved texture networks: maximizing quality and diversity in feed-forward stylization and texture synthesis. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.437"},{"key":"11_CR72","unstructured":"Dumoulin, V., Shlens, J., Kudlur, M.: A learned representation for artistic style. In: ICLR (2017)"},{"key":"11_CR73","unstructured":"Wang, H., Liang, X., Zhang, H., Yeung, D.Y., Xing, E.P.: ZM-Net: real-time zero-shot image manipulation network. 
arXiv preprint arXiv:1703.07255 (2017)"},{"key":"11_CR74","doi-asserted-by":"crossref","unstructured":"Ghiasi, G., Lee, H., Kudlur, M., Dumoulin, V., Shlens, J.: Exploring the structure of a real-time, arbitrary neural artistic stylization network. In: BMVC (2017)","DOI":"10.5244\/C.31.114"},{"key":"11_CR75","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: ICLR (2015)"},{"key":"11_CR76","unstructured":"Li, Y., Wang, N., Shi, J., Liu, J., Hou, X.: Revisiting batch normalization for practical domain adaptation. arXiv preprint arXiv:1603.04779 (2016)"},{"key":"11_CR77","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"11_CR78","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: ImageNet classification with deep convolutional neural networks. In: Advances in neural information processing systems (2012)"},{"key":"11_CR79","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.308"},{"key":"11_CR80","doi-asserted-by":"crossref","unstructured":"Yu, A., Grauman, K.: Fine-grained visual comparisons with local learning. In: CVPR (2014)","DOI":"10.1109\/CVPR.2014.32"},{"key":"11_CR81","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"597","DOI":"10.1007\/978-3-319-46454-1_36","volume-title":"Computer Vision \u2013 ECCV 2016","author":"J-Y Zhu","year":"2016","unstructured":"Zhu, J.-Y., Kr\u00e4henb\u00fchl, P., Shechtman, E., Efros, A.A.: Generative visual manipulation on the natural image manifold. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9909, pp. 597\u2013613. Springer, Cham (2016). 
https:\/\/doi.org\/10.1007\/978-3-319-46454-1_36"},{"key":"11_CR82","doi-asserted-by":"crossref","unstructured":"Xie, S., Tu, Z.: Holistically-nested edge detection. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.164"},{"key":"11_CR83","doi-asserted-by":"crossref","unstructured":"Ros, G., Sellart, L., Materzynska, J., Vazquez, D., Lopez, A.M.: The synthia dataset: a large collection of synthetic images for semantic segmentation of urban scenes. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.352"},{"key":"11_CR84","doi-asserted-by":"crossref","unstructured":"Cordts, M., et al.: The cityscapes dataset for semantic urban scene understanding. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.350"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2018"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-01219-9_11","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,6]],"date-time":"2022-10-06T01:05:59Z","timestamp":1665018359000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-01219-9_11"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018]]},"ISBN":["9783030012182","9783030012199"],"references-count":84,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-01219-9_11","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2018]]},"assertion":[{"value":"7 October 2018","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference 
Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Munich","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Germany","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2018","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 September 2018","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 September 2018","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2018","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2018.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}]}}