{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,9]],"date-time":"2025-10-09T21:07:54Z","timestamp":1760044074589,"version":"3.40.3"},"publisher-location":"Cham","reference-count":46,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031200496"},{"type":"electronic","value":"9783031200502"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20050-2_13","type":"book-chapter","created":{"date-parts":[[2022,10,27]],"date-time":"2022-10-27T22:09:58Z","timestamp":1666908598000},"page":"205-221","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["GAN Cocktail: Mixing GANs Without Dataset Access"],"prefix":"10.1007","author":[{"given":"Omri","family":"Avrahami","sequence":"first","affiliation":[]},{"given":"Dani","family":"Lischinski","sequence":"additional","affiliation":[]},{"given":"Ohad","family":"Fried","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,28]]},"reference":[{"doi-asserted-by":"crossref","unstructured":"Abdal, R., Qin, Y., Wonka, P.: Image2stylegan: how to embed images into the StyleGAN latent space? In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
4432\u20134441 (2019)","key":"13_CR1","DOI":"10.1109\/ICCV.2019.00453"},{"doi-asserted-by":"crossref","unstructured":"Abdal, R., Qin, Y., Wonka, P.: Image2stylegan++: how to edit the embedded images? In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8296\u20138305 (2020)","key":"13_CR2","DOI":"10.1109\/CVPR42600.2020.00832"},{"doi-asserted-by":"crossref","unstructured":"Bao, Y., et al.: An information-theoretic approach to transferability in task transfer learning. In: 2019 IEEE International Conference on Image Processing (ICIP), pp. 2309\u20132313. IEEE (2019)","key":"13_CR3","DOI":"10.1109\/ICIP.2019.8803726"},{"doi-asserted-by":"crossref","unstructured":"Bau, D., et al.: Seeing what a GAN cannot generate. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4502\u20134511 (2019)","key":"13_CR4","DOI":"10.1109\/ICCV.2019.00460"},{"unstructured":"Brock, A., Donahue, J., Simonyan, K.: Large scale GAN training for high fidelity natural image synthesis. In: International Conference on Learning Representations (2018)","key":"13_CR5"},{"unstructured":"Chen, M., et al.: Generative pretraining from pixels. In: International Conference on Machine Learning, pp. 1691\u20131703. PMLR (2020)","key":"13_CR6"},{"unstructured":"Donahue, J., et al.: Decaf: a deep convolutional activation feature for generic visual recognition. In: International Conference on Machine Learning, pp. 647\u2013655. PMLR (2014)","key":"13_CR7"},{"unstructured":"Geyer, R., Corinzia, L., Wegmayr, V.: Transfer learning by adaptive merging of multiple models. In: International Conference on Medical Imaging with Deep Learning, pp. 185\u2013196. PMLR (2019)","key":"13_CR8"},{"unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: Ghahramani, Z., Welling, M., Cortes, C., Lawrence, N., Weinberger, K.Q. (eds.) Advances in Neural Information Processing Systems, vol. 27. Curran Associates, Inc. (2014). 
https:\/\/proceedings.neurips.cc\/paper\/2014\/file\/5ca3e9b122f61f8f06494c97b1afccf3-Paper.pdf","key":"13_CR9"},{"unstructured":"Gulrajani, I., Ahmed, F., Arjovsky, M., Dumoulin, V., Courville, A.C.: Improved training of Wasserstein GANs. In: NIPS (2017)","key":"13_CR10"},{"key":"13_CR11","first-page":"9841","volume":"33","author":"E H\u00e4rk\u00f6nen","year":"2020","unstructured":"H\u00e4rk\u00f6nen, E., Hertzmann, A., Lehtinen, J., Paris, S.: GANSpace: discovering interpretable GAN controls. Advances in Neural Information Process. Syst. 33, 9841\u20139850 (2020)","journal-title":"Advances in Neural Information Process. Syst."},{"unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local Nash equilibrium. In: Guyon, I., Luxburg, U.V. (eds.) Advances in Neural Information Processing Systems. vol. 30. Curran Associates, Inc. (2017). https:\/\/proceedings.neurips.cc\/paper\/2017\/file\/8a1d694707eb0fefe65871369074926d-Paper.pdf","key":"13_CR12"},{"unstructured":"Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of GANs for improved quality, stability, and variation. arXiv preprint arXiv:1710.10196 (2017)","key":"13_CR13"},{"doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401\u20134410 (2019)","key":"13_CR14","DOI":"10.1109\/CVPR.2019.00453"},{"doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of StyleGAN. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
8110\u20138119 (2020)","key":"13_CR15","DOI":"10.1109\/CVPR42600.2020.00813"},{"issue":"13","key":"13_CR16","doi-asserted-by":"publisher","first-page":"3521","DOI":"10.1073\/pnas.1611835114","volume":"114","author":"J Kirkpatrick","year":"2017","unstructured":"Kirkpatrick, J., et al.: Overcoming catastrophic forgetting in neural networks. Proc. Nat. Acad. Sci. 114(13), 3521\u20133526 (2017)","journal-title":"Proc. Nat. Acad. Sci."},{"doi-asserted-by":"crossref","unstructured":"Kornblith, S., Shlens, J., Le, Q.V.: Do better imagenet models transfer better? In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2661\u20132671 (2019)","key":"13_CR17","DOI":"10.1109\/CVPR.2019.00277"},{"unstructured":"Li, Y., Zhang, R., Lu, J.C., Shechtman, E.: Few-shot image generation with elastic weight consolidation. In: Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M.F., Lin, H. (eds.) Advances in Neural Information Processing Systems, vol. 33, pp. 15885\u201315896. Curran Associates, Inc. (2020). https:\/\/proceedings.neurips.cc\/paper\/2020\/file\/b6d767d2f8ed5d21a44b0e5886680cb9-Paper.pdf","key":"13_CR18"},{"issue":"12","key":"13_CR19","doi-asserted-by":"publisher","first-page":"2935","DOI":"10.1109\/TPAMI.2017.2773081","volume":"40","author":"Z Li","year":"2017","unstructured":"Li, Z., Hoiem, D.: Learning without forgetting. IEEE Trans. Pattern Anal. Mach. Intell. 40(12), 2935\u20132947 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"doi-asserted-by":"crossref","unstructured":"Mao, X., Li, Q., Xie, H., Lau, R.Y., Wang, Z., Paul Smolley, S.: Least squares generative adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2794\u20132802 (2017)","key":"13_CR20","DOI":"10.1109\/ICCV.2017.304"},{"unstructured":"Mescheder, L., Geiger, A., Nowozin, S.: Which training methods for GANs do actually converge? In: International Conference on Machine Learning, pp. 3481\u20133490. 
PMLR (2018)","key":"13_CR21"},{"unstructured":"Mirza, M., Osindero, S.: Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784 (2014)","key":"13_CR22"},{"unstructured":"Miyato, T., Kataoka, T., Koyama, M., Yoshida, Y.: Spectral normalization for generative adversarial networks. arXiv preprint arXiv:1802.05957 (2018)","key":"13_CR23"},{"unstructured":"Mo, S., Cho, M., Shin, J.: Freeze discriminator: a simple baseline for fine-tuning GANs. arXiv preprint arXiv:2002.10964 (2020)","key":"13_CR24"},{"unstructured":"Nguyen, C., Hassner, T., Seeger, M., Archambeau, C.: Leep: a new measure to evaluate transferability of learned representations. In: International Conference on Machine Learning, pp. 7294\u20137305. PMLR (2020)","key":"13_CR25"},{"doi-asserted-by":"crossref","unstructured":"Noguchi, A., Harada, T.: Image generation from small datasets via batch statistics adaptation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2750\u20132758 (2019)","key":"13_CR26","DOI":"10.1109\/ICCV.2019.00284"},{"doi-asserted-by":"crossref","unstructured":"Oquab, M., Bottou, L., Laptev, I., Sivic, J.: Learning and transferring mid-level image representations using convolutional neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1717\u20131724 (2014)","key":"13_CR27","DOI":"10.1109\/CVPR.2014.222"},{"issue":"10","key":"13_CR28","doi-asserted-by":"publisher","first-page":"1345","DOI":"10.1109\/TKDE.2009.191","volume":"22","author":"SJ Pan","year":"2009","unstructured":"Pan, S.J., Yang, Q.: A survey on transfer learning. IEEE Trans. Knowl. Data Eng. 22(10), 1345\u20131359 (2009)","journal-title":"IEEE Trans. Knowl. Data Eng."},{"doi-asserted-by":"crossref","unstructured":"Pidhorskyi, S., Adjeroh, D.A., Doretto, G.: Adversarial latent autoencoders. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
14104\u201314113 (2020)","key":"13_CR29","DOI":"10.1109\/CVPR42600.2020.01411"},{"unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. arXiv preprint arXiv:2103.00020 (2021)","key":"13_CR30"},{"unstructured":"Ramesh, A., et al.: Zero-shot text-to-image generation. arXiv preprint arXiv:2102.12092 (2021)","key":"13_CR31"},{"unstructured":"Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X.: Improved techniques for training GANs. arXiv preprint arXiv:1606.03498 (2016)","key":"13_CR32"},{"unstructured":"Seff, A., Beatson, A., Suo, D., Liu, H.: Continual learning in generative adversarial nets. arXiv preprint arXiv:1705.08395 (2017)","key":"13_CR33"},{"doi-asserted-by":"crossref","unstructured":"Shen, Y., Gu, J., Tang, X., Zhou, B.: Interpreting the latent space of GANs for semantic face editing. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9243\u20139252 (2020)","key":"13_CR34","DOI":"10.1109\/CVPR42600.2020.00926"},{"key":"13_CR35","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/978-3-030-00536-8_1","volume-title":"Simulation and Synthesis in Medical Imaging","author":"H-C Shin","year":"2018","unstructured":"Shin, H.-C., et al.: Medical image synthesis for data augmentation and anonymization using generative adversarial networks. In: Gooya, A., Goksel, O., Oguz, I., Burgos, N. (eds.) SASHIMI 2018. LNCS, vol. 11037, pp. 1\u201311. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-00536-8_1"},{"unstructured":"Shu, Y., Kou, Z., Cao, Z., Wang, J., Long, M.: Zoo-tuning: adaptive transfer from a zoo of models. In: International Conference on Machine Learning, pp. 9626\u20139637. PMLR (2021)","key":"13_CR36"},{"unstructured":"Tarvainen, A., Valpola, H.: Mean teachers are better role models: weight-averaged consistency targets improve semi-supervised deep learning results. 
In: Guyon, I., et al. (eds.) Advances in Neural Information Processing Systems. vol. 30. Curran Associates, Inc. (2017). https:\/\/proceedings.neurips.cc\/paper\/2017\/file\/68053af2923e00204c3ca7c6a3150cf7-Paper.pdf","key":"13_CR37"},{"doi-asserted-by":"crossref","unstructured":"Tran, A.T., Nguyen, C.V., Hassner, T.: Transferability and hardness of supervised classification tasks. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1395\u20131405 (2019)","key":"13_CR38","DOI":"10.1109\/ICCV.2019.00148"},{"key":"13_CR39","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"170","DOI":"10.1007\/978-3-030-58542-6_11","volume-title":"Computer Vision","author":"Y Viazovetskyi","year":"2020","unstructured":"Viazovetskyi, Y., Ivashkin, V., Kashin, E.: StyleGAN2 distillation for feed-forward image manipulation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12367, pp. 170\u2013186. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58542-6_11"},{"doi-asserted-by":"crossref","unstructured":"Wang, Y., Gonzalez-Garcia, A., Berga, D., Herranz, L., Khan, F.S., Weijer, J.V.D.: Minegan: effective knowledge transfer from GANs to target domains with few images. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9332\u20139341 (2020)","key":"13_CR40","DOI":"10.1109\/CVPR42600.2020.00935"},{"doi-asserted-by":"crossref","unstructured":"Wang, Y., Wu, C., Herranz, L., van de Weijer, J., Gonzalez-Garcia, A., Raducanu, B.: Transferring GANs: generating images from limited data. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 
218\u2013234 (2018)","key":"13_CR41","DOI":"10.1007\/978-3-030-01231-1_14"},{"key":"13_CR42","first-page":"5962","volume":"31","author":"C Wu","year":"2018","unstructured":"Wu, C., Herranz, L., Liu, X., van de Weijer, J., Raducanu, B., et al.: Memory replay GANs: learning to generate new categories without forgetting. Adv. Neural Inf. Process. Syst. 31, 5962\u20135972 (2018)","journal-title":"Adv. Neural Inf. Process. Syst."},{"issue":"8","key":"13_CR43","doi-asserted-by":"publisher","first-page":"2378","DOI":"10.1109\/JBHI.2020.2980262","volume":"24","author":"J Yoon","year":"2020","unstructured":"Yoon, J., Drumright, L.N., Van Der Schaar, M.: Anonymization through data synthesis using generative adversarial networks (ADS-GAN). IEEE J. Biomed. Health Inf. 24(8), 2378\u20132388 (2020)","journal-title":"IEEE J. Biomed. Health Inf."},{"unstructured":"Yu, F., Seff, A., Zhang, Y., Song, S., Funkhouser, T., Xiao, J.: LSUN: construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365 (2015)","key":"13_CR44"},{"unstructured":"Zenke, F., Poole, B., Ganguli, S.: Continual learning through synaptic intelligence. In: International Conference on Machine Learning, pp. 3987\u20133995. PMLR (2017)","key":"13_CR45"},{"doi-asserted-by":"crossref","unstructured":"Zhai, M., Chen, L., Tung, F., He, J., Nawhal, M., Mori, G.: Lifelong GAN: continual learning for conditional image generation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
2759\u20132768 (2019)","key":"13_CR46","DOI":"10.1109\/ICCV.2019.00285"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20050-2_13","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,27]],"date-time":"2022-10-27T22:23:50Z","timestamp":1666909430000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20050-2_13"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031200496","9783031200502"],"references-count":46,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20050-2_13","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"28 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole 
number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}