{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T10:36:01Z","timestamp":1743071761958,"version":"3.40.3"},"publisher-location":"Cham","reference-count":35,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031781940"},{"type":"electronic","value":"9783031781957"}],"license":[{"start":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T00:00:00Z","timestamp":1733184000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T00:00:00Z","timestamp":1733184000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-78195-7_3","type":"book-chapter","created":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T11:12:06Z","timestamp":1733137926000},"page":"29-45","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["A New StyleGAN Latent Space Based Model for Image Style Transfer"],"prefix":"10.1007","author":[{"given":"Rakesh","family":"Dey","sequence":"first","affiliation":[]},{"given":"Shivakumara","family":"Palaiahnakote","sequence":"additional","affiliation":[]},{"given":"Saumik","family":"Bhattacharya","sequence":"additional","affiliation":[]},{"given":"Sukalpa","family":"Chanda","sequence":"additional","affiliation":[]},{"given":"Umapada","family":"Pal","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,3]]},"reference":[{"key":"3_CR1","doi-asserted-by":"crossref","unstructured":"Zhao, J., Zhang, H (2022). Thin-plate spline motion model for image animation. In: Proc. CVPR. pp. 3657\u20133666","DOI":"10.1109\/CVPR52688.2022.00364"},{"key":"3_CR2","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y (2014). Generative adversarial nets. Advances in neural information processing systems. 27"},{"key":"3_CR3","doi-asserted-by":"publisher","first-page":"504","DOI":"10.1126\/science.1127647","volume":"313","author":"GE Hinton","year":"2006","unstructured":"Hinton, G.E., Salakhutdinov, R.R (2006). Reducing the dimensionality of data with neural networks. Science. 313, 504\u2013507","journal-title":"Science"},{"key":"3_CR4","unstructured":"Kingma, D.P., Welling, M (2013). Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114"},{"key":"3_CR5","doi-asserted-by":"crossref","unstructured":"Huang, X., Belongie, S (2017). Arbitrary style transfer in real-time with adaptive instance normalization. In Proc. ICCV. pp. 1501\u20131510","DOI":"10.1109\/ICCV.2017.167"},{"key":"3_CR6","doi-asserted-by":"crossref","unstructured":"Johnson, J., Alahi, A., Fei-Fei, L (2016). Perceptual losses for real-time style transfer and super-resolution. In Proc. ECCV. pp. 694\u2013711","DOI":"10.1007\/978-3-319-46475-6_43"},{"key":"3_CR7","unstructured":"Radford, A. et al. (2021). Learning transferable visual models from natural language supervision. In Proc. PMLR, pp. 8748\u20138763"},{"key":"3_CR8","doi-asserted-by":"crossref","unstructured":"Patashnik, O., Wu, Z., Shechtman, E., Cohen-Or, D., Lischinski, D (2021). Styleclip: Text-driven manipulation of stylegan imagery. In Proc. ICCV. pp. 2085\u20132094","DOI":"10.1109\/ICCV48922.2021.00209"},{"key":"3_CR9","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3450626.3459860","volume":"40","author":"W Jang","year":"2021","unstructured":"Jang, W., Ju, G., Jung, Y., Yang, J., Tong, X., Lee, S (2021). StyleCariGAN: caricature generation via StyleGAN feature map modulation. ACM Transactions on Graphics (TOG). 40, 1\u201316","journal-title":"ACM Transactions on Graphics (TOG)"},{"key":"3_CR10","doi-asserted-by":"crossref","unstructured":"Yang, T et al. (2022). Beyond a Video Frame Interpolator: A Space Decoupled Learning Approach to Continuous Image Transition. In Proc. ECCV. pp. 738\u2013755","DOI":"10.1007\/978-3-031-25069-9_47"},{"key":"3_CR11","doi-asserted-by":"crossref","unstructured":"Yang, S., Jiang, L., Liu, Z., Loy, C.C (2022). Pastiche master: Exemplar-based high-resolution portrait style transfer. In Proc. CVPR. pp. 7693\u20137702","DOI":"10.1109\/CVPR52688.2022.00754"},{"key":"3_CR12","doi-asserted-by":"crossref","unstructured":"Deng, Y., Tang, F., Dong, W., Ma, C., Pan, X., Wang, L., Xu, C (2022). Stytr2: Image style transfer with transformers. In Proc. CVPR. pp. 11326\u201311336","DOI":"10.1109\/CVPR52688.2022.01104"},{"key":"3_CR13","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B (2022). High-resolution image synthesis with latent diffusion models. In Proc. CVPR. pp. 10684\u201310695","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"3_CR14","doi-asserted-by":"crossref","unstructured":"Ulyanov, D., Vedaldi, A., Lempitsky, V (2018). Deep image prior. In Proc. CVPR. pp. 9446\u20139454","DOI":"10.1109\/CVPR.2018.00984"},{"key":"3_CR15","doi-asserted-by":"crossref","unstructured":"Luo, W., Yang, S., Zhang, X., Zhang, W (2023). SIEDOB: Semantic Image Editing by Disentangling Object and Background. In Proc. CVPR. pp. 1868\u20131878","DOI":"10.1109\/CVPR52729.2023.00186"},{"key":"3_CR16","unstructured":"Karras, T., Aila, T., Laine, S., Lehtinen, J (2017). Progressive growing of gans for improved quality, stability, and variation. arXiv preprint arXiv:1710.10196"},{"key":"3_CR17","first-page":"852","volume":"34","author":"T Karras","year":"2021","unstructured":"Karras, T et al. (2021), Alias-free generative adversarial networks. Advances in neural information processing systems. 34, 852\u2013863","journal-title":"Advances in neural information processing systems"},{"key":"3_CR18","doi-asserted-by":"crossref","unstructured":"Wang, X., Xie, L., Dong, C., Shan, Y (2021). Real-esrgan: Training real-world blind super-resolution with pure synthetic data. In Proc. ICCV. pp. 1905\u20131914","DOI":"10.1109\/ICCVW54120.2021.00217"},{"key":"3_CR19","doi-asserted-by":"crossref","unstructured":"Choi, Y., Uh, Y., Yoo, J., Ha, J.-W (2020). Stargan v2: Diverse image synthesis for multiple domains. In Proc. CVPR. pp. 8188\u20138197","DOI":"10.1109\/CVPR42600.2020.00821"},{"key":"3_CR20","doi-asserted-by":"crossref","unstructured":"Chen, Y., Liu, S., Wang, X (2021). Learning continuous image representation with local implicit image function. In Proc. CVPR. pp. 8628\u20138638","DOI":"10.1109\/CVPR46437.2021.00852"},{"key":"3_CR21","doi-asserted-by":"crossref","unstructured":"Shi, Y., Deb, D., Jain, A.K (2019). Warpgan: Automatic caricature generation. In Proc. CVPR. pp. 10762\u201310771","DOI":"10.1109\/CVPR.2019.01102"},{"key":"3_CR22","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T (2020). Analyzing and improving the image quality of stylegan. In Proc. CVPR. pp. 8110\u20138119","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"3_CR23","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3447648","volume":"40","author":"R Abdal","year":"2021","unstructured":"Abdal, R., Zhu, P., Mitra, N.J., Wonka, P (2021). Styleflow: Attribute-conditioned exploration of stylegan-generated images using conditional continuous normalizing flows. ACM Transactions on Graphics (ToG). 40, 1\u201321","journal-title":"ACM Transactions on Graphics (ToG)."},{"key":"3_CR24","unstructured":"Abdal, R., Zhu, P., Mitra, N.J., Wonka, P (2022). Video2stylegan: Disentangling local and global variations in a video. arXiv preprint arXiv:2205.13996"},{"key":"3_CR25","doi-asserted-by":"crossref","unstructured":"Pehlivan, H., Dalva, Y., Dundar, A (2022). StyleRes: Transforming the Residuals for Real Image Editing with StyleGAN. abs\/2212.14359","DOI":"10.1109\/CVPR52729.2023.00182"},{"key":"3_CR26","doi-asserted-by":"publisher","DOI":"10.1145\/3610287","author":"A Baykal","year":"2023","unstructured":"Baykal, A et al. (2023). CLIP-Guided StyleGAN Inversion for Text-Driven Real Image Editing. https:\/\/doi.org\/10.1145\/3610287","journal-title":"CLIP-Guided StyleGAN Inversion for Text-Driven Real Image Editing."},{"key":"3_CR27","doi-asserted-by":"publisher","unstructured":"Gal, R et al. (2022). An Image is Worth One Word: Personalizing Text-to-Image Generation using Textual Inversion. Presented at https:\/\/doi.org\/10.48550\/arXiv.2208.01618.","DOI":"10.48550\/arXiv.2208.01618"},{"key":"3_CR28","doi-asserted-by":"crossref","unstructured":"Zhang, Y et al. (2023). Inversion-Based Creativity Transfer with Diffusion Models, In Proc. CVPR, 10146\u201310156","DOI":"10.1109\/CVPR52729.2023.00978"},{"key":"3_CR29","doi-asserted-by":"crossref","unstructured":"Richardson, E et al. (2021). Encoding in style: a stylegan encoder for image-to-image translation. In Proc. CVPR. pp. 2287\u20132296","DOI":"10.1109\/CVPR46437.2021.00232"},{"key":"3_CR30","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T (2019). A style-based generator architecture for generative adversarial networks. In Proc. CVPR. pp. 4401\u20134410","DOI":"10.1109\/CVPR.2019.00453"},{"key":"3_CR31","doi-asserted-by":"crossref","unstructured":"Deng, J., Guo, J., Xue, N., Zafeiriou, S (2019). Arcface: Additive angular margin loss for deep face recognition. In Proc. CVPR. pp. 4690\u20134699","DOI":"10.1109\/CVPR.2019.00482"},{"key":"3_CR32","unstructured":"Kingma, D.P., Ba, J (2014). Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980."},{"key":"3_CR33","doi-asserted-by":"crossref","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. 600\u2013612.","DOI":"10.1109\/TIP.2003.819861"},{"key":"3_CR34","unstructured":"Krause, J., Deng, J., Stark, M., & Fei-Fei, L (2023). Collecting a large-scale dataset of fine-grained cars. https:\/\/ai.stanford.edu\/~jkrause\/papers\/fgvc13.pdf"},{"key":"3_CR35","unstructured":"Yu, F et al. (2015). Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop.\u00a0arXiv preprint arXiv:1506.03365"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-78195-7_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T12:02:39Z","timestamp":1733140959000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-78195-7_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,3]]},"ISBN":["9783031781940","9783031781957"],"references-count":35,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-78195-7_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,12,3]]},"assertion":[{"value":"3 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kolkata","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 December 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icpr2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icpr2024.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}