{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,23]],"date-time":"2025-06-23T15:26:48Z","timestamp":1750692408280,"version":"3.40.3"},"publisher-location":"Cham","reference-count":32,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031250682"},{"type":"electronic","value":"9783031250699"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-25069-9_38","type":"book-chapter","created":{"date-parts":[[2023,2,14]],"date-time":"2023-02-14T00:15:46Z","timestamp":1676333746000},"page":"594-609","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["Text-Driven Stylization of\u00a0Video Objects"],"prefix":"10.1007","author":[{"given":"Sebastian","family":"Loeschcke","sequence":"first","affiliation":[]},{"given":"Serge","family":"Belongie","sequence":"additional","affiliation":[]},{"given":"Sagie","family":"Benaim","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,2,14]]},"reference":[{"key":"38_CR1","unstructured":"Avrahami, O., Lischinski, D., Fried, O.: Blended diffusion for text-driven editing of natural images. CoRR abs\/2111.14818 (2021). https:\/\/arxiv.org\/abs\/2111.14818"},{"key":"38_CR2","doi-asserted-by":"publisher","unstructured":"Bar-Tal, O., Ofri-Amar, D., Fridman, R., Kasten, Y., Dekel, T.: Text2live: text-driven layered image and video editing (2022). https:\/\/doi.org\/10.48550\/ARXIV.2204.02491. https:\/\/arxiv.org\/abs\/2204.02491","DOI":"10.48550\/ARXIV.2204.02491"},{"key":"38_CR3","unstructured":"Bau, D., et al.: Paint by word. CoRR abs\/2103.10951 (2021). https:\/\/arxiv.org\/abs\/2103.10951"},{"key":"38_CR4","unstructured":"Brock, A., Donahue, J., Simonyan, K.: Large scale GAN training for high fidelity natural image synthesis. CoRR abs\/1809.11096 (2018). http:\/\/arxiv.org\/abs\/1809.11096"},{"key":"38_CR5","unstructured":"Chefer, H., Benaim, S., Paiss, R., Wolf, L.: Image-based clip-guided essence transfer. CoRR abs\/2110.12427 (2021). https:\/\/arxiv.org\/abs\/2110.12427"},{"key":"38_CR6","doi-asserted-by":"publisher","unstructured":"Crowson, K., et al.: Vqgan-clip: open domain image generation and editing with natural language guidance (2022). https:\/\/doi.org\/10.48550\/ARXIV.2204.08583. https:\/\/arxiv.org\/abs\/2204.08583","DOI":"10.48550\/ARXIV.2204.08583"},{"key":"38_CR7","doi-asserted-by":"publisher","unstructured":"Frans, K., Soros, L.B., Witkowski, O.: Clipdraw: exploring text-to-drawing synthesis through language-image encoders (2021). https:\/\/doi.org\/10.48550\/ARXIV.2106.14843. https:\/\/arxiv.org\/abs\/2106.14843","DOI":"10.48550\/ARXIV.2106.14843"},{"key":"38_CR8","unstructured":"Gal, R., Patashnik, O., Maron, H., Chechik, G., Cohen-Or, D.: Stylegan-nada: clip-guided domain adaptation of image generators. CoRR abs\/2108.00946 (2021). https:\/\/arxiv.org\/abs\/2108.00946"},{"key":"38_CR9","unstructured":"Jabri, A., Owens, A., Efros, A.: Space-time correspondence as a contrastive random walk. In: Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., Lin, H. (eds.) Advances in Neural Information Processing Systems. vol. 33, pp. 19545\u201319560. Curran Associates, Inc. (2020). https:\/\/proceedings.neurips.cc\/paper\/2020\/file\/e2ef524fbf3d9fe611d5a8e90fefdc9c-Paper.pdf"},{"key":"38_CR10","unstructured":"Jampani, V., Gadde, R., Gehler, P.V.: Video propagation networks. CoRR abs\/1612.05478 (2016). http:\/\/arxiv.org\/abs\/1612.05478"},{"key":"38_CR11","unstructured":"Jetchev, N.: Clipmatrix: Text-controlled creation of 3d textured meshes. CoRR abs\/2109.12922 (2021). https:\/\/arxiv.org\/abs\/2109.12922"},{"key":"38_CR12","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. CoRR abs\/1812.04948 (2018). http:\/\/arxiv.org\/abs\/1812.04948"},{"key":"38_CR13","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. CoRR abs\/1912.04958 (2019). http:\/\/arxiv.org\/abs\/1912.04958","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"38_CR14","unstructured":"Kasten, Y., Ofri, D., Wang, O., Dekel, T.: Layered neural atlases for consistent video editing. CoRR abs\/2109.11418 (2021). https:\/\/arxiv.org\/abs\/2109.11418"},{"key":"38_CR15","unstructured":"Kim, G., Ye, J.C.: Diffusionclip: Text-guided image manipulation using diffusion models. CoRR abs\/2110.02711 (2021). https:\/\/arxiv.org\/abs\/2110.02711"},{"key":"38_CR16","unstructured":"Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: Bengio, Y., LeCun, Y. (eds.) 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, 7\u20139 May, 2015, Conference Track Proceedings (2015). http:\/\/arxiv.org\/abs\/1412.6980"},{"key":"38_CR17","unstructured":"Kwon, G., Ye, J.C.: Clipstyler: Image style transfer with a single text condition. CoRR abs\/2112.00374 (2021). https:\/\/arxiv.org\/abs\/2112.00374"},{"key":"38_CR18","doi-asserted-by":"crossref","unstructured":"Michel, O., Bar-On, R., Liu, R., Benaim, S., Hanocka, R.: Text2mesh: Text-driven neural stylization for meshes. CoRR abs\/2112.03221 (2021). https:\/\/arxiv.org\/abs\/2112.03221","DOI":"10.1109\/CVPR52688.2022.01313"},{"key":"38_CR19","unstructured":"Nichol, A., et al.: GLIDE: towards photorealistic image generation and editing with text-guided diffusion models. CoRR abs\/2112.10741 (2021). https:\/\/arxiv.org\/abs\/2112.10741"},{"key":"38_CR20","unstructured":"Paszke, A., et al.: Pytorch: an imperative style, high-performance deep learning library. In: Advances in Neural Information Processing Systems 32, pp. 8024\u20138035. Curran Associates, Inc. (2019). http:\/\/papers.neurips.cc\/paper\/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf"},{"key":"38_CR21","doi-asserted-by":"crossref","unstructured":"Patashnik, O., Wu, Z., Shechtman, E., Cohen-Or, D., Lischinski, D.: Styleclip: text-driven manipulation of stylegan imagery. CoRR abs\/2103.17249 (2021). https:\/\/arxiv.org\/abs\/2103.17249","DOI":"10.1109\/ICCV48922.2021.00209"},{"key":"38_CR22","unstructured":"Pont-Tuset, J., Perazzi, F., Caelles, S., Arbelaez, P., Sorkine-Hornung, A., Gool, L.V.: The 2017 DAVIS challenge on video object segmentation. CoRR abs\/1704.00675 (2017). http:\/\/arxiv.org\/abs\/1704.00675"},{"key":"38_CR23","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. CoRR abs\/2103.00020 (2021). https:\/\/arxiv.org\/abs\/2103.00020"},{"key":"38_CR24","doi-asserted-by":"publisher","unstructured":"Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with clip latents (2022). https:\/\/doi.org\/10.48550\/ARXIV.2204.06125. https:\/\/arxiv.org\/abs\/2204.06125","DOI":"10.48550\/ARXIV.2204.06125"},{"key":"38_CR25","unstructured":"Ramesh, A., et al.: Zero-shot text-to-image generation. CoRR abs\/2102.12092 (2021). https:\/\/arxiv.org\/abs\/2102.12092"},{"key":"38_CR26","doi-asserted-by":"publisher","unstructured":"Saharia, C., et al.: Photorealistic text-to-image diffusion models with deep language understanding (2022). https:\/\/doi.org\/10.48550\/ARXIV.2205.11487. https:\/\/arxiv.org\/abs\/2205.11487","DOI":"10.48550\/ARXIV.2205.11487"},{"key":"38_CR27","doi-asserted-by":"publisher","unstructured":"Sanghi, A., Chu, H., Lambourne, J.G., Wang, Y., Cheng, C.Y., Fumero, M., Malekshan, K.R.: Clip-forge: Towards zero-shot text-to-shape generation (2021). https:\/\/doi.org\/10.48550\/ARXIV.2110.02624. https:\/\/arxiv.org\/abs\/2110.02624","DOI":"10.48550\/ARXIV.2110.02624"},{"key":"38_CR28","unstructured":"Texler, O., et al.: Interactive video stylization using few-shot patch-based training. CoRR abs\/2004.14489 (2020). https:\/\/arxiv.org\/abs\/2004.14489"},{"key":"38_CR29","doi-asserted-by":"crossref","unstructured":"Wang, X., Jabri, A., Efros, A.A.: Learning correspondence from the cycle-consistency of time. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2019","DOI":"10.1109\/CVPR.2019.00267"},{"key":"38_CR30","doi-asserted-by":"publisher","unstructured":"Ye, V., Li, Z., Tucker, R., Kanazawa, A., Snavely, N.: Deformable sprites for unsupervised video decomposition (2022). https:\/\/doi.org\/10.48550\/ARXIV.2204.07151. https:\/\/arxiv.org\/abs\/2204.07151","DOI":"10.48550\/ARXIV.2204.07151"},{"key":"38_CR31","doi-asserted-by":"crossref","unstructured":"Yin, K., Gao, J., Shugrina, M., Khamis, S., Fidler, S.: 3dstylenet: creating 3d shapes with geometric and texture style variations. CoRR abs\/2108.12958 (2021). https:\/\/arxiv.org\/abs\/2108.12958","DOI":"10.1109\/ICCV48922.2021.01223"},{"key":"38_CR32","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Learning to prompt for vision-language models. CoRR abs\/2109.01134 (2021). https:\/\/arxiv.org\/abs\/2109.01134"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-25069-9_38","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,7]],"date-time":"2024-03-07T12:57:22Z","timestamp":1709816242000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-25069-9_38"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031250682","9783031250699"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-25069-9_38","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"14 February 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}