{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T21:21:14Z","timestamp":1776115274880,"version":"3.50.1"},"publisher-location":"Cham","reference-count":50,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031200465","type":"print"},{"value":"9783031200472","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20047-2_21","type":"book-chapter","created":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T10:02:55Z","timestamp":1666432975000},"page":"358-374","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":247,"title":["MotionCLIP: Exposing Human Motion Generation to\u00a0CLIP Space"],"prefix":"10.1007","author":[{"given":"Guy","family":"Tevet","sequence":"first","affiliation":[]},{"given":"Brian","family":"Gordon","sequence":"additional","affiliation":[]},{"given":"Amir","family":"Hertz","sequence":"additional","affiliation":[]},{"given":"Amit H.","family":"Bermano","sequence":"additional","affiliation":[]},{"given":"Daniel","family":"Cohen-Or","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,23]]},"reference":[{"key":"21_CR1","doi-asserted-by":"crossref","unstructured":"Aberman, K., Weng, Y., Lischinski, D., Cohen-Or, D., Chen, B.: Unpaired motion style transfer from video to animation. ACM Trans. Graph. (TOG) 39(4), 64-1 (2020)","DOI":"10.1145\/3386569.3392469"},{"key":"21_CR2","doi-asserted-by":"crossref","unstructured":"Ahuja, C., Morency, L.P.: Language2Pose: natural language grounded pose forecasting. In: 2019 International Conference on 3D Vision (3DV), pp. 719\u2013728. IEEE (2019)","DOI":"10.1109\/3DV.2019.00084"},{"key":"21_CR3","doi-asserted-by":"crossref","unstructured":"Aristidou, A., Yiannakidis, A., Aberman, K., Cohen-Or, D., Shamir, A., Chrysanthou, Y.: Rhythm is a dancer: music-driven motion synthesis with global structure. IEEE Trans. Visual. Comput. Graph. (2022)","DOI":"10.1109\/TVCG.2022.3163676"},{"key":"21_CR4","unstructured":"Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: International Conference on Machine Learning, pp. 1597\u20131607. PMLR (2020)"},{"key":"21_CR5","doi-asserted-by":"crossref","unstructured":"Cho, K., et al.: Learning phrase representations using RNN encoder-decoder for statistical machine translation. arXiv preprint arXiv:1406.1078 (2014)","DOI":"10.3115\/v1\/D14-1179"},{"key":"21_CR6","unstructured":"Dilokthanakul, N., et al.: Deep unsupervised clustering with Gaussian mixture variational autoencoders. arXiv preprint arXiv:1611.02648 (2016)"},{"key":"21_CR7","unstructured":"Dinh, L., Krueger, D., Bengio, Y.: Nice: non-linear independent components estimation. 
arXiv preprint arXiv:1410.8516 (2014)"},{"issue":"4","key":"21_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2897824.2925984","volume":"35","author":"P Edwards","year":"2016","unstructured":"Edwards, P., Landreth, C., Fiume, E., Singh, K.: Jali: an animator-centric viseme model for expressive lip synchronization. ACM Trans. graph. (TOG) 35(4), 1\u201311 (2016)","journal-title":"ACM Trans. graph. (TOG)"},{"key":"21_CR9","unstructured":"Fang, H., Xiong, P., Xu, L., Chen, Y.: Clip2Video: mastering video-text retrieval via image clip. arXiv preprint arXiv:2106.11097 (2021)"},{"key":"21_CR10","unstructured":"Frans, K., Soros, L., Witkowski, O.: ClipDraw: exploring text-to-drawing synthesis through language-image encoders. arXiv preprint arXiv:2106.14843 (2021)"},{"key":"21_CR11","doi-asserted-by":"crossref","unstructured":"Gal, R., Patashnik, O., Maron, H., Chechik, G., Cohen-Or, D.: StyleGAN-NADA: clip-guided domain adaptation of image generators. arXiv preprint arXiv:2108.00946 (2021)","DOI":"10.1145\/3528223.3530164"},{"key":"21_CR12","doi-asserted-by":"crossref","unstructured":"Gatys, L.A., Ecker, A.S., Bethge, M.: Image style transfer using convolutional neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2016","DOI":"10.1109\/CVPR.2016.265"},{"key":"21_CR13","doi-asserted-by":"crossref","unstructured":"Guo, C., et al.: Generating diverse and natural 3D human motions from text. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5152\u20135161, June 2022","DOI":"10.1109\/CVPR52688.2022.00509"},{"key":"21_CR14","doi-asserted-by":"crossref","unstructured":"Guo, C., et al.: Action2Motion: conditioned generation of 3D human motions. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 2021\u20132029 (2020)","DOI":"10.1145\/3394171.3413635"},{"key":"21_CR15","doi-asserted-by":"crossref","unstructured":"Guzhov, A., Raue, F., Hees, J., Dengel, A.: AudioClip: extending clip to image, text and audio. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 976\u2013980. IEEE (2022)","DOI":"10.1109\/ICASSP43922.2022.9747631"},{"key":"21_CR16","doi-asserted-by":"crossref","unstructured":"Hadsell, R., Chopra, S., LeCun, Y.: Dimensionality reduction by learning an invariant mapping. In: 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR 2006), vol. 2, pp. 1735\u20131742. IEEE (2006)","DOI":"10.1109\/CVPR.2006.100"},{"key":"21_CR17","doi-asserted-by":"crossref","unstructured":"He, X., Peng, Y.: Fine-grained image classification via combining vision and language. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5994\u20136002 (2017)","DOI":"10.1109\/CVPR.2017.775"},{"issue":"4","key":"21_CR18","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2897824.2925975","volume":"35","author":"D Holden","year":"2016","unstructured":"Holden, D., Saito, J., Komura, T.: A deep learning framework for character motion synthesis and editing. ACM Trans. Graph. (TOG) 35(4), 1\u201311 (2016)","journal-title":"ACM Trans. Graph. (TOG)"},{"issue":"4","key":"21_CR19","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3528223.3530094","volume":"41","author":"F Hong","year":"2022","unstructured":"Hong, F., Zhang, M., Pan, L., Cai, Z., Yang, L., Liu, Z.: AvatarClip: zero-shot text-driven generation and animation of 3d avatars. 
ACM Trans. Graph. (TOG) 41(4), 1\u201319 (2022). https:\/\/doi.org\/10.1145\/3528223.3530094","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"21_CR20","doi-asserted-by":"crossref","unstructured":"Huang, X., Belongie, S.: Arbitrary style transfer in real-time with adaptive instance normalization. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1501\u20131510 (2017)","DOI":"10.1109\/ICCV.2017.167"},{"key":"21_CR21","doi-asserted-by":"crossref","unstructured":"Ji, Y., Xu, F., Yang, Y., Shen, F., Shen, H.T., Zheng, W.S.: A large-scale RGB-D database for arbitrary-view human action recognition. In: Proceedings of the 26th ACM international Conference on Multimedia, pp. 1510\u20131518 (2018)","DOI":"10.1145\/3240508.3240675"},{"key":"21_CR22","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational Bayes. arXiv preprint arXiv:1312.6114 (2013)"},{"key":"21_CR23","doi-asserted-by":"crossref","unstructured":"Li, R., Yang, S., Ross, D.A., Kanazawa, A.: AI choreographer: music conditioned 3D dance generation with AIST++. In: The IEEE International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.01315"},{"key":"21_CR24","unstructured":"Lin, A.S., Wu, L., Corona, R., Tai, K., Huang, Q., Mooney, R.J.: Generating animated videos of human activities from natural language descriptions. Learning 2018, 1 (2018)"},{"issue":"10","key":"21_CR25","doi-asserted-by":"publisher","first-page":"2684","DOI":"10.1109\/TPAMI.2019.2916873","volume":"42","author":"J Liu","year":"2019","unstructured":"Liu, J., Shahroudy, A., Perez, M., Wang, G., Duan, L.Y., Kot, A.C.: NTU RGB+D 120: a large-scale benchmark for 3D human activity understanding. IEEE Trans. Pattern Anal. Mach. Intell. 42(10), 2684\u20132701 (2019)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"6","key":"21_CR26","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2816795.2818013","volume":"34","author":"M Loper","year":"2015","unstructured":"Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: SMPL: a skinned multi-person linear model. ACM Trans. Graph. (TOG) 34(6), 1\u201316 (2015)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"21_CR27","doi-asserted-by":"crossref","unstructured":"Luo, H., et al.: CLIP4Clip: an empirical study of clip for end to end video clip retrieval. arXiv preprint arXiv:2104.08860 (2021)","DOI":"10.1016\/j.neucom.2022.07.028"},{"key":"21_CR28","doi-asserted-by":"crossref","unstructured":"Maheshwari, S., Gupta, D., Sarvadevabhatla, R.K.: MUGL: large scale multi person conditional action generation with locomotion. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 257\u2013265 (2022)","DOI":"10.1109\/WACV51458.2022.00082"},{"key":"21_CR29","doi-asserted-by":"crossref","unstructured":"Mahmood, N., Ghorbani, N., Troje, N.F., Pons-Moll, G., Black, M.J.: AMASS: archive of motion capture as surface shapes. In: International Conference on Computer Vision, pp. 5442\u20135451, October 2019","DOI":"10.1109\/ICCV.2019.00554"},{"key":"21_CR30","doi-asserted-by":"crossref","unstructured":"Michel, O., Bar-On, R., Liu, R., Benaim, S., Hanocka, R.: Text2Mesh: text-driven neural stylization for meshes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
13492\u201313502 (2022)","DOI":"10.1109\/CVPR52688.2022.01313"},{"key":"21_CR31","doi-asserted-by":"crossref","unstructured":"Patashnik, O., Wu, Z., Shechtman, E., Cohen-Or, D., Lischinski, D.: StyleClip: text-driven manipulation of styleGAN imagery. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2085\u20132094 (2021)","DOI":"10.1109\/ICCV48922.2021.00209"},{"key":"21_CR32","doi-asserted-by":"crossref","unstructured":"Pavlakos, G., et al.: Expressive body capture: 3D hands, face, and body from a single image. In: Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), pp. 10975\u201310985 (2019)","DOI":"10.1109\/CVPR.2019.01123"},{"key":"21_CR33","doi-asserted-by":"crossref","unstructured":"Petrovich, M., Black, M.J., Varol, G.: Action-conditioned 3D human motion synthesis with transformer VAE. In: International Conference on Computer Vision (ICCV), pp. 10985\u201310995, October 2021","DOI":"10.1109\/ICCV48922.2021.01080"},{"key":"21_CR34","doi-asserted-by":"crossref","unstructured":"Petrovich, M., Black, M.J., Varol, G.: TEMOS: generating diverse human motions from textual descriptions. In: European Conference on Computer Vision (ECCV) (2022)","DOI":"10.1007\/978-3-031-20047-2_28"},{"issue":"4","key":"21_CR35","doi-asserted-by":"publisher","first-page":"236","DOI":"10.1089\/big.2016.0028","volume":"4","author":"M Plappert","year":"2016","unstructured":"Plappert, M., Mandery, C., Asfour, T.: The kit motion-language dataset. Big Data 4(4), 236\u2013252 (2016)","journal-title":"Big Data"},{"key":"21_CR36","doi-asserted-by":"publisher","first-page":"13","DOI":"10.1016\/j.robot.2018.07.006","volume":"109","author":"M Plappert","year":"2018","unstructured":"Plappert, M., Mandery, C., Asfour, T.: Learning a bidirectional mapping between human whole-body motion and natural language using deep recurrent neural networks. Robot. Auton. Syst. 109, 13\u201326 (2018)","journal-title":"Robot. Auton. Syst."},{"key":"21_CR37","doi-asserted-by":"crossref","unstructured":"Punnakkal, A.R., Chandrasekaran, A., Athanasiou, N., Quiros-Ramirez, A., Black, M.J.: BABEL: bodies, action and behavior with English labels. In: Proceedings IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 722\u2013731, June 2021","DOI":"10.1109\/CVPR46437.2021.00078"},{"key":"21_CR38","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"21_CR39","unstructured":"Ramesh, A., et al.: Zero-shot text-to-image generation. In: International Conference on Machine Learning, pp. 8821\u20138831. PMLR (2021)"},{"key":"21_CR40","doi-asserted-by":"crossref","unstructured":"Sanghi, A., et al.: Clip-forge: towards zero-shot text-to-shape generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18603\u201318613 (2022)","DOI":"10.1109\/CVPR52688.2022.01805"},{"key":"21_CR41","doi-asserted-by":"crossref","unstructured":"Shi, L., Zhang, Y., Cheng, J., Lu, H.: Two-stream adaptive graph convolutional networks for skeleton-based action recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12026\u201312035 (2019)","DOI":"10.1109\/CVPR.2019.01230"},{"key":"21_CR42","unstructured":"Sohn, K., Lee, H., Yan, X.: Learning structured output representation using deep conditional generative models. 
In: Advances in Neural Information Processing Systems, vol. 28 (2015)"},{"key":"21_CR43","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"21_CR44","doi-asserted-by":"crossref","unstructured":"Vinker, Y., et al.: ClipASSO: semantically-aware object sketching. arXiv preprint arXiv:2202.05822 (2022)","DOI":"10.1145\/3528223.3530068"},{"key":"21_CR45","doi-asserted-by":"crossref","unstructured":"Wang, C., Chai, M., He, M., Chen, D., Liao, J.: Clip-NERF: text-and-image driven manipulation of neural radiance fields. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3835\u20133844 (2022)","DOI":"10.1109\/CVPR52688.2022.00381"},{"key":"21_CR46","unstructured":"Wang, J., Xu, H., Narasimhan, M., Wang, X.: Multi-person 3D motion prediction with multi-range transformers. In: Advances in Neural Information Processing Systems, vol. 34 (2021)"},{"key":"21_CR47","doi-asserted-by":"crossref","unstructured":"Wen, Y.H., Yang, Z., Fu, H., Gao, L., Sun, Y., Liu, Y.J.: Autoregressive stylized motion synthesis with generative flow. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13612\u201313621 (2021)","DOI":"10.1109\/CVPR46437.2021.01340"},{"issue":"4","key":"21_CR48","doi-asserted-by":"publisher","first-page":"3441","DOI":"10.1109\/LRA.2018.2852838","volume":"3","author":"T Yamada","year":"2018","unstructured":"Yamada, T., Matsunaga, H., Ogata, T.: Paired recurrent autoencoders for bidirectional translation between robot actions and linguistic descriptions. IEEE Robot. Autom. Lett. 3(4), 3441\u20133448 (2018)","journal-title":"IEEE Robot. Autom. Lett."},{"key":"21_CR49","doi-asserted-by":"crossref","unstructured":"Youwang, K., Ji-Yeon, K., Oh, T.H.: Clip-Actor: text-driven recommendation and stylization for animating human meshes (2022)","DOI":"10.1007\/978-3-031-20062-5_11"},{"key":"21_CR50","doi-asserted-by":"crossref","unstructured":"Zhou, Y., Barnes, C., Lu, J., Yang, J., Li, H.: On the continuity of rotation representations in neural networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
5745\u20135753 (2019)","DOI":"10.1109\/CVPR.2019.00589"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20047-2_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,6]],"date-time":"2024-10-06T10:03:53Z","timestamp":1728209033000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20047-2_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031200465","9783031200472"],"references-count":50,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20047-2_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"23 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
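
The object above is a Crossref REST API "work" record for the MotionCLIP chapter (DOI 10.1007/978-3-031-20047-2_21). As a minimal reading sketch, assuming the record is fetched live from the public api.crossref.org works endpoint (all field names below are taken directly from the JSON above; the live response may drift as Crossref re-indexes the work):

import json
import urllib.request

# Fetch the Crossref "work" record shown above. The endpoint form
# https://api.crossref.org/works/{doi} is the public Crossref REST API.
DOI = "10.1007/978-3-031-20047-2_21"

with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    record = json.load(resp)

# The envelope carries status and message-type; the bibliographic data
# sits under "message".
assert record["status"] == "ok" and record["message-type"] == "work"
work = record["message"]

# "title" is a list; "author" entries carry given/family names in
# "sequence" order (first author first).
print(work["title"][0])
print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))

# Series/proceedings titles, page range, and citation counts as recorded.
print("; ".join(work["container-title"]))  # LNCS + Computer Vision - ECCV 2022
print(work["page"])                        # "358-374"
print(work["references-count"])            # 50 entries in the "reference" array
print(work["is-referenced-by-count"])      # citations known at indexing time

Run against this DOI, the script prints the chapter title, the five authors in sequence order, the container titles, the page range, and the 50-entry reference count recorded in the JSON above.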