{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T16:19:16Z","timestamp":1778084356839,"version":"3.51.4"},"publisher-location":"Cham","reference-count":58,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198328","type":"print"},{"value":"9783031198335","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_34","type":"book-chapter","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T00:40:30Z","timestamp":1667522430000},"page":"580-597","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":186,"title":["TM2T: Stochastic and\u00a0Tokenized Modeling for\u00a0the\u00a0Reciprocal Generation of\u00a03D Human Motions and\u00a0Texts"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4539-0634","authenticated-orcid":false,"given":"Chuan","family":"Guo","sequence":"first","affiliation":[]},{"given":"Xinxin","family":"Zuo","sequence":"additional","affiliation":[]},{"given":"Sen","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Li","family":"Cheng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"issue":"4","key":"34_CR1","doi-asserted-by":"publisher","first-page":"6033","DOI":"10.1109\/LRA.2020.3010742","volume":"5","author":"V Adeli","year":"2020","unstructured":"Adeli, V., Adeli, E., Reid, I., Niebles, J.C., Rezatofighi, H.: Socially and contextually aware human motion and pose forecasting. IEEE Robot. Autom. Lett. 5(4), 6033\u20136040 (2020)","journal-title":"IEEE Robot. Autom. Lett."},{"key":"34_CR2","doi-asserted-by":"crossref","unstructured":"Ahuja, C., Morency, L.P.: Language2pose: natural language grounded pose forecasting. In: 2019 International Conference on 3D Vision (3DV), pp. 719\u2013728. IEEE (2019)","DOI":"10.1109\/3DV.2019.00084"},{"key":"34_CR3","doi-asserted-by":"crossref","unstructured":"Aliakbarian, S., Saleh, F., Petersson, L., Gould, S., Salzmann, M.: Contextually plausible and diverse 3D human motion prediction. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 11333\u201311342 (2021)","DOI":"10.1109\/ICCV48922.2021.01114"},{"key":"34_CR4","unstructured":"Bahdanau, D., Cho, K., Bengio, Y.: Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473 (2014)"},{"key":"34_CR5","doi-asserted-by":"crossref","unstructured":"Bhattacharya, U., Rewkowski, N., Banerjee, A., Guhan, P., Bera, A., Manocha, D.: Text2gestures: a transformer-based network for generating emotive body gestures for virtual agents. In: IEEE Virtual Reality and 3D User Interfaces (VR), pp. 1\u201310. 
IEEE (2021)","DOI":"10.1109\/VR50410.2021.00037"},{"key":"34_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"387","DOI":"10.1007\/978-3-030-58452-8_23","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Z Cao","year":"2020","unstructured":"Cao, Z., Gao, H., Mangalam, K., Cai, Q.-Z., Vo, M., Malik, J.: Long-term human motion prediction with scene context. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 387\u2013404. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_23"},{"key":"34_CR7","doi-asserted-by":"crossref","unstructured":"Corona, E., Pumarola, A., Alenya, G., Moreno-Noguer, F.: Context-aware human motion prediction. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6992\u20137001 (2020)","DOI":"10.1109\/CVPR42600.2020.00702"},{"key":"34_CR8","unstructured":"Dubey, S., Olimov, F., Rafique, M.A., Kim, J., Jeon, M.: Label-attention transformer with geometrically coherent objects for image captioning. arXiv preprint arXiv:2109.07799 (2021)"},{"key":"34_CR9","doi-asserted-by":"crossref","unstructured":"Esser, P., Rombach, R., Ommer, B.: Taming transformers for high-resolution image synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873\u201312883 (2021)","DOI":"10.1109\/CVPR46437.2021.01268"},{"key":"34_CR10","doi-asserted-by":"crossref","unstructured":"Gao, J., Wang, S., Wang, S., Ma, S., Gao, W.: Self-critical n-step training for image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6300\u20136308 (2019)","DOI":"10.1109\/CVPR.2019.00646"},{"key":"34_CR11","doi-asserted-by":"crossref","unstructured":"Ghosh, A., Cheema, N., Oguz, C., Theobalt, C., Slusallek, P.: Synthesis of compositional animations from textual descriptions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1396\u20131406 (2021)","DOI":"10.1109\/ICCV48922.2021.00143"},{"key":"34_CR12","unstructured":"Ging, S., Zolfaghari, M., Pirsiavash, H., Brox, T.: COOT: cooperative hierarchical transformer for video-text representation learning. In: Advances in Neural Information Processing Systems, vol. 33, pp. 22605\u201322618 (2020)"},{"key":"34_CR13","doi-asserted-by":"crossref","unstructured":"Goutsu, Y., Inamura, T.: Linguistic descriptions of human motion with generative adversarial Seq2Seq learning. In: 2021 IEEE International Conference on Robotics and Automation (ICRA), pp. 4281\u20134287. IEEE (2021)","DOI":"10.1109\/ICRA48506.2021.9561519"},{"key":"34_CR14","doi-asserted-by":"crossref","unstructured":"Guo, C., et al.: Generating diverse and natural 3D human motions from text. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5152\u20135161 (2022)","DOI":"10.1109\/CVPR52688.2022.00509"},{"key":"34_CR15","doi-asserted-by":"crossref","unstructured":"Guo, C., et al.: Action2video: generating videos of human 3D actions. Int. J. Comput. Vis., 1\u201331 (2022)","DOI":"10.1007\/s11263-021-01550-z"},{"key":"34_CR16","doi-asserted-by":"crossref","unstructured":"Guo, C., et al.: Action2motion: conditioned generation of 3D human motions. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 
2021\u20132029 (2020)","DOI":"10.1145\/3394171.3413635"},{"key":"34_CR17","doi-asserted-by":"crossref","unstructured":"Guo, L., Liu, J., Yao, P., Li, J., Lu, H.: MSCap: multi-style image captioning with unpaired stylized text. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4204\u20134213 (2019)","DOI":"10.1109\/CVPR.2019.00433"},{"key":"34_CR18","doi-asserted-by":"crossref","unstructured":"Holden, D., Kanoun, O., Perepichka, M., Popa, T.: Learned motion matching. ACM Trans. Graph. (TOG) 39(4), 53\u20131 (2020)","DOI":"10.1145\/3386569.3392440"},{"issue":"4","key":"34_CR19","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073663","volume":"36","author":"D Holden","year":"2017","unstructured":"Holden, D., Komura, T., Saito, J.: Phase-functioned neural networks for character control. ACM Trans. Graph. (TOG) 36(4), 1\u201313 (2017)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"34_CR20","unstructured":"Jang, E., Gu, S., Poole, B.: Categorical reparameterization with Gumbel-softmax. arXiv preprint arXiv:1611.01144 (2016)"},{"issue":"2","key":"34_CR21","doi-asserted-by":"publisher","first-page":"171","DOI":"10.1023\/A:1020346032608","volume":"50","author":"A Kojima","year":"2002","unstructured":"Kojima, A., Tamura, T., Fukunaga, K.: Natural language description of human activities from video images based on concept hierarchy of actions. Int. J. Comput. Vis. 50(2), 171\u2013184 (2002)","journal-title":"Int. J. Comput. Vis."},{"issue":"12","key":"34_CR22","doi-asserted-by":"publisher","first-page":"2891","DOI":"10.1109\/TPAMI.2012.162","volume":"35","author":"G Kulkarni","year":"2013","unstructured":"Kulkarni, G., et al.: Babytalk: understanding and generating simple image descriptions. IEEE Trans. Pattern Anal. Mach. Intell. 35(12), 2891\u20132903 (2013)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"34_CR23","unstructured":"Lee, H.Y., et al.: Dancing to music. In: Advances in Neural Information Processing Systems 32 (2019)"},{"key":"34_CR24","doi-asserted-by":"crossref","unstructured":"Li, Y., Min, M., Shen, D., Carlson, D., Carin, L.: Video generation from text. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32 (2018)","DOI":"10.1609\/aaai.v32i1.12233"},{"key":"34_CR25","first-page":"1","volume":"2018","author":"AS Lin","year":"2018","unstructured":"Lin, A.S., Wu, L., Corona, R., Tai, K., Huang, Q., Mooney, R.J.: Generating animated videos of human activities from natural language descriptions. Learning 2018, 1 (2018)","journal-title":"Learning"},{"key":"34_CR26","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: Text Summarization Branches Out, pp. 74\u201381 (2004)"},{"key":"34_CR27","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Towards natural and accurate future motion prediction of humans and animals. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10004\u201310012 (2019)","DOI":"10.1109\/CVPR.2019.01024"},{"key":"34_CR28","doi-asserted-by":"crossref","unstructured":"Mao, W., Liu, M., Salzmann, M.: Generating smooth pose sequences for diverse human motion prediction. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
13309\u201313318 (2021)","DOI":"10.1109\/ICCV48922.2021.01306"},{"key":"34_CR29","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311\u2013318 (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"34_CR30","doi-asserted-by":"crossref","unstructured":"Park, J.S., Rohrbach, M., Darrell, T., Rohrbach, A.: Adversarial inference for multi-sentence video description. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6598\u20136608 (2019)","DOI":"10.1109\/CVPR.2019.00676"},{"issue":"4","key":"34_CR31","doi-asserted-by":"publisher","first-page":"855","DOI":"10.1007\/s11263-019-01245-6","volume":"128","author":"D Pavllo","year":"2020","unstructured":"Pavllo, D., Feichtenhofer, C., Auli, M., Grangier, D.: Modeling human motion with quaternion-based neural networks. Int. J. Comput. Vis. 128(4), 855\u2013872 (2020)","journal-title":"Int. J. Comput. Vis."},{"key":"34_CR32","doi-asserted-by":"crossref","unstructured":"Peng, J., Liu, D., Xu, S., Li, H.: Generating diverse structure for image inpainting with hierarchical VQ-VAE. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10775\u201310784 (2021)","DOI":"10.1109\/CVPR46437.2021.01063"},{"key":"34_CR33","doi-asserted-by":"crossref","unstructured":"Petrovich, M., Black, M.J., Varol, G.: Action-conditioned 3D human motion synthesis with transformer VAE. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10985\u201310995 (2021)","DOI":"10.1109\/ICCV48922.2021.01080"},{"issue":"4","key":"34_CR34","doi-asserted-by":"publisher","first-page":"236","DOI":"10.1089\/big.2016.0028","volume":"4","author":"M Plappert","year":"2016","unstructured":"Plappert, M., Mandery, C., Asfour, T.: The kit motion-language dataset. Big Data 4(4), 236\u2013252 (2016)","journal-title":"Big Data"},{"key":"34_CR35","doi-asserted-by":"publisher","first-page":"13","DOI":"10.1016\/j.robot.2018.07.006","volume":"109","author":"M Plappert","year":"2018","unstructured":"Plappert, M., Mandery, C., Asfour, T.: Learning a bidirectional mapping between human whole-body motion and natural language using deep recurrent neural networks. Robot. Auton. Syst. 109, 13\u201326 (2018)","journal-title":"Robot. Auton. Syst."},{"key":"34_CR36","doi-asserted-by":"crossref","unstructured":"Qin, Y., Du, J., Zhang, Y., Lu, H.: Look back and predict forward in image captioning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8367\u20138375 (2019)","DOI":"10.1109\/CVPR.2019.00856"},{"key":"34_CR37","doi-asserted-by":"crossref","unstructured":"Rakhimov, R., Volkhonskiy, D., Artemov, A., Zorin, D., Burnaev, E.: Latent video transformer. arXiv preprint arXiv:2006.10704 (2020)","DOI":"10.5220\/0010241801010112"},{"key":"34_CR38","unstructured":"Ramesh, A., et al.: Zero-shot text-to-image generation. In: International Conference on Machine Learning, pp. 8821\u20138831. PMLR (2021)"},{"key":"34_CR39","unstructured":"Razavi, A., Van den Oord, A., Vinyals, O.: Generating diverse high-fidelity images with VQ-VAE-2. In: Advances in Neural Information Processing Systems, vol. 
32 (2019)"},{"issue":"6","key":"34_CR40","doi-asserted-by":"publisher","first-page":"209","DOI":"10.1145\/3355089.3356505","volume":"38","author":"S Starke","year":"2019","unstructured":"Starke, S., Zhang, H., Komura, T., Saito, J.: Neural state machine for character-scene interactions. ACM Trans. Graph. 38(6), 209\u20131 (2019)","journal-title":"ACM Trans. Graph."},{"issue":"10","key":"34_CR41","doi-asserted-by":"publisher","first-page":"1314","DOI":"10.1177\/0278364915587923","volume":"34","author":"W Takano","year":"2015","unstructured":"Takano, W., Nakamura, Y.: Statistical mutual conversion between whole body motion primitives and linguistic sentences for human motions. Int. J. Robot. Res. 34(10), 1314\u20131328 (2015)","journal-title":"Int. J. Robot. Res."},{"key":"34_CR42","doi-asserted-by":"crossref","unstructured":"Tulyakov, S., Liu, M.Y., Yang, X., Kautz, J.: MoCoGAN: decomposing motion and content for video generation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1526\u20131535 (2018)","DOI":"10.1109\/CVPR.2018.00165"},{"key":"34_CR43","unstructured":"Van Den Oord, A., Vinyals, O., et al.: Neural discrete representation learning. In: Advances in Neural Information Processing Systems 30 (2017)"},{"key":"34_CR44","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"34_CR45","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence Zitnick, C., Parikh, D.: CIDEr: consensus-based image description evaluation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4566\u20134575 (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"34_CR46","doi-asserted-by":"crossref","unstructured":"Venugopalan, S., et al.: Sequence to sequence-video to text. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4534\u20134542 (2015)","DOI":"10.1109\/ICCV.2015.515"},{"key":"34_CR47","doi-asserted-by":"crossref","unstructured":"Vinyals, O., Toshev, A., Bengio, S., Erhan, D.: Show and tell: a neural image caption generator. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3156\u20133164 (2015)","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"34_CR48","unstructured":"Wang, J., Xu, H., Narasimhan, M., Wang, X.: Multi-person 3D motion prediction with multi-range transformers. In: Advances in Neural Information Processing Systems, vol. 34 (2021)"},{"key":"34_CR49","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"20","DOI":"10.1007\/978-3-319-46484-8_2","volume-title":"Computer Vision \u2013 ECCV 2016","author":"L Wang","year":"2016","unstructured":"Wang, L., et al.: Temporal segment networks: towards good practices for deep action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9912, pp. 20\u201336. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46484-8_2"},{"key":"34_CR50","doi-asserted-by":"crossref","unstructured":"Wang, Z., et al.: Learning diverse stochastic human-action generators by learning smooth latent transitions. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 
12281\u201312288 (2020)","DOI":"10.1609\/aaai.v34i07.6911"},{"issue":"3","key":"34_CR51","doi-asserted-by":"publisher","first-page":"454","DOI":"10.1007\/s11263-017-0998-6","volume":"123","author":"C Xu","year":"2017","unstructured":"Xu, C., Govindarajan, L.N., Zhang, Y., Cheng, L.: Lie-x: depth image based articulated object pose estimation, tracking, and action recognition on lie groups. Int. J. Comput. Vis. 123(3), 454\u2013478 (2017)","journal-title":"Int. J. Comput. Vis."},{"key":"34_CR52","unstructured":"Xu, K., et al.: Show, attend and tell: neural image caption generation with visual attention. In: International Conference on Machine Learning, pp. 2048\u20132057. PMLR (2015)"},{"key":"34_CR53","doi-asserted-by":"crossref","unstructured":"Xu, T., et al.: AttnGAN: fine-grained text to image generation with attentional generative adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1316\u20131324 (2018)","DOI":"10.1109\/CVPR.2018.00143"},{"issue":"4","key":"34_CR54","doi-asserted-by":"publisher","first-page":"3441","DOI":"10.1109\/LRA.2018.2852838","volume":"3","author":"T Yamada","year":"2018","unstructured":"Yamada, T., Matsunaga, H., Ogata, T.: Paired recurrent autoencoders for bidirectional translation between robot actions and linguistic descriptions. IEEE Robot. Autom. Lett. 3(4), 3441\u20133448 (2018)","journal-title":"IEEE Robot. Autom. Lett."},{"key":"34_CR55","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1007\/978-3-030-58577-8_2","volume-title":"Computer Vision \u2013 ECCV 2020","author":"P Yu","year":"2020","unstructured":"Yu, P., Zhao, Y., Li, C., Yuan, J., Chen, C.: Structure-aware human-action generation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 18\u201334. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_2"},{"key":"34_CR56","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"346","DOI":"10.1007\/978-3-030-58545-7_20","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y Yuan","year":"2020","unstructured":"Yuan, Y., Kitani, K.: DLow: diversifying latent flows for diverse human motion prediction. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12354, pp. 346\u2013364. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58545-7_20"},{"key":"34_CR57","doi-asserted-by":"crossref","unstructured":"Zhang, H., et al.: StackGAN: text to photo-realistic image synthesis with stacked generative adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 5907\u20135915 (2017)","DOI":"10.1109\/ICCV.2017.629"},{"key":"34_CR58","unstructured":"Zhang, T., Kishore, V., Wu, F., Weinberger, K.Q., Artzi, Y.: Bertscore: evaluating text generation with BERT. 
arXiv preprint arXiv:1904.09675 (2019)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_34","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T15:44:57Z","timestamp":1673279097000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_34"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":58,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_34","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}