{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:21:15Z","timestamp":1775578875071,"version":"3.50.1"},"publisher-location":"Cham","reference-count":77,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198328","type":"print"},{"value":"9783031198335","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_37","type":"book-chapter","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T00:40:30Z","timestamp":1667522430000},"page":"634-651","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":29,"title":["FashionViL: Fashion-Focused Vision-and-Language Representation Learning"],"prefix":"10.1007","author":[{"given":"Xiao","family":"Han","sequence":"first","affiliation":[]},{"given":"Licheng","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Xiatian","family":"Zhu","sequence":"additional","affiliation":[]},{"given":"Li","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yi-Zhe","family":"Song","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Xiang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"key":"37_CR1","unstructured":"Akbari, H., et al.: Vatt: transformers for multimodal self-supervised learning from raw video, audio and text. In: NeurIPS (2021)"},{"key":"37_CR2","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: Visual question answering. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"37_CR3","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)"},{"key":"37_CR4","unstructured":"Bao, H., Dong, L., Piao, S., Wei, F.: Beit: bert pre-training of image transformers. In: ICLR (2022)"},{"key":"37_CR5","unstructured":"Bird, S., Klein, E., Loper, E.: Natural language processing with python: analyzing text with the natural language toolkit (2009). https:\/\/www.nltk.org"},{"key":"37_CR6","doi-asserted-by":"crossref","unstructured":"Bugliarello, E., Cotterell, R., Okazaki, N., Elliott, D.: Multimodal pretraining unmasked: a meta-analysis and a unified framework of vision-and-language berts. TACL (2021)","DOI":"10.1162\/tacl_a_00408"},{"key":"37_CR7","doi-asserted-by":"crossref","unstructured":"Chen, Y., Gong, S., Bazzani, L.: Image search with text feedback by visiolinguistic attention learning. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00307"},{"key":"37_CR8","doi-asserted-by":"crossref","unstructured":"Chen, Y.C., et al.: Uniter: universal image-text representation learning. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"37_CR9","doi-asserted-by":"crossref","unstructured":"Cho, K., et al.: Learning phrase representations using RNN encoder-decoder for statistical machine translation. 
In: EMNLP (2014)","DOI":"10.3115\/v1\/D14-1179"},{"key":"37_CR10","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: NAACL-HLT (2019)"},{"key":"37_CR11","unstructured":"Dong, X., et al.: M5product: a multi-modal pretraining benchmark for e-commercial product downstream tasks. arXiv preprint arXiv:2109.04275 (2021)"},{"key":"37_CR12","unstructured":"Dong, X., et al.: Peco: perceptual codebook for bert pre-training of vision transformers. arXiv preprint arXiv:2111.12710 (2021)"},{"key":"37_CR13","unstructured":"Dosovitskiy, A., et al.: An image is worth 16 x 16 words: transformers for image recognition at scale. In: ICLR (2020)"},{"key":"37_CR14","doi-asserted-by":"crossref","unstructured":"Dou, Z.Y., et al.: An empirical study of training end-to-end vision-and-language transformers. arXiv preprint arXiv:2111.02387 (2021)","DOI":"10.1109\/CVPR52688.2022.01763"},{"key":"37_CR15","doi-asserted-by":"crossref","unstructured":"Esser, P., Rombach, R., Ommer, B.: Taming transformers for high-resolution image synthesis. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01268"},{"key":"37_CR16","unstructured":"Faghri, F., Fleet, D.J., Kiros, J.R., Fidler, S.: Vse++: improving visual-semantic embeddings with hard negatives. In: BMVC (2018)"},{"key":"37_CR17","unstructured":"Fei, N., et al.: Wenlan 2.0: make AI imagine via a multimodal foundation model. arXiv preprint arXiv:2110.14378 (2021)"},{"key":"37_CR18","doi-asserted-by":"crossref","unstructured":"Gao, D., et al.: Fashionbert: text and image matching with adaptive loss for cross-modal retrieval. In: SIGIR (2020)","DOI":"10.1145\/3397271.3401430"},{"key":"37_CR19","doi-asserted-by":"crossref","unstructured":"Geigle, G., Pfeiffer, J., Reimers, N., Vuli\u0107, I., Gurevych, I.: Retrieve fast, rerank smart: cooperative and joint approaches for improved cross-modal retrieval. arXiv preprint arXiv:2103.11920 (2021)","DOI":"10.1162\/tacl_a_00473"},{"key":"37_CR20","unstructured":"Guo, X., Wu, H., Cheng, Y., Rennie, S., Tesauro, G., Feris, R.S.: Dialog-based interactive image retrieval. In: NeurIPS (2018)"},{"key":"37_CR21","doi-asserted-by":"crossref","unstructured":"Han, X., He, S., Zhang, L., Song, Y.Z., Xiang, T.: UIGR: unified interactive garment retrieval. In: CVPR workshops (2022)","DOI":"10.1109\/CVPRW56347.2022.00241"},{"key":"37_CR22","doi-asserted-by":"crossref","unstructured":"Han, X., et al.: Automatic spatially-aware fashion concept discovery. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.163"},{"key":"37_CR23","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"37_CR24","unstructured":"Hoe, J.T., Ng, K.W., Zhang, T., Chan, C.S., Song, Y.Z., Xiang, T.: One loss for all: deep hashing with a single cosine similarity based learning objective. In: NeurIPS (2021)"},{"key":"37_CR25","doi-asserted-by":"crossref","unstructured":"Hou, Y., Vig, E., Donoser, M., Bazzani, L.: Learning attribute-driven disentangled representations for interactive fashion retrieval. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01193"},{"key":"37_CR26","doi-asserted-by":"crossref","unstructured":"Hu, X., et al.: Vivo: visual vocabulary pre-training for novel object captioning. 
In: AAAI (2021)","DOI":"10.1609\/aaai.v35i2.16249"},{"key":"37_CR27","unstructured":"Huang, Z., Zeng, Z., Liu, B., Fu, D., Fu, J.: Pixel-bert: aligning image pixels with text by deep multi-modal transformers. arXiv preprint arXiv:2004.00849 (2020)"},{"key":"37_CR28","unstructured":"Jia, C., et al.: Scaling up visual and vision-language representation learning with noisy text supervision. In: ICML (2021)"},{"key":"37_CR29","unstructured":"Kim, W., Son, B., Kim, I.: VILT: Vision-and-language transformer without convolution or region supervision. In: ICML (2021)"},{"key":"37_CR30","doi-asserted-by":"crossref","unstructured":"Lee, S., Kim, D., Han, B.: Cosmo: Content-style modulation for image retrieval with text feedback. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00086"},{"key":"37_CR31","unstructured":"Li, J., Selvaraju, R., Gotmare, A., Joty, S., Xiong, C., Hoi, S.C.H.: Align before fuse: vision and language representation learning with momentum distillation. In: NeurIPS (2021)"},{"key":"37_CR32","unstructured":"Li, L.H., Yatskar, M., Yin, D., Hsieh, C.J., Chang, K.W.: Visualbert: a simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557 (2019)"},{"key":"37_CR33","doi-asserted-by":"crossref","unstructured":"Li, L.H., You, H., Wang, Z., Zareian, A., Chang, S.F., Chang, K.W.: Unsupervised vision-and-language pre-training without parallel images and captions. In: NAACL-HLT (2021)","DOI":"10.18653\/v1\/2021.naacl-main.420"},{"key":"37_CR34","doi-asserted-by":"crossref","unstructured":"Li, W., et al.: Unimo: towards unified-modal understanding and generation via cross-modal contrastive learning. In: ACL-IJCNLP (2021)","DOI":"10.18653\/v1\/2021.acl-long.202"},{"key":"37_CR35","doi-asserted-by":"crossref","unstructured":"Li, X., et al.: Oscar: object-semantics aligned pre-training for vision-language tasks. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"37_CR36","doi-asserted-by":"crossref","unstructured":"Liao, L., He, X., Zhao, B., Ngo, C.W., Chua, T.S.: Interpretable multimodal retrieval for fashion products. In: ACM MM (2018)","DOI":"10.1145\/3240508.3240646"},{"key":"37_CR37","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., et al.: Microsoft coco: common objects in context. In: ECCV (2014)","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"37_CR38","doi-asserted-by":"crossref","unstructured":"Lin, Y.L., Tran, S., Davis, L.S.: Fashion outfit complementary item retrieval. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00337"},{"key":"37_CR39","doi-asserted-by":"crossref","unstructured":"Liu, H., Yu, T., Li, P.: Inflate and shrink: enriching and reducing interactions for fast text-image retrieval. In: EMNLP (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.772"},{"key":"37_CR40","doi-asserted-by":"crossref","unstructured":"Liu, Z., Rodriguez-Opazo, C., Teney, D., Gould, S.: Image retrieval on real-life images with pre-trained vision-and-language models. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00213"},{"key":"37_CR41","unstructured":"Lu, J., Batra, D., Parikh, D., Lee, S.: Vilbert: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In: NeurIPS (2019)"},{"key":"37_CR42","doi-asserted-by":"crossref","unstructured":"Ma, Y., Jia, J., Zhou, S., Fu, J., Liu, Y., Tong, Z.: Towards better understanding the clothing fashion styles: a multimodal deep learning approach. 
In: AAAI (2017)","DOI":"10.1609\/aaai.v31i1.10509"},{"key":"37_CR43","unstructured":"Van der Maaten, L., Hinton, G.: Visualizing data using T-SNE. JMLR (2008)"},{"key":"37_CR44","unstructured":"Oord, A.V.D., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018)"},{"key":"37_CR45","unstructured":"Paszke, A., et al.: Pytorch: an imperative style, high-performance deep learning library. In: NeurIPS (2019)"},{"key":"37_CR46","doi-asserted-by":"crossref","unstructured":"Plummer, B.A., Wang, L., Cervantes, C.M., Caicedo, J.C., Hockenmaier, J., Lazebnik, S.: Flickr30k entities: collecting region-to-phrase correspondences for richer image-to-sentence models. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.303"},{"key":"37_CR47","unstructured":"Qi, D., Su, L., Song, J., Cui, E., Bharti, T., Sacheti, A.: Imagebert: cross-modal pre-training with large-scale weak-supervised image-text data. arXiv preprint arXiv:2001.07966 (2020)"},{"key":"37_CR48","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: ICML (2021)"},{"key":"37_CR49","unstructured":"Ramesh, A., et al.: Zero-shot text-to-image generation. In: ICML (2021)"},{"key":"37_CR50","unstructured":"Rostamzadeh, N., et al.: Fashion-gen: the generative fashion dataset and challenge. arXiv preprint arXiv:1806.08317 (2018)"},{"key":"37_CR51","unstructured":"Shin, M., Cho, Y., Ko, B., Gu, G.: Rtic: Residual learning for text and image composition using graph convolutional network. arXiv preprint arXiv:2104.03015 (2021)"},{"key":"37_CR52","unstructured":"Singh, A., et al.: MMF: a multimodal framework for vision and language research (2020). https:\/\/github.com\/facebookresearch\/mmf"},{"key":"37_CR53","unstructured":"Su, W., et al.: VL-BERT: pre-training of generic visual-linguistic representations. In: ICLR (2020)"},{"key":"37_CR54","doi-asserted-by":"crossref","unstructured":"Sun, S., Chen, Y.C., Li, L., Wang, S., Fang, Y., Liu, J.: Lightningdot: pre-training visual-semantic embeddings for real-time image-text retrieval. In: NAACL-HLT (2021)","DOI":"10.18653\/v1\/2021.naacl-main.77"},{"key":"37_CR55","doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: LXMERT: learning cross-modality encoder representations from transformers. In: EMNLP-IJCNLP (2019)","DOI":"10.18653\/v1\/D19-1514"},{"key":"37_CR56","doi-asserted-by":"crossref","unstructured":"Tan, R., Vasileva, M.I., Saenko, K., Plummer, B.A.: Learning similarity conditions without explicit supervision. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.01047"},{"key":"37_CR57","unstructured":"Van Den Oord, A., Vinyals, O., et al.: Neural discrete representation learning. In: NeurIPS (2017)"},{"key":"37_CR58","doi-asserted-by":"crossref","unstructured":"Vasileva, M.I., Plummer, B.A., Dusad, K., Rajpal, S., Kumar, R., Forsyth, D.: Learning type-aware embeddings for fashion compatibility. In: ECCV (2018)","DOI":"10.1007\/978-3-030-01270-0_24"},{"key":"37_CR59","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS (2017)"},{"key":"37_CR60","doi-asserted-by":"crossref","unstructured":"Vo, N., Jiang, L., Sun, C., Murphy, K., Li, L.J., Fei-Fei, L., Hays, J.: Composing text and image for image retrieval - an empirical odyssey. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00660"},{"key":"37_CR61","unstructured":"Wang, J., et al.: UFO: a unified transformer for vision-language representation learning. 
arXiv preprint arXiv:2111.10023 (2021)"},{"key":"37_CR62","unstructured":"Wang, W., Bao, H., Dong, L., Wei, F.: VLMO: unified vision-language pre-training with mixture-of-modality-experts. arXiv preprint arXiv:2111.02358 (2021)"},{"key":"37_CR63","doi-asserted-by":"crossref","unstructured":"Wang, Z., Wang, W., Zhu, H., Liu, M., Qin, B., Wei, F.: Distilled dual-encoder model for vision-language understanding. arXiv preprint arXiv:2112.08723 (2021)","DOI":"10.18653\/v1\/2022.emnlp-main.608"},{"key":"37_CR64","unstructured":"Wang, Z., Yu, J., Yu, A.W., Dai, Z., Tsvetkov, Y., Cao, Y.: Simvlm: simple visual language model pretraining with weak supervision. In: ICLR (2021)"},{"key":"37_CR65","doi-asserted-by":"crossref","unstructured":"Wu, Het al.: Fashion IQ: a new dataset towards retrieving images by natural language feedback. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01115"},{"key":"37_CR66","unstructured":"Wu, Y., et al.: Google\u2019s neural machine translation system: bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144 (2016)"},{"key":"37_CR67","doi-asserted-by":"crossref","unstructured":"Xu, H., et al.: E2E-VLP: End-to-end vision-language pre-training enhanced by visual learning. In: ACL-IJCNLP (2021)","DOI":"10.18653\/v1\/2021.acl-long.42"},{"key":"37_CR68","doi-asserted-by":"crossref","unstructured":"Yang, X., et al.: Fashion captioning: towards generating accurate descriptions with semantic rewards. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58601-0_1"},{"key":"37_CR69","unstructured":"You, H., et al.: Ma-clip: towards modality-agnostic contrastive language-image pre-training. OpenReview (2021)"},{"key":"37_CR70","doi-asserted-by":"crossref","unstructured":"Yu, L., Poirson, P., Yang, S., Berg, A.C., Berg, T.L.: Modeling context in referring expressions. In: ECCV (2016)","DOI":"10.1007\/978-3-319-46475-6_5"},{"key":"37_CR71","doi-asserted-by":"crossref","unstructured":"Zellers, R., Bisk, Y., Farhadi, A., Choi, Y.: From recognition to cognition: Visual commonsense reasoning. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00688"},{"key":"37_CR72","unstructured":"Zhang, L., et al.: Vldeformer: learning visual-semantic embeddings by vision-language transformer decomposing. arXiv preprint arXiv:2110.11338 (2021)"},{"key":"37_CR73","doi-asserted-by":"crossref","unstructured":"Zhang, P., et al.: VINVL: revisiting visual representations in vision-language models. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00553"},{"key":"37_CR74","unstructured":"Zhang, Z., et al: UFC-bert: unifying multi-modal controls for conditional image synthesis. In: NeurIPS (2021)"},{"key":"37_CR75","doi-asserted-by":"crossref","unstructured":"Zhou, L., Palangi, H., Zhang, L., Hu, H., Corso, J., Gao, J.: Unified vision-language pre-training for image captioning and VQA. In: AAAI (2020)","DOI":"10.1609\/aaai.v34i07.7005"},{"key":"37_CR76","doi-asserted-by":"crossref","unstructured":"Zhu, Y., et al.: Knowledge perceived multi-modal pretraining in e-commerce. In: ACM MM (2021)","DOI":"10.1145\/3474085.3475648"},{"key":"37_CR77","doi-asserted-by":"crossref","unstructured":"Zhuge, M., et al.: Kaleido-bert: vision-language pre-training on fashion domain. 
In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01246"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_37","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T02:05:10Z","timestamp":1701309910000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_37"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":77,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_37","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}