{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:29:06Z","timestamp":1777656546003,"version":"3.51.4"},"publisher-location":"Cham","reference-count":67,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198328","type":"print"},{"value":"9783031198335","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_38","type":"book-chapter","created":{"date-parts":[[2022,11,4]],"date-time":"2022-11-04T00:40:30Z","timestamp":1667522430000},"page":"652-670","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":14,"title":["Weakly Supervised Grounding for VQA in Vision-Language Transformers"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6521-2512","authenticated-orcid":false,"given":"Aisha Urooj","family":"Khan","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1079-4441","authenticated-orcid":false,"given":"Hilde","family":"Kuehne","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4031-5886","authenticated-orcid":false,"given":"Chuang","family":"Gan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5354-2805","authenticated-orcid":false,"given":"Niels Da Vitoria","family":"Lobo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6172-5572","authenticated-orcid":false,"given":"Mubarak","family":"Shah","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"key":"38_CR1","unstructured":"Abacha, A.B., Hasan, S.A., Datla, V.V., Liu, J., Demner-Fushman, D., M\u00fcller, H.: VQA-med: overview of the medical visual question answering task at imageclef 2019. (2019)"},{"key":"38_CR2","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2425\u20132433 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"38_CR3","doi-asserted-by":"crossref","unstructured":"Arbelle, A., et al.: Detector-free weakly supervised grounding by separation. arXiv preprint arXiv:2104.09829 (2021)","DOI":"10.1109\/ICCV48922.2021.00182"},{"key":"38_CR4","doi-asserted-by":"crossref","unstructured":"Caron, M., et al.: Emerging properties in self-supervised vision transformers (2021)","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"38_CR5","doi-asserted-by":"crossref","unstructured":"Chen, K., Gao, J., Nevatia, R.: Knowledge aided consistency for weakly supervised phrase grounding. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
4042\u20134050 (2018)","DOI":"10.1109\/CVPR.2018.00425"},{"key":"38_CR6","doi-asserted-by":"crossref","unstructured":"Chen, Y.C., et al.: Uniter: learning universal image-text representations (2019)","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"38_CR7","doi-asserted-by":"crossref","unstructured":"Chen, Z., Ma, L., Luo, W., Wong, K.Y.K.: Weakly-supervised spatio-temporally grounding natural sentence in video. arXiv preprint arXiv:1906.02549 (2019)","DOI":"10.18653\/v1\/P19-1183"},{"key":"38_CR8","doi-asserted-by":"crossref","unstructured":"Das, A., Agrawal, H., Zitnick, C.L., Parikh, D., Batra, D.: Human Attention in Visual Question Answering: Do Humans and Deep Networks Look at the Same Regions? In: Conference on Empirical Methods in Natural Language Processing (EMNLP) (2016)","DOI":"10.18653\/v1\/D16-1092"},{"key":"38_CR9","doi-asserted-by":"crossref","unstructured":"Datta, S., Sikka, K., Roy, A., Ahuja, K., Parikh, D., Divakaran, A.: Align2ground: weakly supervised phrase grounding guided by image-caption alignment. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2601\u20132610 (2019)","DOI":"10.1109\/ICCV.2019.00269"},{"key":"38_CR10","doi-asserted-by":"crossref","unstructured":"Desai, K., Johnson, J.: VirTex: learning visual representations from textual annotations. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01101"},{"key":"38_CR11","unstructured":"Duan, S., Cao, J., Zhao, H.: Capsule-transformer for neural machine translation. arXiv preprint arXiv:2004.14649 (2020)"},{"key":"38_CR12","unstructured":"Duarte, K., Rawat, Y., Shah, M.: Videocapsulenet: a simplified network for action detection. In: Advances in Neural Information Processing Systems, pp. 7610\u20137619 (2018)"},{"key":"38_CR13","doi-asserted-by":"crossref","unstructured":"Duarte, K., Rawat, Y.S., Shah, M.: Capsulevos: semi-supervised video object segmentation using capsule routing. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 8480\u20138489 (2019)","DOI":"10.1109\/ICCV.2019.00857"},{"key":"38_CR14","doi-asserted-by":"crossref","unstructured":"Goyal, Y., Khot, T., Summers-Stay, D., Batra, D., Parikh, D.: Making the V in VQA matter: elevating the role of image understanding in visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6904\u20136913 (2017)","DOI":"10.1109\/CVPR.2017.670"},{"key":"38_CR15","doi-asserted-by":"publisher","unstructured":"Gu, S., Feng, Y.: Improving multi-head attention with capsule networks. In: Tang, J., Kan, M.Y., Zhao, D., Li, S., Zan, H. (eds.) NLPCC 2019. LNCS (LNAI), vol. 11838, pp. 314\u2013326. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-32233-5_25","DOI":"10.1007\/978-3-030-32233-5_25"},{"key":"38_CR16","doi-asserted-by":"crossref","unstructured":"Gurari, D., et al.: Vizwiz grand challenge: answering visual questions from blind people. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3608\u20133617 (2018)","DOI":"10.1109\/CVPR.2018.00380"},{"key":"38_CR17","doi-asserted-by":"crossref","unstructured":"Hinton, G.: How to represent part-whole hierarchies in a neural network. arXiv preprint arXiv:2102.12627 (2021)","DOI":"10.1162\/neco_a_01557"},{"key":"38_CR18","doi-asserted-by":"publisher","unstructured":"Hinton, G.E., Krizhevsky, A., Wang, S.D.: Transforming auto-encoders. In: Honkela, T., Duch, W., Girolami, M., Kaski, S. (eds.) ICANN 2011. LNCS, vol. 6791, pp. 44\u201351. Springer, Heidelberg (2011). 
https:\/\/doi.org\/10.1007\/978-3-642-21735-7_6","DOI":"10.1007\/978-3-642-21735-7_6"},{"key":"38_CR19","unstructured":"Hinton, G.E., Sabour, S., Frosst, N.: Matrix capsules with EM routing. In: International Conference on Learning Representations (2018)"},{"key":"38_CR20","doi-asserted-by":"crossref","unstructured":"Huang, D.A., Buch, S., Dery, L., Garg, A., Fei-Fei, L., Niebles, J.C.: Finding \"it\": weakly-supervised reference-aware visual grounding in instructional videos. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5948\u20135957 (2018)","DOI":"10.1109\/CVPR.2018.00623"},{"key":"38_CR21","doi-asserted-by":"crossref","unstructured":"Huang, Z., Zeng, Z., Huang, Y., Liu, B., Fu, D., Fu, J.: Seeing out of the box: End-to-end pre-training for vision-language representation learning. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.01278"},{"key":"38_CR22","unstructured":"Huang, Z., Zeng, Z., Liu, B., Fu, D., Fu, J.: Pixel-bert: aligning image pixels with text by deep multi-modal transformers. CoRR abs\/2004.00849 (2020). https:\/\/arxiv.org\/abs\/2004.00849"},{"key":"38_CR23","unstructured":"Hudson, D.A., Manning, C.D.: Compositional attention networks for machine reasoning. In: International Conference on Learning Representations (ICLR) (2018)"},{"key":"38_CR24","doi-asserted-by":"crossref","unstructured":"Hudson, D.A., Manning, C.D.: GQA: a new dataset for real-world visual reasoning and compositional question answering. Conference on Computer Vision and Pattern Recognition (CVPR) (2019)","DOI":"10.1109\/CVPR.2019.00686"},{"key":"38_CR25","doi-asserted-by":"crossref","unstructured":"Khan, A.U., Kuehne, H., Duarte, K., Gan, C., Lobo, N., Shah, M.: Found a reason for me? Weakly-supervised grounded visual question answering using capsules (2021)","DOI":"10.1109\/CVPR46437.2021.00836"},{"key":"38_CR26","doi-asserted-by":"crossref","unstructured":"Khan, S., Naseer, M., Hayat, M., Zamir, S.W., Khan, F.S., Shah, M.: Transformers in vision: a survey. arXiv preprint arXiv:2101.01169 (2021)","DOI":"10.1145\/3505244"},{"key":"38_CR27","unstructured":"Kim, W., Son, B., Kim, I.: VILT: vision-and-language transformer without convolution or region supervision. In: Meila, M., Zhang, T. (eds.) Proceedings of the 38th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 139, pp. 5583\u20135594. PMLR (2021). https:\/\/proceedings.mlr.press\/v139\/kim21k.html"},{"key":"38_CR29","doi-asserted-by":"crossref","unstructured":"Krishna, R., et al.: Visual genome: connecting language and vision using crowdsourced dense image annotations. Int. J. Comput. Vision 123(1), 32\u201373 (2017)","DOI":"10.1007\/s11263-016-0981-7"},{"key":"38_CR30","unstructured":"LaLonde, R., Bagci, U.: Capsules for object segmentation. arXiv preprint arXiv:1804.04241 (2018)"},{"key":"38_CR31","doi-asserted-by":"crossref","unstructured":"Li, G., Duan, N., Fang, Y., Gong, M., Jiang, D.: Unicoder-vl: a universal encoder for vision and language by cross-modal pre-training. 
In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 11336\u201311344 (2020)","DOI":"10.1609\/aaai.v34i07.6795"},{"key":"38_CR32","unstructured":"Li, J., Selvaraju, R.R., Gotmare, A.D., Joty, S., Xiong, C., Hoi, S.: Align before fuse: vision and language representation learning with momentum distillation. In: NeurIPS (2021)"},{"key":"38_CR33","unstructured":"Li, L.H., Yatskar, M., Yin, D., Hsieh, C.J., Chang, K.W.: Visualbert: a simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557 (2019)"},{"key":"38_CR34","doi-asserted-by":"publisher","unstructured":"Li, X., et al.: Oscar: Object-semantics aligned pre-training for vision-language tasks. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 121\u2013137. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_8","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"38_CR35","doi-asserted-by":"publisher","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"38_CR36","unstructured":"Liu, J., et al.: Transformer-based capsule network for stock movement prediction. In: Proceedings of the First Workshop on Financial Technology and Natural Language Processing, pp. 66\u201373 (2019)"},{"key":"38_CR37","doi-asserted-by":"crossref","unstructured":"Liu, Y., Wan, B., Ma, L., He, X.: Relation-aware instance refinement for weakly supervised visual grounding. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5612\u20135621 (2021)","DOI":"10.1109\/CVPR46437.2021.00556"},{"key":"38_CR38","unstructured":"Lu, J., Batra, D., Parikh, D., Lee, S.: Vilbert: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. arXiv preprint arXiv:1908.02265 (2019)"},{"key":"38_CR39","doi-asserted-by":"crossref","unstructured":"Lu, J., Goswami, V., Rohrbach, M., Parikh, D., Lee, S.: 12-in-1: multi-task vision and language representation learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10437\u201310446 (2020)","DOI":"10.1109\/CVPR42600.2020.01045"},{"key":"38_CR40","unstructured":"Lu, J., Yang, J., Batra, D., Parikh, D.: Hierarchical question-image co-attention for visual question answering. Adv. Neural Inf. Process. Syst. 29 (2016)"},{"key":"38_CR41","doi-asserted-by":"crossref","unstructured":"Mazzia, V., Salvetti, F., Chiaberge, M.: Efficient-capsnet: capsule network with self-attention routing. arXiv preprint arXiv:2101.12491 (2021)","DOI":"10.1038\/s41598-021-93977-0"},{"key":"38_CR42","doi-asserted-by":"crossref","unstructured":"Miech, A., Alayrac, J.B., Laptev, I., Sivic, J., Zisserman, A.: Thinking fast and slow: efficient text-to-visual retrieval with transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9826\u20139836 (2021)","DOI":"10.1109\/CVPR46437.2021.00970"},{"key":"38_CR43","unstructured":"Mobiny, A., Cicalese, P.A., Nguyen, H.V.: Trans-caps: transformer capsule networks with self-attention routing (2021). https:\/\/openreview.net\/forum?id=BUPIRa1D2J"},{"key":"38_CR44","doi-asserted-by":"crossref","unstructured":"Niu, Y., Tang, K., Zhang, H., Lu, Z., Hua, X.S., Wen, J.R.: Counterfactual VQA: a cause-effect look at language bias. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12700\u201312710 (2021)","DOI":"10.1109\/CVPR46437.2021.01251"},{"key":"38_CR45","doi-asserted-by":"crossref","unstructured":"Pfeiffer, J., et al.: XGQA: cross-lingual visual question answering. arXiv preprint arXiv:2109.06082 (2021)","DOI":"10.18653\/v1\/2022.findings-acl.196"},{"key":"38_CR46","doi-asserted-by":"crossref","unstructured":"Pucci, R., Micheloni, C., Martinel, N.: Self-attention agreement among capsules. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 272\u2013280 (2021)","DOI":"10.1109\/ICCVW54120.2021.00035"},{"key":"38_CR47","doi-asserted-by":"crossref","unstructured":"Qiao, T., Dong, J., Xu, D.: Exploring human-like attention supervision in visual question answering. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32 (2018)","DOI":"10.1609\/aaai.v32i1.12272"},{"key":"38_CR48","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision (2021)"},{"key":"38_CR49","unstructured":"Ramakrishnan, S., Agrawal, A., Lee, S.: Overcoming language priors in visual question answering with adversarial regularization. arXiv preprint arXiv:1810.03649 (2018)"},{"key":"38_CR50","unstructured":"Ribeiro, F.D.S., Duarte, K., Everett, M., Leontidis, G., Shah, M.: Learning with capsules: a survey. arXiv preprint arXiv:2206.02664 (2022)"},{"key":"38_CR51","doi-asserted-by":"crossref","unstructured":"Riquelme, F., De Goyeneche, A., Zhang, Y., Niebles, J.C., Soto, A.: Explaining VQA predictions using visual grounding and a knowledge base. Image Vision Comput. 101, 103968 (2020)","DOI":"10.1016\/j.imavis.2020.103968"},{"key":"38_CR52","unstructured":"Sabour, S., Frosst, N., Hinton, G.E.: Dynamic routing between capsules. In: NIPS (2017)"},{"key":"38_CR53","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., et al.: Taking a hint: leveraging explanations to make vision and language models more grounded. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2591\u20132600 (2019)","DOI":"10.1109\/ICCV.2019.00268"},{"key":"38_CR54","doi-asserted-by":"crossref","unstructured":"Shi, J., Xu, J., Gong, B., Xu, C.: Not all frames are equal: weakly-supervised video grounding with contextual similarity and visual clustering losses. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10444\u201310452 (2019)","DOI":"10.1109\/CVPR.2019.01069"},{"key":"38_CR55","unstructured":"Su, W., et al.: Vl-bert: pre-training of generic visual-linguistic representations. arXiv preprint arXiv:1908.08530 (2019)"},{"key":"38_CR56","doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: Lxmert: learning cross-modality encoder representations from transformers. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing (2019)","DOI":"10.18653\/v1\/D19-1514"},{"key":"38_CR57","doi-asserted-by":"crossref","unstructured":"Wang, L., Huang, J., Li, Y., Xu, K., Yang, Z., Yu, D.: Improving weakly supervised visual grounding by contrastive knowledge distillation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14090\u201314100 (2021)","DOI":"10.1109\/CVPR46437.2021.01387"},{"key":"38_CR58","doi-asserted-by":"crossref","unstructured":"Whitehead, S., Wu, H., Ji, H., Feris, R., Saenko, K.: Separating skills and concepts for novel visual question answering. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5632\u20135641 (2021)","DOI":"10.1109\/CVPR46437.2021.00558"},{"key":"38_CR59","unstructured":"Wu, L., Liu, X., Liu, Q.: Centroid transformers: learning to abstract with attention. arXiv preprint arXiv:2102.08606 (2021)"},{"key":"38_CR60","doi-asserted-by":"crossref","unstructured":"Xiao, F., Sigal, L., Jae Lee, Y.: Weakly-supervised visual grounding of phrases with linguistic structures. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5945\u20135954 (2017)","DOI":"10.1109\/CVPR.2017.558"},{"key":"38_CR61","doi-asserted-by":"crossref","unstructured":"Yang, X., Liu, X., Jian, M., Gao, X., Wang, M.: Weakly-supervised video object grounding by exploring spatio-temporal contexts. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 1939\u20131947 (2020)","DOI":"10.1145\/3394171.3413610"},{"key":"38_CR62","doi-asserted-by":"crossref","unstructured":"Yang, Z., He, X., Gao, J., Deng, L., Smola, A.: Stacked attention networks for image question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 21\u201329 (2016)","DOI":"10.1109\/CVPR.2016.10"},{"issue":"CSCW2","key":"38_CR63","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3415220","volume":"4","author":"X Zeng","year":"2020","unstructured":"Zeng, X., Wang, Y., Chiu, T.Y., Bhattacharya, N., Gurari, D.: Vision skills needed to answer visual questions. Proc. ACM Hum. Comput. Interact. 4(CSCW2), 1\u201331 (2020)","journal-title":"Proc. ACM Hum. Comput. Interact."},{"key":"38_CR64","doi-asserted-by":"crossref","unstructured":"Zhan, L.M., Liu, B., Fan, L., Chen, J., Wu, X.M.: Medical visual question answering via conditional reasoning. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 2345\u20132354 (2020)","DOI":"10.1145\/3394171.3413761"},{"issue":"10","key":"38_CR65","doi-asserted-by":"publisher","first-page":"1084","DOI":"10.1007\/s11263-017-1059-x","volume":"126","author":"J Zhang","year":"2018","unstructured":"Zhang, J., Bargal, S.A., Lin, Z., Brandt, J., Shen, X., Sclaroff, S.: Top-down neural attention by excitation backprop. Int. J. Comput. Vision 126(10), 1084\u20131102 (2018)","journal-title":"Int. J. Comput. Vision"},{"key":"38_CR66","doi-asserted-by":"crossref","unstructured":"Zhang, S., Qu, L., You, S., Yang, Z., Zhang, J.: Automatic generation of grounded visual questions. arXiv preprint arXiv:1612.06530 (2016)","DOI":"10.24963\/ijcai.2017\/592"},{"key":"38_CR67","doi-asserted-by":"publisher","unstructured":"Zhang, Y., Niebles, J.C., Soto, A.: Interpretable visual question answering by visual grounding from attention supervision mining. In: 2019 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 349\u2013357 (2019). 
https:\/\/doi.org\/10.1109\/WACV.2019.00043","DOI":"10.1109\/WACV.2019.00043"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_38","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,7]],"date-time":"2024-10-07T06:26:47Z","timestamp":1728282407000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_38"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":66,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_38","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information 
(provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
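
The closing peer-review assertion states its own arithmetic: acceptance rate = number of full papers accepted / number of submissions sent for review * 100, rounded to a whole number. A minimal Python sketch below recomputes that figure from the record's own assertion fields as a sanity check; the filename "crossref_record.json" is a hypothetical placeholder for this payload saved locally (with the hard line-wrapping inside strings removed so it parses as valid JSON), and only the standard-library json module is used.

```python
import json

# Load the Crossref work record; the envelope wraps the work under "message".
with open("crossref_record.json") as f:  # hypothetical local copy of the record above
    work = json.load(f)["message"]

# "assertion" is a flat list of {name, value, label, group} objects;
# index it by name for convenient lookup.
assertions = {a["name"]: a["value"] for a in work["assertion"]}

submitted = int(assertions["number_of_submissions_sent_for_review"])  # 5804
accepted = int(assertions["number_of_full_papers_accepted"])          # 1645

# Stated formula: accepted / submitted * 100, rounded to a whole number.
# 1645 / 5804 * 100 = 28.34... -> 28, matching the deposited "28%".
rate = round(accepted / submitted * 100)
print(f"{rate}%")
```

Running this prints "28%", agreeing with the acceptance_rate_of_full_papers value asserted in the record.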