{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T23:36:15Z","timestamp":1767137775128,"version":"build-2238731810"},"publisher-location":"Cham","reference-count":33,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031826696","type":"print"},{"value":"9783031826702","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-82670-2_7","type":"book-chapter","created":{"date-parts":[[2025,2,6]],"date-time":"2025-02-06T04:40:11Z","timestamp":1738816811000},"page":"80-93","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Beyond Image-Text Matching: Verb Understanding in\u00a0Multimodal Transformers Using Guided Masking"],"prefix":"10.1007","author":[{"given":"Ivana","family":"Be\u0148ov\u00e1","sequence":"first","affiliation":[]},{"given":"Jana","family":"Ko\u0161eck\u00e1","sequence":"additional","affiliation":[]},{"given":"Michal","family":"Gregor","sequence":"additional","affiliation":[]},{"given":"Martin","family":"Tamajka","sequence":"additional","affiliation":[]},{"given":"Marcel","family":"Vesel\u00fd","sequence":"additional","affiliation":[]},{"given":"Mari\u00e1n","family":"\u0160imko","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,7]]},"reference":[{"key":"7_CR1","doi-asserted-by":"crossref","unstructured":"Aflalo, E., et al.: Vl-interpret: an interactive visualization tool for interpreting vision-language transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 21406\u201321415 (2022)","DOI":"10.1109\/CVPR52688.2022.02072"},{"key":"7_CR2","doi-asserted-by":"crossref","unstructured":"Bi, J., et al.: Vl-match: enhancing vision-language pretraining with token-level and instance-level matching. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2584\u20132593 (2023)","DOI":"10.1109\/ICCV51070.2023.00244"},{"key":"7_CR3","doi-asserted-by":"publisher","first-page":"978","DOI":"10.1162\/tacl_a_00408","volume":"9","author":"E Bugliarello","year":"2021","unstructured":"Bugliarello, E., Cotterell, R., Okazaki, N., Elliott, D.: Multimodal pretraining unmasked: a meta-analysis and a unified framework of vision-and-language BERTs. Trans. Assoc. Comput. Linguist. 9, 978\u2013994 (2021)","journal-title":"Trans. Assoc. Comput. Linguist."},{"key":"7_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"565","DOI":"10.1007\/978-3-030-58539-6_34","volume-title":"Computer Vision \u2013 ECCV 2020","author":"J Cao","year":"2020","unstructured":"Cao, J., Gan, Z., Cheng, Yu., Yu, L., Chen, Y.-C., Liu, J.: Behind the scene: revealing the secrets of pre-trained vision-and-language models. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 
12351, pp. 565\u2013580. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58539-6_34"},{"key":"7_CR5","doi-asserted-by":"crossref","unstructured":"Chefer, H., Gur, S., Wolf, L.: Generic attention-model explainability for interpreting bi-modal and encoder-decoder transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 397\u2013406 (2021)","DOI":"10.1109\/ICCV48922.2021.00045"},{"key":"7_CR6","doi-asserted-by":"crossref","unstructured":"Chen, P., Li, Q., Biaz, S., Bui, T., Nguyen, A.: gScoreCAM: what objects is CLIP looking at? In: Proceedings of the Asian Conference on Computer Vision, pp. 1959\u20131975 (2022)","DOI":"10.1007\/978-3-031-26316-3_35"},{"key":"7_CR7","doi-asserted-by":"crossref","unstructured":"Chen, Y.C., et al.: Uniter: learning universal image-text representations (2019)","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"7_CR8","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"7_CR9","doi-asserted-by":"crossref","unstructured":"Frank, S., Bugliarello, E., Elliott, D.: Vision-and-language or vision-for-language? On cross-modal influence in multimodal transformers. arXiv preprint arXiv:2109.04448 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.775"},{"key":"7_CR10","unstructured":"Gupta, S., Malik, J.: Visual semantic role labeling. arXiv preprint arXiv:1505.04474 (2015)"},{"key":"7_CR11","doi-asserted-by":"crossref","unstructured":"Hendricks, L.A., Nematzadeh, A.: Probing image-language transformers for verb understanding. arXiv preprint arXiv:2106.09141 (2021)","DOI":"10.18653\/v1\/2021.findings-acl.318"},{"key":"7_CR12","doi-asserted-by":"crossref","unstructured":"Herzig, R., et al.: Incorporating structured representations into pretrained vision & language models using scene graphs. arXiv preprint arXiv:2305.06343 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.870"},{"key":"7_CR13","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning, pp. 12888\u201312900. PMLR (2022)"},{"key":"7_CR14","unstructured":"Li, J., Selvaraju, R., Gotmare, A., Joty, S., Xiong, C., Hoi, S.C.H.: Align before fuse: vision and language representation learning with momentum distillation. In: Advances in Neural Information Processing Systems, vol. 34, pp. 9694\u20139705 (2021)"},{"key":"7_CR15","unstructured":"Li, L.H., Yatskar, M., Yin, D., Hsieh, C.J., Chang, K.W.: VisualBERT: a simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557 (2019)"},{"key":"7_CR16","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"7_CR17","doi-asserted-by":"crossref","unstructured":"Liu, F., Emerson, G., Collier, N.: Visual spatial reasoning. 
arXiv preprint arXiv:2205.00363 (2022)","DOI":"10.1162\/tacl_a_00566"},{"key":"7_CR18","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023)"},{"key":"7_CR19","doi-asserted-by":"crossref","unstructured":"Liu, X., Yin, D., Feng, Y., Zhao, D.: Things not written in text: exploring spatial commonsense from visual signals. arXiv preprint arXiv:2203.08075 (2022)","DOI":"10.18653\/v1\/2022.acl-long.168"},{"key":"7_CR20","unstructured":"Lu, J., Batra, D., Parikh, D., Lee, S.: ViLBERT: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"7_CR21","unstructured":"Parcalabescu, L., Gatt, A., Frank, A., Calixto, I.: Seeing past words: testing the cross-modal capabilities of pretrained V&L models on counting tasks. arXiv preprint arXiv:2012.12352 (2020)"},{"key":"7_CR22","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"7_CR23","doi-asserted-by":"crossref","unstructured":"Salin, E., Farah, B., Ayache, S., Favre, B.: Are vision-language transformers learning multimodal representations? A probing perspective. In: AAAI 2022 (2022)","DOI":"10.1609\/aaai.v36i10.21375"},{"key":"7_CR24","doi-asserted-by":"crossref","unstructured":"Sharma, P., Ding, N., Goodman, S., Soricut, R.: Conceptual captions: a cleaned, hypernymed, image alt-text dataset for automatic image captioning. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2556\u20132565 (2018)","DOI":"10.18653\/v1\/P18-1238"},{"key":"7_CR25","doi-asserted-by":"crossref","unstructured":"Shekhar, R., et al.: Foil it! Find one mismatch between image and language caption. arXiv preprint arXiv:1705.01359 (2017)","DOI":"10.18653\/v1\/P17-1024"},{"key":"7_CR26","doi-asserted-by":"crossref","unstructured":"Singh, A., et al.: Flava: a foundational language and vision alignment model. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15638\u201315650 (2022)","DOI":"10.1109\/CVPR52688.2022.01519"},{"key":"7_CR27","doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: Lxmert: learning cross-modality encoder representations from transformers. arXiv preprint arXiv:1908.07490 (2019)","DOI":"10.18653\/v1\/D19-1514"},{"key":"7_CR28","doi-asserted-by":"crossref","unstructured":"Thrush, T., et al.: Winoground: probing vision and language models for visio-linguistic compositionality. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5238\u20135248 (2022)","DOI":"10.1109\/CVPR52688.2022.00517"},{"key":"7_CR29","doi-asserted-by":"crossref","unstructured":"Van\u00a0Nguyen, M., Lai, V.D., Veyseh, A.P.B., Nguyen, T.H.: Trankit: a light-weight transformer-based toolkit for multilingual natural language processing. arXiv preprint arXiv:2101.03289 (2021)","DOI":"10.18653\/v1\/2021.eacl-demos.10"},{"key":"7_CR30","doi-asserted-by":"crossref","unstructured":"Yang, Z., Kafle, K., Dernoncourt, F., Ordonez, V.: Improving visual grounding by encouraging consistent gradient-based explanations. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19165\u201319174 (2023)","DOI":"10.1109\/CVPR52729.2023.01837"},{"key":"7_CR31","unstructured":"Yarom, M., et al.: What you see is what you read? 
Improving text-image alignment evaluation. arXiv preprint arXiv:2305.10400 (2023)"},{"key":"7_CR32","unstructured":"Yuksekgonul, M., Bianchi, F., Kalluri, P., Jurafsky, D., Zou, J.: When and why vision-language models behave like bags-of-words, and what to do about it? arXiv preprint arXiv:2210.01936 (2022)"},{"key":"7_CR33","unstructured":"Zeng, Y., Zhang, X., Li, H.: Multi-grained vision language pre-training: aligning texts with visual concepts. arXiv preprint arXiv:2111.08276 (2021)"}],"updated-by":[{"DOI":"10.1007\/978-3-031-82670-2_26","type":"correction","label":"Correction","source":"publisher","updated":{"date-parts":[[2025,8,6]],"date-time":"2025-08-06T00:00:00Z","timestamp":1754438400000}}],"container-title":["Lecture Notes in Computer Science","SOFSEM 2025: Theory and Practice of Computer Science"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-82670-2_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T04:41:02Z","timestamp":1757133662000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-82670-2_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031826696","9783031826702"],"references-count":33,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-82670-2_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"7 February 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"6 August 2025","order":2,"name":"change_date","label":"Change Date","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"Correction","order":3,"name":"change_type","label":"Change Type","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"A correction has been published.","order":4,"name":"change_details","label":"Change Details","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"SOFSEM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Current Trends in Theory and Practice of Computer Science","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Bratislava","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Slovakia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 January 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 January 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"50","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"sofsem2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.sofsem.sk","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}