{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,10]],"date-time":"2026-02-10T18:42:39Z","timestamp":1770748959368,"version":"3.50.0"},"publisher-location":"Cham","reference-count":58,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031732416","type":"print"},{"value":"9783031732423","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73242-3_14","type":"book-chapter","created":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:15:43Z","timestamp":1730106943000},"page":"241-259","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["VisFocus: Prompt-Guided Vision Encoders for\u00a0OCR-Free Dense Document Understanding"],"prefix":"10.1007","author":[{"given":"Ofir","family":"Abramovich","sequence":"first","affiliation":[]},{"given":"Niv","family":"Nayman","sequence":"additional","affiliation":[]},{"given":"Sharon","family":"Fogel","sequence":"additional","affiliation":[]},{"given":"Inbal","family":"Lavi","sequence":"additional","affiliation":[]},{"given":"Ron","family":"Litman","sequence":"additional","affiliation":[]},{"given":"Shahar","family":"Tsiper","sequence":"additional","affiliation":[]},{"given":"Royee","family":"Tichauer","sequence":"additional","affiliation":[]},{"given":"Srikar","family":"Appalaraju","sequence":"additional","affiliation":[]},{"given":"Shai","family":"Mazor","sequence":"additional","affiliation":[]},{"given":"R.","family":"Manmatha","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,29]]},"reference":[{"key":"14_CR1","unstructured":"Alayrac, J.B., et al.: Flamingo: a visual language model for few-shot learning (2022)"},{"key":"14_CR2","doi-asserted-by":"crossref","unstructured":"Appalaraju, S., Jasani, B., Kota, B.U., Xie, Y., Manmatha, R.: DocFormer: end-to-end transformer for document understanding. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 993\u20131003 (2021)","DOI":"10.1109\/ICCV48922.2021.00103"},{"key":"14_CR3","doi-asserted-by":"crossref","unstructured":"Appalaraju, S., Tang, P., Dong, Q., Sankaran, N., Zhou, Y., Manmatha, R.: DocFormerv2: Local features for document understanding. In: AAAI Conference on Artificial Intelligence (2024)","DOI":"10.1609\/aaai.v38i2.27828"},{"key":"14_CR4","doi-asserted-by":"crossref","unstructured":"Baechler, G., et al.: ScreenAI: a vision-language model for UI and infographics understanding. arXiv preprint arXiv:2402.04615 (2024)","DOI":"10.24963\/ijcai.2024\/339"},{"key":"14_CR5","unstructured":"Bai, J., et al.: Qwen technical report. arXiv preprint arXiv:2309.16609 (2023)"},{"key":"14_CR6","unstructured":"Bai, J., et al.: Qwen-VL: a frontier large vision-language model with versatile abilities. 
arXiv preprint arXiv:2308.12966 (2023)"},{"key":"14_CR7","unstructured":"Bai, J., et al.: Qwen-VL: A versatile vision-language model for understanding, localization, text reading, and beyond (2023)"},{"key":"14_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"241","DOI":"10.1007\/978-3-031-25069-9_16","volume-title":"Computer Vision - ECCV 2022 Workshops","author":"AF Biten","year":"2022","unstructured":"Biten, A.F., Tito, R., Gomez, L., Valveny, E., Karatzas, D.: OCR-IDL: OCR annotations for industry document library dataset. In: Karlinsky, L., Michaeli, T., Nishino, K. (eds.) ECCV 2022. LNCS, vol. 13804, pp. 241\u2013252. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-25069-9_16"},{"key":"14_CR9","unstructured":"Chen, X., et al.: PaLI-3 vision language models: smaller, faster, stronger (2023)"},{"key":"14_CR10","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"280","DOI":"10.1007\/978-3-031-25069-9_19","volume-title":"Computer Vision \u2013 ECCV 2022 Workshops","author":"B Davis","year":"2022","unstructured":"Davis, B., Morse, B., Price, B., Tensmeyer, C., Wigington, C., Morariu, V.: End-to-end document recognition and understanding with dessurt. In: Karlinsky, L., Michaeli, T., Nishino, K. (eds.) ECCV 2022. LNCS, vol. 13804, pp. 280\u2013296. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-25069-9_19"},{"key":"14_CR11","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"14_CR12","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. CoRR abs\/2010.11929 (2020). https:\/\/arxiv.org\/abs\/2010.11929"},{"key":"14_CR13","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (2020)"},{"key":"14_CR14","doi-asserted-by":"publisher","unstructured":"Fujinuma, Y., Varia, S., Sankaran, N., Appalaraju, S., Min, B., Vyas, Y.: A multi-modal multilingual benchmark for document image classification. In: Bouamor, H., Pino, J., Bali, K. (eds.) Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 14361\u201314376. Association for Computational Linguistics, Singapore (2023). https:\/\/doi.org\/10.18653\/v1\/2023.findings-emnlp.958, https:\/\/aclanthology.org\/2023.findings-emnlp.958","DOI":"10.18653\/v1\/2023.findings-emnlp.958"},{"key":"14_CR15","doi-asserted-by":"crossref","unstructured":"Ganz, R., et al.: Question aware vision transformer for multimodal reasoning. arXiv preprint arXiv:2402.05472 (2024)","DOI":"10.1109\/CVPR52733.2024.01315"},{"key":"14_CR16","doi-asserted-by":"crossref","unstructured":"Ganz, R., Nuriel, O., Aberdam, A., Kittenplon, Y., Mazor, S., Litman, R.: Towards models that can see and read. arXiv preprint arXiv:2301.07389 (2023)","DOI":"10.1109\/ICCV51070.2023.01985"},{"key":"14_CR17","doi-asserted-by":"crossref","unstructured":"Guillaume\u00a0Jaume, Hazim Kemal\u00a0Ekenel, J.P.T.: FUNSD: a dataset for form understanding in noisy scanned documents. 
In: Accepted to ICDAR-OST (2019)","DOI":"10.1109\/ICDARW.2019.10029"},{"key":"14_CR18","doi-asserted-by":"crossref","unstructured":"Harley, A.W., Ufkes, A., Derpanis, K.G.: Evaluation of deep convolutional nets for document image classification and retrieval (2015)","DOI":"10.1109\/ICDAR.2015.7333910"},{"key":"14_CR19","unstructured":"Harley, A.W., Ufkes, A., Derpanis, K.G.: Evaluation of deep convolutional nets for document image classification and retrieval. In: International Conference on Document Analysis and Recognition (ICDAR)"},{"key":"14_CR20","doi-asserted-by":"crossref","unstructured":"Huang, Y., Lv, T., Cui, L., Lu, Y., Wei, F.: LayoutLMv3: pre-training for document AI with unified text and image masking. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 4083\u20134091 (2022)","DOI":"10.1145\/3503161.3548112"},{"key":"14_CR21","doi-asserted-by":"publisher","unstructured":"Hwang, W., Lee, H., Yim, J., Kim, G., Seo, M.: Cost-effective end-to-end information extraction for semi-structured document images. In: Moens, M.F., Huang, X., Specia, L., Yih, S.W.T. (eds.) Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp. 3375\u20133383. Association for Computational Linguistics, Online and Punta Cana, Dominican Republic (2021). https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.271, https:\/\/aclanthology.org\/2021.emnlp-main.271","DOI":"10.18653\/v1\/2021.emnlp-main.271"},{"key":"14_CR22","doi-asserted-by":"crossref","unstructured":"Jaume, G., Ekenel, H.K., Thiran, J.P.: FUNSD: a dataset for form understanding in noisy scanned documents (2019)","DOI":"10.1109\/ICDARW.2019.10029"},{"key":"14_CR23","doi-asserted-by":"crossref","unstructured":"Jiao, X., Yin, Y., Shang, L., Jiang, X., Chen, X., Li, L., Wang, F., Liu, Q.: TinyBERT: distilling BERT for natural language understanding (2019)","DOI":"10.18653\/v1\/2020.findings-emnlp.372"},{"key":"14_CR24","doi-asserted-by":"crossref","unstructured":"Kembhavi, A., Salvato, M., Kolve, E., Seo, M., Hajishirzi, H., Farhadi, A.: A diagram is worth a dozen images. ArXiv abs\/1603.07396 (2016). https:\/\/api.semanticscholar.org\/CorpusID:2682274","DOI":"10.1007\/978-3-319-46493-0_15"},{"key":"14_CR25","unstructured":"Kim, G., et al.: Donut: document understanding transformer without OCR. arXiv preprint arXiv:2111.156647, 15 (2021)"},{"key":"14_CR26","unstructured":"Lee, K., et al.: Pix2Struct: screenshot parsing as pretraining for visual language understanding. In: International Conference on Machine Learning, pp. 18893\u201318912. PMLR (2023)"},{"key":"14_CR27","doi-asserted-by":"publisher","unstructured":"Lewis, D., Agam, G., Argamon, S., Frieder, O., Grossman, D., Heard, J.: Building a test collection for complex document information processing. In: Proceedings of the 29th Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR 2006, pp. 665\u2013666. Association for Computing Machinery, New York (2006). 
https:\/\/doi.org\/10.1145\/1148170.1148307","DOI":"10.1145\/1148170.1148307"},{"key":"14_CR28","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models (2023)"},{"key":"14_CR29","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning (2023)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"14_CR30","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023)"},{"key":"14_CR31","doi-asserted-by":"crossref","unstructured":"Liu, Z., et\u00a0al.: Swin transformer v2: scaling up capacity and resolution. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12009\u201312019 (2022)","DOI":"10.1109\/CVPR52688.2022.01170"},{"key":"14_CR32","unstructured":"Loshchilov, I., Hutter, F.: SGDR: stochastic gradient descent with restarts. CoRR abs\/1608.03983 (2016). http:\/\/arxiv.org\/abs\/1608.03983"},{"key":"14_CR33","unstructured":"Loshchilov, I., Hutter, F.: Fixing weight decay regularization in Adam. CoRR abs\/1711.05101 (2017). http:\/\/arxiv.org\/abs\/1711.05101"},{"key":"14_CR34","doi-asserted-by":"publisher","unstructured":"Masry, A., Long, D., Tan, J.Q., Joty, S., Hoque, E.: ChartQA: a benchmark for question answering about charts with visual and logical reasoning. In: Findings of the Association for Computational Linguistics: ACL 2022, pp. 2263\u20132279. Association for Computational Linguistics, Dublin (2022). https:\/\/doi.org\/10.18653\/v1\/2022.findings-acl.177, https:\/\/aclanthology.org\/2022.findings-acl.177","DOI":"10.18653\/v1\/2022.findings-acl.177"},{"key":"14_CR35","doi-asserted-by":"crossref","unstructured":"Mathew, M., Bagal, V., Tito, R., Karatzas, D., Valveny, E., Jawahar, C.: InfographicVQA. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 1697\u20131706 (2022)","DOI":"10.1109\/WACV51458.2022.00264"},{"key":"14_CR36","doi-asserted-by":"crossref","unstructured":"Mathew, M., Karatzas, D., Jawahar, C.: DocVQA: a dataset for VQA on document images. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 2200\u20132209 (2021)","DOI":"10.1109\/WACV48630.2021.00225"},{"key":"14_CR37","doi-asserted-by":"crossref","unstructured":"Mishra, A., Shekhar, S., Singh, A.K., Chakraborty, A.: OCR-VQA: visual question answering by reading text in images. In: ICDAR (2019)","DOI":"10.1109\/ICDAR.2019.00156"},{"key":"14_CR38","doi-asserted-by":"crossref","unstructured":"Powalski, R., Borchmann, \u0141., Jurkiewicz, D., Dwojak, T., Pietruszka, M., Pa\u0142ka, G.: Going full-tilt boogie on document understanding with text-image-layout transformer. ArXiv abs\/2102.09550 (2021). https:\/\/api.semanticscholar.org\/CorpusID:231951453","DOI":"10.1007\/978-3-030-86331-9_47"},{"key":"14_CR39","doi-asserted-by":"crossref","unstructured":"Powalski, R., Borchmann, \u0141., Jurkiewicz, D., Dwojak, T., Pietruszka, M., Pa\u0142ka, G.: Going full-tilt boogie on document understanding with text-image-layout transformer (2021)","DOI":"10.1007\/978-3-030-86331-9_47"},{"key":"14_CR40","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. 
arXiv e-prints (2019)"},{"issue":"1","key":"14_CR41","first-page":"5485","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res. 21(1), 5485\u20135551 (2020)","journal-title":"J. Mach. Learn. Res."},{"key":"14_CR42","unstructured":"Seunghyun, P., et al.: CORD: a consolidated receipt dataset for post-OCR parsing (2019)"},{"key":"14_CR43","unstructured":"Srivastava, N., Hinton, G., Krizhevsky, A., Sutskever, I., Salakhutdinov, R.: Dropout: a simple way to prevent neural networks from overfitting. J. Mach. Learn. Res. 15(56), 1929\u20131958 (2014). http:\/\/jmlr.org\/papers\/v15\/srivastava14a.html"},{"key":"14_CR44","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"348","DOI":"10.1007\/11669487_31","volume-title":"Document Analysis Systems VII","author":"K Taghva","year":"2006","unstructured":"Taghva, K., Beckley, R., Coombs, J.: The effects of OCR error on the extraction of private information. In: Bunke, H., Spitz, A.L. (eds.) DAS 2006. LNCS, vol. 3872, pp. 348\u2013357. Springer, Heidelberg (2006). https:\/\/doi.org\/10.1007\/11669487_31"},{"key":"14_CR45","doi-asserted-by":"crossref","unstructured":"Tanaka, R., Iki, T., Nishida, K., Saito, K., Suzuki, J.: InstructDoc: a dataset for zero-shot generalization of visual document understanding with instructions (2024)","DOI":"10.1609\/aaai.v38i17.29874"},{"key":"14_CR46","doi-asserted-by":"crossref","unstructured":"Tang, Z., et al.: Unifying vision, text, and layout for universal document processing. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 19254\u201319264 (2022). https:\/\/api.semanticscholar.org\/CorpusID:254275326","DOI":"10.1109\/CVPR52729.2023.01845"},{"key":"14_CR47","unstructured":"Tay, Y., et\u00a0al.: UL2: unifying language learning paradigms. In: The Eleventh International Conference on Learning Representations (2022)"},{"key":"14_CR48","unstructured":"Touvron, H., et\u00a0al.: LLaMA: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"14_CR49","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"14_CR50","unstructured":"Wang, D., et al.: DocLLM: a layout-aware generative language model for multimodal document understanding (2023)"},{"key":"14_CR51","unstructured":"Wang, W., et\u00a0al.: VisionLLM: large language model is also an open-ended decoder for vision-centric tasks. arXiv preprint arXiv:2305.11175 (2023)"},{"key":"14_CR52","doi-asserted-by":"publisher","unstructured":"Xu, Y., et al.: LayoutLMv2: multi-modal pre-training for visually-rich document understanding. In: Zong, C., Xia, F., Li, W., Navigli, R. (eds.) Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 2579\u20132591. Association for Computational Linguistics, Online (2021). https:\/\/doi.org\/10.18653\/v1\/2021.acl-long.201, https:\/\/aclanthology.org\/2021.acl-long.201","DOI":"10.18653\/v1\/2021.acl-long.201"},{"key":"14_CR53","doi-asserted-by":"crossref","unstructured":"Xu, Y., Li, M., Cui, L., Huang, S., Wei, F., Zhou, M.: LayoutLM: pre-training of text and layout for document image understanding. In: Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 
1192\u20131200 (2020)","DOI":"10.1145\/3394486.3403172"},{"key":"14_CR54","unstructured":"Ye, J., et\u00a0al.: mPLUG-DocOwl: modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499 (2023)"},{"key":"14_CR55","doi-asserted-by":"crossref","unstructured":"Ye, J., et\u00a0al.: UReader: universal OCR-free visually-situated language understanding with multimodal large language model. In: The 2023 Conference on Empirical Methods in Natural Language Processing (2023)","DOI":"10.18653\/v1\/2023.findings-emnlp.187"},{"key":"14_CR56","unstructured":"Ye, Q., et\u00a0al.: mPLUG-Owl: modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178 (2023)"},{"key":"14_CR57","doi-asserted-by":"crossref","unstructured":"Zhai, X., Kolesnikov, A., Houlsby, N., Beyer, L.: Scaling vision transformers (2021)","DOI":"10.1109\/CVPR52688.2022.01179"},{"key":"14_CR58","unstructured":"Zhang, Y., et al.: LLaVAR: enhanced visual instruction tuning for text-rich image understanding (2023)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73242-3_14","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:33:03Z","timestamp":1730107983000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73242-3_14"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,29]]},"ISBN":["9783031732416","9783031732423"],"references-count":58,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73242-3_14","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,29]]},"assertion":[{"value":"29 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}