{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,17]],"date-time":"2025-09-17T06:11:36Z","timestamp":1758089496952,"version":"3.44.0"},"publisher-location":"Cham","reference-count":27,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783032046239"},{"type":"electronic","value":"9783032046246"}],"license":[{"start":{"date-parts":[[2025,9,17]],"date-time":"2025-09-17T00:00:00Z","timestamp":1758067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,17]],"date-time":"2025-09-17T00:00:00Z","timestamp":1758067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-04624-6_16","type":"book-chapter","created":{"date-parts":[[2025,9,16]],"date-time":"2025-09-16T05:34:37Z","timestamp":1758000877000},"page":"270-286","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["KIEval: Evaluation Metric for\u00a0Document Key Information Extraction"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8168-6830","authenticated-orcid":false,"given":"Minsoo","family":"Khang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-7754-1805","authenticated-orcid":false,"given":"Sang Chul","family":"Jung","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5338-0113","authenticated-orcid":false,"given":"Sungrae","family":"Park","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-8685-810X","authenticated-orcid":false,"given":"Teakgyu","family":"Hong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,17]]},"reference":[{"key":"16_CR1","doi-asserted-by":"crossref","unstructured":"Appalaraju, S., Jasani, B., Kota, B.U., Xie, Y., Manmatha, R.: DocFormer: end-to-end transformer for document understanding. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 993\u20131003 (2021)","DOI":"10.1109\/ICCV48922.2021.00103"},{"key":"16_CR2","doi-asserted-by":"crossref","unstructured":"Biten, A.F., et al.: ICDAR 2019 competition on scene text visual question answering. In: 2019 International Conference on Document Analysis and Recognition (ICDAR), pp. 1563\u20131570. IEEE (2019)","DOI":"10.1109\/ICDAR.2019.00251"},{"key":"16_CR3","unstructured":"Chen, Z., et\u00a0al.: Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271 (2024)"},{"key":"16_CR4","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (2020)"},{"key":"16_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"532","DOI":"10.1007\/978-3-030-86549-8_34","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021","author":"\u0141 Garncarek","year":"2021","unstructured":"Garncarek, \u0141, et al.: LAMBERT: layout-aware language modeling for information extraction. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021. LNCS, vol. 12821, pp. 532\u2013547. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86549-8_34"},{"key":"16_CR6","doi-asserted-by":"crossref","unstructured":"He, J., et al.: ICL-D3IE: in-context learning with diverse demonstrations updating for document information extraction. arxiv 2023. arXiv preprint arXiv:2303.05063","DOI":"10.1109\/ICCV51070.2023.01785"},{"key":"16_CR7","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"16_CR8","doi-asserted-by":"crossref","unstructured":"Hong, T., Kim, D., Ji, M., Hwang, W., Nam, D., Park, S.: BROS: a pre-trained language model focusing on text and layout for better key information extraction from documents. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a036, pp. 10767\u201310775 (2022)","DOI":"10.1609\/aaai.v36i10.21322"},{"key":"16_CR9","doi-asserted-by":"crossref","unstructured":"Huang, Y., Lv, T., Cui, L., Lu, Y., Wei, F.: LayoutLMv3: pre-training for document ai with unified text and image masking. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 4083\u20134091 (2022)","DOI":"10.1145\/3503161.3548112"},{"key":"16_CR10","doi-asserted-by":"crossref","unstructured":"Hwang, W., Yim, J., Park, S., Yang, S., Seo, M.: Spatial dependency parsing for semi-structured document information extraction. arXiv preprint arXiv:2005.00642 (2020)","DOI":"10.18653\/v1\/2021.findings-acl.28"},{"key":"16_CR11","doi-asserted-by":"crossref","unstructured":"Jaume, G., Ekenel, H.K., Thiran, J.P.: FUNSD: a dataset for form understanding in noisy scanned documents. In: 2019 International Conference on Document Analysis and Recognition Workshops (ICDARW), vol.\u00a02, pp.\u00a01\u20136. IEEE (2019)","DOI":"10.1109\/ICDARW.2019.10029"},{"key":"16_CR12","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"498","DOI":"10.1007\/978-3-031-19815-1_29","volume-title":"ECCV 2022","author":"G Kim","year":"2022","unstructured":"Kim, G., et al.: OCR-free document understanding transformer. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13688, pp. 498\u2013517. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19815-1_29"},{"key":"16_CR13","unstructured":"Lee, K., et al.: Pix2struct: screenshot parsing as pretraining for visual language understanding. In: International Conference on Machine Learning, pp. 18893\u201318912. PMLR (2023)"},{"key":"16_CR14","doi-asserted-by":"crossref","unstructured":"Li, C., et al.: StructuralLM: structural pre-training for form understanding. In: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 6309\u20136318 (2021)","DOI":"10.18653\/v1\/2021.acl-long.493"},{"key":"16_CR15","unstructured":"OpenAI: GPT-4V(ision) system card (2023)"},{"key":"16_CR16","unstructured":"Park, S., et al.: CORD: a consolidated receipt dataset for post-OCR parsing. In: Workshop on Document Intelligence at NeurIPS 2019 (2019)"},{"key":"16_CR17","doi-asserted-by":"crossref","unstructured":"Peng, Q., et\u00a0al.: Ernie-layout: layout knowledge enhanced pre-training for visually-rich document understanding. arXiv preprint arXiv:2210.06155 (2022)","DOI":"10.18653\/v1\/2022.findings-emnlp.274"},{"key":"16_CR18","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"732","DOI":"10.1007\/978-3-030-86331-9_47","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021","author":"R Powalski","year":"2021","unstructured":"Powalski, R., Borchmann, \u0141, Jurkiewicz, D., Dwojak, T., Pietruszka, M., Pa\u0142ka, G.: Going full-TILT Boogie on document understanding with text-image-layout transformer. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021, Part II. LNCS, vol. 12822, pp. 732\u2013747. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86331-9_47"},{"key":"16_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"635","DOI":"10.1007\/978-3-030-86337-1_42","volume-title":"Document Analysis and Recognition - ICDAR 2021","author":"R Tito","year":"2021","unstructured":"Tito, R., Mathew, M., Jawahar, C.V., Valveny, E., Karatzas, D.: ICDAR 2021 competition on document visual question answering. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021, Part IV. LNCS, vol. 12824, pp. 635\u2013649. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86337-1_42"},{"key":"16_CR20","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inf. Process. systems 30 (2017)"},{"key":"16_CR21","doi-asserted-by":"crossref","unstructured":"Wang, J., Jin, L., Ding, K.: LiLT: a simple yet effective language-independent layout transformer for structured document understanding. In: Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 7747\u20137757 (2022)","DOI":"10.18653\/v1\/2022.acl-long.534"},{"key":"16_CR22","unstructured":"Wang, P., et al.: Qwen2-VL: enhancing vision-language model\u2019s perception of the world at any resolution. arXiv preprint arXiv:2409.12191 (2024)"},{"key":"16_CR23","doi-asserted-by":"crossref","unstructured":"Xu, Y., et\u00a0al.: LayoutLMv2: multi-modal pre-training for visually-rich document understanding. arXiv preprint arXiv:2012.14740 (2020)","DOI":"10.18653\/v1\/2021.acl-long.201"},{"key":"16_CR24","doi-asserted-by":"crossref","unstructured":"Xu, Y., Li, M., Cui, L., Huang, S., Wei, F., Zhou, M.: LayoutLM: pre-training of text and layout for document image understanding. In: Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 1192\u20131200 (2020)","DOI":"10.1145\/3394486.3403172"},{"key":"16_CR25","doi-asserted-by":"crossref","unstructured":"Xu, Y., et al.: XFUND: a benchmark dataset for multilingual visually rich form understanding. In: Findings of the Association for Computational Linguistics: ACL 2022, pp. 3214\u20133224 (2022)","DOI":"10.18653\/v1\/2022.findings-acl.253"},{"key":"16_CR26","doi-asserted-by":"crossref","unstructured":"Yu, W., et\u00a0al.: ICDAR 2023 competition on structured text extraction from visually-rich document images. arXiv preprint arXiv:2306.03287 (2023)","DOI":"10.1007\/978-3-031-41679-8_32"},{"key":"16_CR27","unstructured":"Zhang, J., You, Z., Wang, J., Le, X.: SAIL: sample-centric in-context learning for document information extraction. arXiv preprint arXiv:2412.17092 (2024)"}],"container-title":["Lecture Notes in Computer Science","Document Analysis and Recognition \u2013 ICDAR 2025"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-04624-6_16","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,16]],"date-time":"2025-09-16T05:34:47Z","timestamp":1758000887000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-04624-6_16"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,17]]},"ISBN":["9783032046239","9783032046246"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-04624-6_16","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025,9,17]]},"assertion":[{"value":"17 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICDAR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Document Analysis and Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Wuhan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icdar2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/iapr.org\/icdar2025","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}