{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,27]],"date-time":"2026-02-27T15:25:24Z","timestamp":1772205924139,"version":"3.50.1"},"publisher-location":"Cham","reference-count":22,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031416781","type":"print"},{"value":"9783031416798","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-41679-8_32","type":"book-chapter","created":{"date-parts":[[2023,8,18]],"date-time":"2023-08-18T07:02:59Z","timestamp":1692342179000},"page":"536-552","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":11,"title":["ICDAR 2023 Competition on\u00a0Structured Text Extraction from\u00a0Visually-Rich Document Images"],"prefix":"10.1007","author":[{"given":"Wenwen","family":"Yu","sequence":"first","affiliation":[]},{"given":"Chengquan","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Haoyu","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Hua","sequence":"additional","affiliation":[]},{"given":"Bohan","family":"Li","sequence":"additional","affiliation":[]},{"given":"Huang","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Mingyu","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Mingrui","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Jianfeng","family":"Kuang","sequence":"additional","affiliation":[]},{"given":"Mengjun","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Yuning","family":"Du","sequence":"additional","affiliation":[]},{"given":"Shikun","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Xiaoguang","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Pengyuan","family":"Lyu","sequence":"additional","affiliation":[]},{"given":"Kun","family":"Yao","sequence":"additional","affiliation":[]},{"given":"Yuechen","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Yuliang","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Wanxiang","family":"Che","sequence":"additional","affiliation":[]},{"given":"Errui","family":"Ding","sequence":"additional","affiliation":[]},{"given":"Cheng-Lin","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Jiebo","family":"Luo","sequence":"additional","affiliation":[]},{"given":"Shuicheng","family":"Yan","sequence":"additional","affiliation":[]},{"given":"Min","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Dimosthenis","family":"Karatzas","sequence":"additional","affiliation":[]},{"given":"Xing","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Jingdong","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xiang","family":"Bai","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,19]]},"reference":[{"key":"3
2_CR1","doi-asserted-by":"crossref","unstructured":"Appalaraju, S., Jasani, B., Kota, B.U., Xie, Y., Manmatha, R.: Docformer: end-to-end transformer for document understanding. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 993\u20131003 (2021)","DOI":"10.1109\/ICCV48922.2021.00103"},{"key":"32_CR2","doi-asserted-by":"crossref","unstructured":"Cao, H., et al.: Query-driven generative network for document information extraction in the wild. In: Proceedings of the 30th ACM International Conference on Multimedia (2022)","DOI":"10.1145\/3503161.3547877"},{"key":"32_CR3","doi-asserted-by":"crossref","unstructured":"Cao, H., et al.: GMN: generative multi-modal network for practical document information extraction. In: North American Chapter of the Association for Computational Linguistics (2022)","DOI":"10.18653\/v1\/2022.naacl-main.276"},{"key":"32_CR4","doi-asserted-by":"crossref","unstructured":"Fang, S., Xie, H., Wang, Y., Mao, Z., Zhang, Y.: Read like humans: autonomous, bidirectional and iterative language modeling for scene text recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7098\u20137107 (2021)","DOI":"10.1109\/CVPR46437.2021.00702"},{"key":"32_CR5","doi-asserted-by":"crossref","unstructured":"Guo, H., Qin, X., Liu, J., Han, J., Liu, J., Ding, E.: Eaten: entity-aware attention for single shot visual text extraction. In: 2019 International Conference on Document Analysis and Recognition (ICDAR), pp. 254\u2013259. IEEE (2019)","DOI":"10.1109\/ICDAR.2019.00049"},{"key":"32_CR6","doi-asserted-by":"crossref","unstructured":"Huang, Z., et al.: ICDAR 2019 competition on scanned receipt OCR and information extraction. In: 2019 International Conference on Document Analysis and Recognition (ICDAR), pp. 1516\u20131520. IEEE (2019)","DOI":"10.1109\/ICDAR.2019.00244"},{"key":"32_CR7","doi-asserted-by":"crossref","unstructured":"Jaume, G., Ekenel, H.K., Thiran, J.P.: FUNSD: a dataset for form understanding in noisy scanned documents. In: ICDARW, vol. 2, pp. 1\u20136 (2019)","DOI":"10.1109\/ICDARW.2019.10029"},{"key":"32_CR8","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"498","DOI":"10.1007\/978-3-031-19815-1_29","volume-title":"ECCV 2022","author":"G Kim","year":"2022","unstructured":"Kim, G., et al.: OCR-free document understanding transformer. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13688, pp. 498\u2013517. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19815-1_29"},{"key":"32_CR9","doi-asserted-by":"crossref","unstructured":"Kuang, J., et al.: Visual information extraction in the wild: practical dataset and end-to-end solution. In: ICDAR (2023)","DOI":"10.1007\/978-3-031-41731-3_3"},{"key":"32_CR10","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: Structext: structured text understanding with multi-modal transformers. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 1912\u20131920 (2021)","DOI":"10.1145\/3474085.3475345"},{"key":"32_CR11","doi-asserted-by":"crossref","unstructured":"Liao, M., Wan, Z., Yao, C., Chen, K., Bai, X.: Real-time scene text detection with differentiable binarization. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 11474\u201311481 (2020)","DOI":"10.1609\/aaai.v34i07.6812"},{"key":"32_CR12","unstructured":"Park, S., et al.: Cord: a consolidated receipt dataset for post-OCR parsing. 
In: Workshop on Document Intelligence at NeurIPS 2019 (2019)"},{"issue":"11","key":"32_CR13","doi-asserted-by":"publisher","first-page":"2298","DOI":"10.1109\/TPAMI.2016.2646371","volume":"39","author":"B Shi","year":"2016","unstructured":"Shi, B., Bai, X., Yao, C.: An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition. IEEE Trans. Pattern Anal. Mach. Intell. 39(11), 2298\u20132304 (2016)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"32_CR14","doi-asserted-by":"crossref","unstructured":"Wang, J., et al.: Towards robust visual information extraction in real world: new dataset and novel solution. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 2738\u20132745 (2021)","DOI":"10.1609\/aaai.v35i4.16378"},{"key":"32_CR15","doi-asserted-by":"crossref","unstructured":"Xu, Y., Li, M., Cui, L., Huang, S., Wei, F., Zhou, M.: Layoutlm: -training of text and layout for document image understanding. In: Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 1192\u20131200 (2020)","DOI":"10.1145\/3394486.3403172"},{"key":"32_CR16","doi-asserted-by":"crossref","unstructured":"Xu, Y., et al.: XFUND: a benchmark dataset for multilingual visually rich form understanding. In: Findings of the Association for Computational Linguistics: ACL 2022, pp. 3214\u20133224 (2022)","DOI":"10.18653\/v1\/2022.findings-acl.253"},{"key":"32_CR17","doi-asserted-by":"crossref","unstructured":"Yu, D., et al.: Towards accurate scene text recognition with semantic reasoning networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12113\u201312122 (2020)","DOI":"10.1109\/CVPR42600.2020.01213"},{"key":"32_CR18","doi-asserted-by":"crossref","unstructured":"Yu, W., Lu, N., Qi, X., Gong, P., Xiao, R.: Pick: processing key information extraction from documents using improved graph learning-convolutional networks. In: 2020 25th International Conference on Pattern Recognition (ICPR), pp. 4363\u20134370. IEEE (2021)","DOI":"10.1109\/ICPR48806.2021.9412927"},{"key":"32_CR19","doi-asserted-by":"crossref","unstructured":"Zhang, C., et al.: Look more than once: An accurate detector for text of arbitrary shapes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10552\u201310561 (2019)","DOI":"10.1109\/CVPR.2019.01080"},{"key":"32_CR20","doi-asserted-by":"crossref","unstructured":"Zhang, P., et al.: Trie: end-to-end text reading and information extraction for document understanding. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 1413\u20131422 (2020)","DOI":"10.1145\/3394171.3413900"},{"key":"32_CR21","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Ma, J., Du, J., Wang, L., Zhang, J.: Multimodal pre-training based on graph attention network for document understanding. IEEE Trans. Multimedia (2022)","DOI":"10.1109\/TMM.2022.3214102"},{"key":"32_CR22","doi-asserted-by":"crossref","unstructured":"Zhou, X., et al.: East: an efficient and accurate scene text detector. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
5551\u20135560 (2017)","DOI":"10.1109\/CVPR.2017.283"}],"container-title":["Lecture Notes in Computer Science","Document Analysis and Recognition - ICDAR 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-41679-8_32","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,19]],"date-time":"2023-12-19T07:13:20Z","timestamp":1702970000000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-41679-8_32"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031416781","9783031416798"],"references-count":22,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-41679-8_32","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"19 August 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICDAR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Document Analysis and Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"San Jos\u00e9, CA","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 August 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 August 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icdar2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icdar2023.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"316","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"154","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer 
Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"49% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.89","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1.50","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Number and type of other papers accepted : IJDAR track papers","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
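The record above is a standard "work" payload from the public Crossref REST API, which serves the same shape at https://api.crossref.org/works/{doi}. As a minimal sketch of consuming it (illustrative only, not part of the deposited metadata), the Python snippet below fetches this chapter's record and reads out the title, authors, and references using the same fields shown above; it assumes the third-party requests package and network access.

```python
# Minimal sketch: fetch this work from the public Crossref REST API and
# read the same fields that appear in the record above.
import requests

DOI = "10.1007/978-3-031-41679-8_32"

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object carries the metadata

# "title" and "container-title" are lists of strings.
print(work["title"][0])
print(" / ".join(work.get("container-title", [])))

# Authors arrive as {"given": ..., "family": ..., "sequence": ...} objects.
authors = [f"{a.get('given', '')} {a.get('family', '')}".strip()
           for a in work.get("author", [])]
print(f"{len(authors)} authors, first: {authors[0]}")

# Each reference carries a free-text "unstructured" citation and usually a
# DOI; a robust consumer falls back to the free text when the DOI is absent
# (e.g., 32_CR12 above has no DOI).
for ref in work.get("reference", []):
    print(ref["key"], ref.get("DOI", ref.get("unstructured", "")))
```

For sustained use, Crossref asks callers to identify themselves (e.g., a mailto in the User-Agent header) to be routed to its polite pool; that detail is omitted here for brevity.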