{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T08:38:14Z","timestamp":1768293494010,"version":"3.49.0"},"publisher-location":"Cham","reference-count":22,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031705519","type":"print"},{"value":"9783031705526","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-70552-6_15","type":"book-chapter","created":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T04:02:14Z","timestamp":1725940934000},"page":"248-263","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Information Extraction from\u00a0Visually Rich Documents Using Directed Weighted Graph Neural Network"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-8283-1572","authenticated-orcid":false,"given":"Hamza","family":"Gbada","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6371-9851","authenticated-orcid":false,"given":"Karim","family":"Kalti","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8181-4684","authenticated-orcid":false,"given":"Mohamed Ali","family":"Mahjoub","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,11]]},"reference":[{"key":"15_CR1","doi-asserted-by":"publisher","unstructured":"Belhadj, D., Bela\u00efd, A., Bela\u00efd, Y.: Improving information extraction from semi-structured documents using attention based semi-variational graph auto-encoder. In: Fink, G.A., Jain, R., Kise, K., Zanibbi, R. (eds.) Document Analysis and Recognition - ICDAR 2023, ICDAR 2023, LNCS, vol. 14188, pp. 113\u2013129. Springer, Cham (2023). https:\/\/doi.org\/10.1007\/978-3-031-41679-8_7","DOI":"10.1007\/978-3-031-41679-8_7"},{"key":"15_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"854","DOI":"10.1007\/978-3-030-86331-9_55","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021","author":"D Belhadj","year":"2021","unstructured":"Belhadj, D., Bela\u00efd, Y., Bela\u00efd, A.: Consideration of the word\u2019s neighborhood in GATs for information extraction in semi-structured documents. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021. LNCS, vol. 12822, pp. 854\u2013869. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86331-9_55"},{"key":"15_CR3","doi-asserted-by":"publisher","unstructured":"Chen, Y.M., Hou, X.T., Lou, D.F., Liao, Z.L., Liu, C.L.: DAMGCN: entity linking in visually rich documents with dependency-aware multimodal graph convolutional network. In: Fink, G.A., Jain, R., Kise, K., Zanibbi, R. (eds.) Document Analysis and Recognition - ICDAR 2023, ICDAR 2023, LNCS, vol. 14189, pp. 33\u201347. Springer, Cham (2023). 
https:\/\/doi.org\/10.1007\/978-3-031-41682-8_3","DOI":"10.1007\/978-3-031-41682-8_3"},{"key":"15_CR4","doi-asserted-by":"publisher","unstructured":"Gbada, H., Kalti, K., Mahjoub, M.: Multimodal weighted graph representation for information extraction from visually rich documents. Neurocomputing 573, 127223 (2024). https:\/\/doi.org\/10.1016\/j.neucom.2023.127223","DOI":"10.1016\/j.neucom.2023.127223"},{"key":"15_CR5","doi-asserted-by":"crossref","unstructured":"Harley, A.W., Ufkes, A., Derpanis, K.G.: Evaluation of deep convolutional nets for document image classification and retrieval. In: 2015 13th International Conference on Document Analysis and Recognition (ICDAR), pp. 991\u2013995. IEEE (2015)","DOI":"10.1109\/ICDAR.2015.7333910"},{"key":"15_CR6","doi-asserted-by":"crossref","unstructured":"Huang, Z., et al.: Icdar2019 competition on scanned receipt ocr and information extraction. In: 2019 International Conference on Document Analysis and Recognition (ICDAR), pp. 1516\u20131520. IEEE (2019)","DOI":"10.1109\/ICDAR.2019.00244"},{"key":"15_CR7","unstructured":"Huang, Z., Xu, W., Yu, K.: Bidirectional LSTM-CRF models for sequence tagging. CoRR abs\/1508.01991 (2015), http:\/\/arxiv.org\/abs\/1508.01991"},{"key":"15_CR8","doi-asserted-by":"publisher","unstructured":"Hwang, W., Yim, J., Park, S., Yang, S., Seo, M.: Spatial dependency parsing for semi-structured document information extraction. In: Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pp. 330\u2013343. Association for Computational Linguistics, August 2021. https:\/\/doi.org\/10.18653\/v1\/2021.findings-acl.28, https:\/\/aclanthology.org\/2021.findings-acl.28","DOI":"10.18653\/v1\/2021.findings-acl.28"},{"key":"15_CR9","doi-asserted-by":"crossref","unstructured":"Jaume, G., Ekenel, H.K., Thiran, J.P.: Funsd: a dataset for form understanding in noisy scanned documents. In: 2019 International Conference on Document Analysis and Recognition Workshops (ICDARW), vol.\u00a02, pp.\u00a01\u20136. IEEE (2019)","DOI":"10.1109\/ICDARW.2019.10029"},{"key":"15_CR10","unstructured":"Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. In: Bengio, Y., LeCun, Y. (eds.) 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, 7-9 May 2015, Conference Track Proceedings (2015). http:\/\/arxiv.org\/abs\/1412.6980"},{"key":"15_CR11","series-title":"Lecture Notes in Information Systems and Organisation","doi-asserted-by":"publisher","first-page":"5","DOI":"10.1007\/978-3-030-86797-3_1","volume-title":"Innovation Through Information Systems","author":"F Krieger","year":"2021","unstructured":"Krieger, F., Drews, P., Funk, B., Wobbe, T.: Information extraction from invoices: a graph neural network approach for datasets with high layout variety. In: Ahlemann, F., Sch\u00fctte, R., Stieglitz, S. (eds.) WI 2021. LNISO, vol. 47, pp. 5\u201320. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86797-3_1"},{"key":"15_CR12","doi-asserted-by":"crossref","unstructured":"Liu, X., Gao, F., Zhang, Q., Zhao, H.: Graph convolution for multimodal information extraction from visually rich documents. 
arXiv preprint arXiv:1903.11279 (2019)","DOI":"10.18653\/v1\/N19-2005"},{"key":"15_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"144","DOI":"10.1007\/978-3-030-21074-8_12","volume-title":"Computer Vision \u2013 ACCV 2018 Workshops","author":"D Lohani","year":"2019","unstructured":"Lohani, D., Bela\u00efd, A., Bela\u00efd, Y.: An invoice reading system using a graph convolutional network. In: Carneiro, G., You, S. (eds.) ACCV 2018. LNCS, vol. 11367, pp. 144\u2013158. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-21074-8_12"},{"key":"15_CR14","unstructured":"Park, S., et al.: Cord: a consolidated receipt dataset for post-ocr parsing. In: Workshop on Document Intelligence at NeurIPS 2019 (2019)"},{"key":"15_CR15","unstructured":"Paszke, A., et\u00a0al.: Pytorch: an imperative style, high-performance deep learning library. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"15_CR16","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1007\/978-3-319-24574-4_28","volume-title":"Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2015","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28"},{"key":"15_CR17","doi-asserted-by":"publisher","unstructured":"Shi, D., Liu, S., Du, J., Zhu, H.: LayoutGCN: a lightweight architecture for visually rich document understanding. In: Fink, G.A., Jain, R., Kise, K., Zanibbi, R. (eds.) Document Analysis and Recognition - ICDAR 2023, ICDAR 2023, LNCS, vol. 14189, pp. 149\u2013165. Springer, Cham (2023). https:\/\/doi.org\/10.1007\/978-3-031-41682-8_10","DOI":"10.1007\/978-3-031-41682-8_10"},{"key":"15_CR18","unstructured":"Sun, H., Kuang, Z., Yue, X., Lin, C., Zhang, W.: Spatial dual-modality graph reasoning for key information extraction. arXiv preprint arXiv:2103.14470 (2021), https:\/\/arxiv.org\/abs\/2103.14470"},{"key":"15_CR19","unstructured":"Veli\u010dkovi\u0107, P., Cucurull, G., Casanova, A., Romero, A., Li\u00f2, P., Bengio, Y.: Graph attention networks. In: International Conference on Learning Representations (2018). https:\/\/openreview.net\/forum?id=rJXMpikCZ"},{"key":"15_CR20","unstructured":"Wang, M., et\u00a0al.: Deep graph library: a graph-centric, highly-performant package for graph neural networks. arXiv preprint arXiv:1909.01315 (2019)"},{"key":"15_CR21","unstructured":"Welling, M., Kipf, T.N.: Semi-supervised classification with graph convolutional networks. In: International Conference on Learning Representations (ICLR 2017) (2016)"},{"key":"15_CR22","doi-asserted-by":"crossref","unstructured":"Xu, Y., et al.: Xfund: a benchmark dataset for multilingual visually rich form understanding. In: Findings of the Association for Computational Linguistics: ACL 2022, pp. 
3214\u20133224 (2022)","DOI":"10.18653\/v1\/2022.findings-acl.253"}],"container-title":["Lecture Notes in Computer Science","Document Analysis and Recognition - ICDAR 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-70552-6_15","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,15]],"date-time":"2025-06-15T14:07:05Z","timestamp":1749996425000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-70552-6_15"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031705519","9783031705526"],"references-count":22,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-70552-6_15","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"11 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICDAR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Document Analysis and Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Athens","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30 August 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 September 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icdar2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icdar2024.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
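A record like the one above can be retrieved directly from the public Crossref REST API via its `/works/{doi}` endpoint. The following is a minimal Python sketch, assuming the third-party `requests` package and network access; the `mailto` address is a placeholder, and the fields it prints (`title`, `page`, `reference`) are the ones present in this record.

```python
import requests

# DOI of the chapter described by the record above.
DOI = "10.1007/978-3-031-70552-6_15"

# Crossref asks callers to identify themselves ("polite pool");
# the mailto value below is a placeholder, not a real address.
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    params={"mailto": "you@example.com"},
    timeout=30,
)
resp.raise_for_status()

# The envelope matches the record above:
# {"status": "ok", "message-type": "work", ..., "message": {...}}
record = resp.json()["message"]

print(record["title"][0])                    # chapter title
print(record["DOI"], "pp.", record["page"])  # DOI and page range
for ref in record.get("reference", []):      # the 22 cited works
    print(ref["key"], ref.get("DOI", "(no DOI deposited)"))
```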