{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T07:11:25Z","timestamp":1767337885656,"version":"3.40.3"},"publisher-location":"Cham","reference-count":45,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031704413"},{"type":"electronic","value":"9783031704420"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-70442-0_5","type":"book-chapter","created":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T08:09:40Z","timestamp":1725955780000},"page":"71-88","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Instruction Makes a\u00a0Difference"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5582-2031","authenticated-orcid":false,"given":"Tosin","family":"Adewumi","sequence":"first","affiliation":[]},{"given":"Nudrat","family":"Habib","sequence":"additional","affiliation":[]},{"given":"Lama","family":"Alkhaled","sequence":"additional","affiliation":[]},{"given":"Elisa","family":"Barney","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,11]]},"reference":[{"doi-asserted-by":"publisher","unstructured":"Adewumi, T., et al.: Afriwoz: corpus for exploiting cross-lingual transfer for dialogue generation in low-resource, African languages. In: 2023 International Joint Conference on Neural Networks (IJCNN), pp.\u00a01\u20138 (2023). https:\/\/doi.org\/10.1109\/IJCNN54540.2023.10191208","key":"5_CR1","DOI":"10.1109\/IJCNN54540.2023.10191208"},{"unstructured":"Adewumi, T., et al.: ProCoT: stimulating critical thinking and writing of students through engagement with large language models (LLMs). arXiv preprint arXiv:2312.09801 (2023)","key":"5_CR2"},{"doi-asserted-by":"publisher","unstructured":"Adewumi, T., Liwicki, F., Liwicki, M.: State-of-the-art in open-domain conversational AI: a survey. Information 13(6) (2022). https:\/\/doi.org\/10.3390\/info13060298. https:\/\/www.mdpi.com\/2078-2489\/13\/6\/298","key":"5_CR3","DOI":"10.3390\/info13060298"},{"unstructured":"AIIM: State of the intelligent information management industry: Pivotal moment in information management. Association for Intelligent Information Management (2023)","key":"5_CR4"},{"unstructured":"Alayrac, J.B., et al.: Flamingo: a visual language model for few-shot learning. In: Advances in Neural Information Processing Systems, vol. 35, pp. 23716\u201323736 (2022)","key":"5_CR5"},{"doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Bottom-up and top-down attention for image captioning and visual question answering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
6077\u20136086 (2018)","key":"5_CR6","DOI":"10.1109\/CVPR.2018.00636"},{"doi-asserted-by":"crossref","unstructured":"Chen, L., et al.: ShareGPT4V: improving large multi-modal models with better captions. arXiv preprint arXiv:2311.12793 (2023)","key":"5_CR7","DOI":"10.1007\/978-3-031-72643-9_22"},{"unstructured":"Chen, X., et\u00a0al.: PaLI: a jointly-scaled multilingual language-image model. arXiv preprint arXiv:2209.06794 (2022)","key":"5_CR8"},{"unstructured":"Dai, W., et al.: InstructBLIP: towards general-purpose vision-language models with instruction tuning (2023)","key":"5_CR9"},{"unstructured":"Dao, T.: FlashAttention-2: faster attention with better parallelism and work partitioning (2023)","key":"5_CR10"},{"unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth $$16 \\times 16$$ words: transformers for image recognition at scale. In: Proceedings of ICLR (2021)","key":"5_CR11"},{"unstructured":"Fu, C., et al.: MME: a comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394 (2023)","key":"5_CR12"},{"doi-asserted-by":"crossref","unstructured":"Gehman, S., Gururangan, S., Sap, M., Choi, Y., Smith, N.A.: RealToxicityPrompts: evaluating neural toxic degeneration in language models. arXiv preprint arXiv:2009.11462 (2020)","key":"5_CR13","DOI":"10.18653\/v1\/2020.findings-emnlp.301"},{"doi-asserted-by":"crossref","unstructured":"Hao, L., Gao, L., Yi, X., Tang, Z.: A table detection method for pdf documents based on convolutional neural networks. In: 2016 12th IAPR Workshop on Document Analysis Systems (DAS), pp. 287\u2013292. IEEE (2016)","key":"5_CR14","DOI":"10.1109\/DAS.2016.23"},{"doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2961\u20132969 (2017)","key":"5_CR15","DOI":"10.1109\/ICCV.2017.322"},{"doi-asserted-by":"crossref","unstructured":"Hu, R., Singh, A., Darrell, T., Rohrbach, M.: Iterative answer prediction with pointer-augmented multimodal transformers for TextVQA. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9992\u201310002 (2020)","key":"5_CR16","DOI":"10.1109\/CVPR42600.2020.01001"},{"doi-asserted-by":"crossref","unstructured":"Hu, W., Xu, Y., Li, Y., Li, W., Chen, Z., Tu, Z.: BLIVA: a simple multimodal LLM for better handling of text-rich visual questions (2024)","key":"5_CR17","DOI":"10.1609\/aaai.v38i3.27999"},{"key":"5_CR18","series-title":"LNCS","first-page":"498","volume-title":"ECCV 2022","author":"G Kim","year":"2022","unstructured":"Kim, G., et al.: OCR-free document understanding transformer. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13688, pp. 498\u2013517. Springer, Cham (2022)"},{"unstructured":"Kojima, T., Gu, S.S., Reid, M., Matsuo, Y., Iwasawa, Y.: Large language models are zero-shot reasoners. In: Advances in Neural Information Processing Systems, vol. 35, pp. 22199\u201322213 (2022)","key":"5_CR19"},{"doi-asserted-by":"publisher","unstructured":"Li, Y., Du, Y., Zhou, K., Wang, J., Zhao, X., Wen, J.R.: Evaluating object hallucination in large vision-language models. In: Bouamor, H., Pino, J., Bali, K. (eds.) Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, Singapore, pp. 292\u2013305. Association for Computational Linguistics (2023). https:\/\/doi.org\/10.18653\/v1\/2023.emnlp-main.20. 
https:\/\/aclanthology.org\/2023.emnlp-main.20","key":"5_CR20","DOI":"10.18653\/v1\/2023.emnlp-main.20"},{"doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning (2023)","key":"5_CR21","DOI":"10.1109\/CVPR52733.2024.02484"},{"unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. In: NeurIPS (2023)","key":"5_CR22"},{"issue":"3","key":"5_CR23","doi-asserted-by":"publisher","first-page":"235","DOI":"10.1007\/s10032-021-00383-3","volume":"24","author":"M Mathew","year":"2021","unstructured":"Mathew, M., Gomez, L., Karatzas, D., Jawahar, C.: Asking questions on handwritten document collections. Int. J. Doc. Anal. Recogn. (IJDAR) 24(3), 235\u2013249 (2021)","journal-title":"Int. J. Doc. Anal. Recogn. (IJDAR)"},{"doi-asserted-by":"crossref","unstructured":"Mathew, M., Karatzas, D., Jawahar, C.: DocVQA: a dataset for VQA on document images. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 2200\u20132209 (2021)","key":"5_CR24","DOI":"10.1109\/WACV48630.2021.00225"},{"doi-asserted-by":"publisher","unstructured":"Mishra, S., Khashabi, D., Baral, C., Hajishirzi, H.: Cross-task generalization via natural language crowdsourcing instructions. In: Muresan, S., Nakov, P., Villavicencio, A. (eds.) Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Dublin, Ireland, pp. 3470\u20133487. Association for Computational Linguistics (2022). https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.244. https:\/\/aclanthology.org\/2022.acl-long.244","key":"5_CR25","DOI":"10.18653\/v1\/2022.acl-long.244"},{"doi-asserted-by":"crossref","unstructured":"Parrish, A., et al.: BBQ: a hand-built bias benchmark for question answering (2022)","key":"5_CR26","DOI":"10.18653\/v1\/2022.findings-acl.165"},{"unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)","key":"5_CR27"},{"doi-asserted-by":"publisher","unstructured":"Rajpurkar, P., Zhang, J., Lopyrev, K., Liang, P.: SQuAD: 100,000+ questions for machine comprehension of text. In: Su, J., Duh, K., Carreras, X. (eds.) Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pp. 2383\u20132392. Association for Computational Linguistics, Austin, Texas (2016). https:\/\/doi.org\/10.18653\/v1\/D16-1264. https:\/\/aclanthology.org\/D16-1264","key":"5_CR28","DOI":"10.18653\/v1\/D16-1264"},{"unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. In: Advances in Neural Information Processing Systems, vol. 28 (2015)","key":"5_CR29"},{"doi-asserted-by":"publisher","unstructured":"Singh, A., et al.: Towards VQA models that can read. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Los Alamitos, CA, USA, pp. 8309\u20138318. IEEE Computer Society (2019). https:\/\/doi.org\/10.1109\/CVPR.2019.00851","key":"5_CR30","DOI":"10.1109\/CVPR.2019.00851"},{"doi-asserted-by":"crossref","unstructured":"Teney, D., Anderson, P., He, X., Van Den\u00a0Hengel, A.: Tips and tricks for visual question answering: learnings from the 2017 challenge. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
4223\u20134232 (2018)","key":"5_CR31","DOI":"10.1109\/CVPR.2018.00444"},{"key":"5_CR32","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"635","DOI":"10.1007\/978-3-030-86337-1_42","volume-title":"Document Analysis and Recognition \u2013 ICDAR 2021","author":"R Tito","year":"2021","unstructured":"Tito, R., Mathew, M., Jawahar, C.V., Valveny, E., Karatzas, D.: ICDAR 2021 competition on document visual question answering. In: Llad\u00f3s, J., Lopresti, D., Uchida, S. (eds.) ICDAR 2021. LNCS, vol. 12824, pp. 635\u2013649. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-86337-1_42"},{"unstructured":"Touvron, H., et\u00a0al.: Llama: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)","key":"5_CR33"},{"unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)","key":"5_CR34"},{"doi-asserted-by":"crossref","unstructured":"Wang, W., et\u00a0al.: Image as a foreign language: Beit pretraining for vision and vision-language tasks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19175\u201319186 (2023)","key":"5_CR35","DOI":"10.1109\/CVPR52729.2023.01838"},{"doi-asserted-by":"crossref","unstructured":"Wang, X., et al.: On the general value of evidence, and bilingual scene-text visual question answering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10126\u201310135 (2020)","key":"5_CR36","DOI":"10.1109\/CVPR42600.2020.01014"},{"unstructured":"Wang, Y., Li, H., Han, X., Nakov, P., Baldwin, T.: Do-not-answer: a dataset for evaluating safeguards in LLMs. arXiv preprint arXiv:2308.13387 (2023)","key":"5_CR37"},{"unstructured":"Wei, J., et al.: Chain-of-thought prompting elicits reasoning in large language models. In: Advances in Neural Information Processing Systems, vol. 35, pp. 24824\u201324837 (2022)","key":"5_CR38"},{"unstructured":"Xu, W., Banburski-Fahey, A., Jojic, N.: Reprompting: automated chain-of-thought prompt inference through gibbs sampling. arXiv preprint arXiv:2305.09993 (2023)","key":"5_CR39"},{"doi-asserted-by":"crossref","unstructured":"Xu, Y., Li, M., Cui, L., Huang, S., Wei, F., Zhou, M.: Layoutlm: pre-training of text and layout for document image understanding. In: Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 1192\u20131200 (2020)","key":"5_CR40","DOI":"10.1145\/3394486.3403172"},{"doi-asserted-by":"crossref","unstructured":"Yuan, L., et al.: Tokens-to-token ViT: training vision transformers from scratch on imagenet. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 558\u2013567 (2021)","key":"5_CR41","DOI":"10.1109\/ICCV48922.2021.00060"},{"unstructured":"Zhao, B., Wu, B., Huang, T.: SVIT: scaling up visual instruction tuning. arXiv preprint arXiv:2307.04087 (2023)","key":"5_CR42"},{"unstructured":"Zheng, L., et\u00a0al.: LMSYS-Chat-1M: a large-scale real-world LLM conversation dataset. arXiv preprint arXiv:2309.11998 (2023)","key":"5_CR43"},{"unstructured":"Zhou, Q., Wang, Z., Chu, W., Xu, Y., Li, H., Qi, Y.: InfMLLM: a unified framework for visual-language tasks (2023)","key":"5_CR44"},{"unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: MiniGPT-4: enhancing vision-language understanding with advanced large language models. 
arXiv preprint arXiv:2304.10592 (2023)","key":"5_CR45"}],"container-title":["Lecture Notes in Computer Science","Document Analysis Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-70442-0_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T23:12:52Z","timestamp":1732749172000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-70442-0_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031704413","9783031704420"],"references-count":45,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-70442-0_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"11 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"DAS","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Workshop on Document Analysis Systems","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Athens","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30 August 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31 August 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"das2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/das2024.seecs.edu.pk\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
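
The record above is a standard Crossref "work" message. As a minimal sketch (Python standard library only), the same record can be fetched from Crossref's public REST API and a few of its fields read back; the endpoint https://api.crossref.org/works/{DOI} is Crossref's documented route, and the printed fields are simply the ones present in the record above, so the expected outputs shown in the comments are assumptions drawn from it.

import json
import urllib.request

DOI = "10.1007/978-3-031-70442-0_5"  # the chapter's DOI, as given in the record

# The works endpoint returns {"status": "ok", "message": {...}}, where
# "message" is the bibliographic record shown above.
with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    work = json.load(resp)["message"]

print(work["title"][0])                # "Instruction Makes a Difference"
print(work["DOI"], work.get("page"))   # 10.1007/978-3-031-70442-0_5 71-88
print(len(work.get("reference", [])))  # 45 cited references
for author in work.get("author", []):
    print(author.get("given"), author.get("family"))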