{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,16]],"date-time":"2025-09-16T21:26:08Z","timestamp":1758057968228,"version":"3.44.0"},"publisher-location":"Cham","reference-count":40,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032046291","type":"print"},{"value":"9783032046307","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,9,16]],"date-time":"2025-09-16T00:00:00Z","timestamp":1757980800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,16]],"date-time":"2025-09-16T00:00:00Z","timestamp":1757980800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-04630-7_1","type":"book-chapter","created":{"date-parts":[[2025,9,15]],"date-time":"2025-09-15T23:47:20Z","timestamp":1757980040000},"page":"3-21","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Evaluating Compliance with\u00a0Visualization Guidelines in\u00a0Diagrams for\u00a0Scientific Publications Using Large Vision Language Models"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5038-5899","authenticated-orcid":false,"given":"Johannes","family":"R\u00fcckert","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7540-4980","authenticated-orcid":false,"given":"Louise","family":"Bloch","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3315-7536","authenticated-orcid":false,"given":"Christoph M.","family":"Friedrich","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,16]]},"reference":[{"key":"1_CR1","unstructured":"Alayrac, J.B., et al.: Flamingo: a visual language model for few-shot learning. In: Oh, A.H., Agarwal, A., Belgrave, D., Cho, K. (eds.) Advances in Neural Information Processing Systems (2022). https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2022\/file\/960a172bc7fbf0177ccccbb411a7d800-Paper-Conference.pdf. Accessed 18 June 2025"},{"key":"1_CR2","unstructured":"Bai, J., et al.: Qwen-VL: a versatile vision-language model for understanding, localization, text reading, and beyond (2023). https:\/\/arxiv.org\/abs\/2308.12966v3. Accessed 18 June 2025"},{"key":"1_CR3","unstructured":"Bai, S., et al.: Qwen2.5-VL technical report (2025). https:\/\/arxiv.org\/abs\/2502.13923v1. Accessed 18 June 2025"},{"key":"1_CR4","doi-asserted-by":"publisher","unstructured":"Bresciani, S., Eppler, M.J.: The pitfalls of visual representations: a review and classification of common errors made while designing and interpreting visualizations. SAGE Open 5(4), 2158244015611451 (2015). https:\/\/doi.org\/10.1177\/2158244015611451. Accessed 18 June 2025","DOI":"10.1177\/2158244015611451"},{"key":"1_CR5","unstructured":"Chen, X., et al.: Janus-pro: unified multimodal understanding and generation with data and model scaling (2025). https:\/\/arxiv.org\/abs\/2501.17811v1. Accessed 18 June 2025"},{"key":"1_CR6","unstructured":"Chen, Z., et al.: Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling (2025). https:\/\/arxiv.org\/abs\/2412.05271v4. 
Accessed 18 June 2025"},{"key":"1_CR7","doi-asserted-by":"publisher","unstructured":"Davila, K., et al.: Chart-info 2024: a dataset for chart analysis and recognition. In: Antonacopoulos, A., Chaudhuri, S., Chellappa, R., Liu, C.L., Bhattacharya, S., Pal, U. (eds.) Pattern Recognition, pp. 297\u2013315. Springer, Cham (2025). https:\/\/doi.org\/10.1007\/978-3-031-78495-8_19. Accessed 18 June 2025","DOI":"10.1007\/978-3-031-78495-8_19"},{"key":"1_CR8","doi-asserted-by":"publisher","unstructured":"Davila, K., Xu, F., Ahmed, S., Mendoza, D.A., Setlur, S., Govindaraju, V.: ICPR 2022: challenge on harvesting raw tables from infographics (CHART-Infographics). In: 2022 26th International Conference on Pattern Recognition (ICPR), pp. 4995\u20135001 (2022). https:\/\/doi.org\/10.1109\/ICPR56361.2022.9956289. Accessed 18 June 2025","DOI":"10.1109\/ICPR56361.2022.9956289"},{"key":"1_CR9","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: North American Chapter of the Association for Computational Linguistics (2019). https:\/\/aclanthology.org\/N19-1423.pdf. Accessed 18 June 2025"},{"key":"1_CR10","doi-asserted-by":"publisher","unstructured":"Diehl, A., Abdul-Rahman, A., El-Assady, M., Bach, B., Keim, D., Chen, M.: VisGuides: a forum for discussing visualization guidelines. In: Johansson, J., Sadlo, F., Schreck, T. (eds.) EuroVis 2018 - Short Papers. The Eurographics Association (2018). https:\/\/doi.org\/10.2312\/eurovisshort.20181079. Accessed 18 June 2025","DOI":"10.2312\/eurovisshort.20181079"},{"key":"1_CR11","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (2021). https:\/\/openreview.net\/pdf?id=YicbFdNTTy. Accessed 18 June 2025"},{"key":"1_CR12","doi-asserted-by":"publisher","unstructured":"Franzblau, L.E., Chung, K.C.: Graphs, tables, and figures in scientific publications: the good, the bad, and how not to be the latter. J. Hand Surg. 37(3), 591\u2013596 (2012). https:\/\/doi.org\/10.1016\/j.jhsa.2011.12.041. Accessed 18 June 2025","DOI":"10.1016\/j.jhsa.2011.12.041"},{"key":"1_CR13","unstructured":"Han, Y., et al.: ChartLlama: a multimodal LLM for chart understanding and generation (2023). https:\/\/arxiv.org\/abs\/2311.16483v1. Accessed 18 June 2025"},{"key":"1_CR14","unstructured":"Hong, W., et al.: CogVLM2: visual language models for image and video understanding (2024). https:\/\/arxiv.org\/abs\/2408.16500v1. Accessed 18 June 2025"},{"key":"1_CR15","doi-asserted-by":"crossref","unstructured":"Jambor, H.K.: From zero to figure hero. A checklist for designing scientific data visualizations (2024). https:\/\/arxiv.org\/abs\/2408.16007v1. Accessed 18 June 2025","DOI":"10.31219\/osf.io\/xgbyr"},{"key":"1_CR16","doi-asserted-by":"publisher","unstructured":"Kantharaj, S., et al.: Chart-to-text: a large-scale benchmark for chart summarization. In: Muresan, S., Nakov, P., Villavicencio, A. (eds.) Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), Dublin, Ireland, pp. 4005\u20134023. Association for Computational Linguistics (2022). https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.277. Accessed 18 June 2025","DOI":"10.18653\/v1\/2022.acl-long.277"},{"key":"1_CR17","doi-asserted-by":"publisher","unstructured":"Kasneci, E., et al.: ChatGPT for good? 
On opportunities and challenges of large language models for education. Learn. Individ. Differ. 103, 102274 (2023). https:\/\/doi.org\/10.1016\/j.lindif.2023.102274. Accessed 18 June 2025","DOI":"10.1016\/j.lindif.2023.102274"},{"key":"1_CR18","doi-asserted-by":"crossref","unstructured":"Kim, N.W., Myers, G., Bach, B.: How good is ChatGPT in giving advice on your visualization design? (2024). https:\/\/arxiv.org\/abs\/2310.09617v3. Accessed 18 June 2025","DOI":"10.1145\/3745768"},{"key":"1_CR19","doi-asserted-by":"publisher","unstructured":"Liu, F., et al.: DePlot: one-shot visual language reasoning by plot-to-table translation. In: Rogers, A., Boyd-Graber, J., Okazaki, N. (eds.) Findings of the Association for Computational Linguistics: ACL 2023, Toronto, Canada, pp. 10381\u201310399. Association for Computational Linguistics (2023). https:\/\/doi.org\/10.18653\/v1\/2023.findings-acl.660. Accessed 18 June 2025","DOI":"10.18653\/v1\/2023.findings-acl.660"},{"key":"1_CR20","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023). https:\/\/arxiv.org\/abs\/2304.08485v2. Accessed 18 June 2025"},{"key":"1_CR21","doi-asserted-by":"publisher","unstructured":"Masry, A., Shahmohammadi, M., Parvez, M.R., Hoque, E., Joty, S.: ChartInstruct: instruction tuning for chart comprehension and reasoning. In: ACL, pp. 10387\u201310409 (2024). https:\/\/doi.org\/10.18653\/v1\/2024.findings-acl.619. Accessed 18 June 2025","DOI":"10.18653\/v1\/2024.findings-acl.619"},{"key":"1_CR22","doi-asserted-by":"publisher","unstructured":"Masry, A., Do, X.L., Tan, J.Q., Joty, S., Hoque, E.: ChartQA: a benchmark for question answering about charts with visual and logical reasoning. In: Muresan, S., Nakov, P., Villavicencio, A. (eds.) Findings of the Association for Computational Linguistics: ACL 2022, Dublin, Ireland, pp. 2263\u20132279. Association for Computational Linguistics (2022). https:\/\/doi.org\/10.18653\/v1\/2022.findings-acl.177. Accessed 18 June 2025","DOI":"10.18653\/v1\/2022.findings-acl.177"},{"key":"1_CR23","unstructured":"McNutt, A.M., Kindlmann, G.L.: Linting for visualization: towards a practical automated visualization guidance system. In: VisGuides: 2nd Workshop on the Creation, Curation, Critique and Conditioning of Principles and Guidelines in Visualization (2018). https:\/\/c4pgv.dbvis.de\/McNutt_Kindlmann_2018.pdf. Accessed 18 June 2025"},{"key":"1_CR24","doi-asserted-by":"publisher","unstructured":"Nguyen, V.T., Jung, K., Gupta, V.: Examining data visualization pitfalls in scientific publications. Vis. Comput. Ind. Biomed. Art 4(1), 1\u201315 (2021). https:\/\/doi.org\/10.1186\/s42492-021-00092-y","DOI":"10.1186\/s42492-021-00092-y"},{"key":"1_CR25","doi-asserted-by":"publisher","unstructured":"Park, J.H., et al.: The principles of presenting statistical results using figures. Korean J. Anesthesiol. 75, 139\u2013150 (2022). https:\/\/doi.org\/10.4097\/kja.21508. Accessed 18 June 2025","DOI":"10.4097\/kja.21508"},{"key":"1_CR26","doi-asserted-by":"publisher","unstructured":"Parsons, P.: Understanding data visualization design practice. IEEE Trans. Visual Comput. Graphics 28(1), 665\u2013675 (2022). https:\/\/doi.org\/10.1109\/TVCG.2021.3114959. Accessed 18 June 2025","DOI":"10.1109\/TVCG.2021.3114959"},{"key":"1_CR27","unstructured":"Yang, A., et al.: Qwen2.5 technical report (2025). https:\/\/arxiv.org\/abs\/2412.15115v2. Accessed 18 June 2025"},{"key":"1_CR28","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. 
In: Proceedings of the 38th International Conference on Machine Learning (ICML 2021), pp. 8748\u20138763 (2021). https:\/\/proceedings.mlr.press\/v139\/radford21a\/radford21a.pdf. Accessed 18 June 2025"},{"key":"1_CR29","doi-asserted-by":"publisher","unstructured":"Schriger, D.L., Cooper, R.J.: Achieving graphical excellence: suggestions and methods for creating high-quality visual displays of experimental data. Ann. Emerg. Med. 37(1), 75\u201387 (2001). https:\/\/doi.org\/10.1067\/mem.2001.111570. Accessed 18 June 2025","DOI":"10.1067\/mem.2001.111570"},{"key":"1_CR30","doi-asserted-by":"publisher","unstructured":"Shin, S., Chung, S., Hong, S., Elmqvist, N.: A scanner deeply: predicting gaze heatmaps on visualizations using crowdsourced eye movement data. IEEE Trans. Visual Comput. Graph. 29(1), 396\u2013406 (2023). https:\/\/doi.org\/10.1109\/TVCG.2022.3209472. Accessed 18 June 2025","DOI":"10.1109\/TVCG.2022.3209472"},{"key":"1_CR31","unstructured":"Shin, S., Hong, S., Elmqvist, N.: Visualizationary: automating design feedback for visualization designers using LLMs (2024). https:\/\/arxiv.org\/abs\/2409.13109v1. Accessed 18 June 2025"},{"key":"1_CR32","doi-asserted-by":"publisher","unstructured":"Shukla, S., Samal, A.: Recognition and quality assessment of data charts in mixed-mode documents. IJDAR 11(3), 111\u2013126 (2008). https:\/\/doi.org\/10.1007\/s10032-008-0065-5, accessed: 2025-06-18","DOI":"10.1007\/s10032-008-0065-5"},{"key":"1_CR33","unstructured":"Su, W., et al.: VL-BERT: pre-training of generic visual-linguistic representations. In: International Conference on Learning Representations (2020). https:\/\/openreview.net\/attachment?id=SygXPaEYvH&name=original_pdf. Accessed 18 June 2025"},{"key":"1_CR34","doi-asserted-by":"publisher","unstructured":"Tahamtan, I., Safipour Afshar, A., Ahamdzadeh, K.: Factors affecting number of citations: a comprehensive review of the literature. Scientometrics 107(3), 1195\u20131225 (2016). https:\/\/doi.org\/10.1007\/s11192-016-1889-2, accessed: 2025-06-18","DOI":"10.1007\/s11192-016-1889-2"},{"key":"1_CR35","unstructured":"Tufte, E.R., Graves-Morris, P.R.: The visual display of quantitative information. No.\u00a09, Graphics Press Cheshire, CT, 2 edn. (2001)"},{"key":"1_CR36","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Guyon, I., et al. (eds.) Advances in Neural Information Processing Systems, vol.\u00a030. Curran Associates, Inc. (2017). https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2017\/file\/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf. Accessed 18 June 2025"},{"key":"1_CR37","unstructured":"Wang, C.Y., Yeh, I.H., Liao, H.: You only learn one representation: unified network for multiple tasks (2021). https:\/\/arxiv.org\/abs\/2105.04206v1. Accessed 18 June 2025"},{"key":"1_CR38","unstructured":"Wang, W., et al.: CogVLM: visual expert for pretrained language models. In: Globerson, A., et al. (eds.) Advances in Neural Information Processing Systems, vol.\u00a037, pp. 121475\u2013121499. Curran Associates, Inc. (2024). https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2024\/file\/dc06d4d2792265fb5454a6092bfd5c6a-Paper-Conference.pdf. Accessed 18 June 2025"},{"key":"1_CR39","unstructured":"Wang, Z., Yu, J., Yu, A.W., Dai, Z., Tsvetkov, Y., Cao, Y.: SimVLM: simple visual language model pretraining with weak supervision. In: International Conference on Learning Representations (2022). https:\/\/openreview.net\/pdf?id=GUrhfTuf_3. 
Accessed 18 June 2025"},{"key":"1_CR40","doi-asserted-by":"publisher","unstructured":"Zhang, J., Huang, J., Jin, S., Lu, S.: Vision-language models for vision tasks: a survey. IEEE Trans. Pattern Anal. Mach. Intell. 46(8), 5625\u20135644 (2024). https:\/\/doi.org\/10.1109\/TPAMI.2024.3369699. Accessed 18 June 2025","DOI":"10.1109\/TPAMI.2024.3369699"}],"container-title":["Lecture Notes in Computer Science","Document Analysis and Recognition \u2013 ICDAR 2025"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-04630-7_1","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,15]],"date-time":"2025-09-15T23:47:30Z","timestamp":1757980050000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-04630-7_1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,16]]},"ISBN":["9783032046291","9783032046307"],"references-count":40,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-04630-7_1","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,16]]},"assertion":[{"value":"16 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICDAR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Document Analysis and Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Wuhan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icdar2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/iapr.org\/icdar2025","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}