{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T21:38:49Z","timestamp":1773265129920,"version":"3.50.1"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,11,12]],"date-time":"2025-11-12T00:00:00Z","timestamp":1762905600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,11,12]],"date-time":"2025-11-12T00:00:00Z","timestamp":1762905600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,11,12]]},"DOI":"10.1109\/icdmw69685.2025.00167","type":"proceedings-article","created":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T19:50:39Z","timestamp":1773172239000},"page":"1404-1410","source":"Crossref","is-referenced-by-count":0,"title":["TALENT: Table VQA via Augmented Language-Enhanced Natural-Text Transcription"],"prefix":"10.1109","author":[{"given":"Yutong","family":"Guo","sequence":"first","affiliation":[{"name":"Johns Hopkins University"}]},{"given":"Wanying","family":"Wang","sequence":"additional","affiliation":[{"name":"Independent Researcher"}]},{"given":"Yue","family":"Wu","sequence":"additional","affiliation":[{"name":"Independent Researcher"}]},{"given":"Zichen","family":"Miao","sequence":"additional","affiliation":[{"name":"Purdue University"}]},{"given":"Haoyu","family":"Wang","sequence":"additional","affiliation":[{"name":"University at Albany"}]}],"member":"263","reference":[{"key":"ref1","volume-title":"Tablevqa-bench: A visual question answering benchmark on multiple table domains","author":"Kim","year":"2024"},{"key":"ref2","volume-title":"Enhancing financial vqa in vision language models using intermediate structured representations","author":"Srivastava","year":"2025"},{"key":"ref3","volume-title":"Pmc-vqa: Visual instruction tuning for medical visual question answering","author":"Zhang","year":"2024"},{"key":"ref4","first-page":"406","article-title":"Wikidt: Visual-based table recognition and question answering dataset","volume-title":"Document Analysis and Recognition - ICDAR 2024: 18th International Conference","author":"Shi"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3534619"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3728635"},{"key":"ref7","volume-title":"Guiding vision-language model selection for visual question-answering across tasks, domains, and knowledge types","author":"Sinha","year":"2024"},{"key":"ref8","volume-title":"Ocr vs llms: What\u2019s the best tool for document processing?","author":"Patin","year":"2025"},{"key":"ref9","volume-title":"Tinyvqa: Compact multimodal deep neural network for visual question answering on resource-constrained devices","author":"Rashid","year":"2024"},{"key":"ref10","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-030-58604-1_5","volume-title":"Table structure recognition using top-down and bottom-up cues","author":"Raja","year":"2020"},{"key":"ref11","doi-asserted-by":"crossref","DOI":"10.1109\/CVPR52733.2024.02484","volume-title":"Improved baselines with visual instruction tuning","author":"Liu","year":"2024"},{"key":"ref12","volume-title":"Large language models(llms) on tabular data: Prediction, generation, and understanding - a 
survey","author":"Fang","year":"2024"},{"key":"ref13","doi-asserted-by":"crossref","DOI":"10.1145\/3503161.3548038","volume-title":"Tsrformer: Table structure recognition with transformers","author":"Lin","year":"2022"},{"key":"ref14","volume-title":"Tablemaster: A recipe to advance table understanding with language models","author":"Cao","year":"2025"},{"key":"ref15","volume-title":"Tableformer: Table structure understanding with transformers","author":"Nassar","year":"2022"},{"key":"ref16","volume-title":"Deplot: One-shot visual language reasoning by plot-to-table translation","author":"Liu","year":"2023"},{"key":"ref17","volume-title":"Qwen2.5-vl technical report","author":"Bai","year":"2025"},{"key":"ref18","volume-title":"Qwen2.5 technical report","author":"Qwen","year":"2025"},{"key":"ref19","volume-title":"Supported models","year":"2025"},{"key":"ref20","volume-title":"Flashattention-2: Faster attention with better parallelism and work partitioning","author":"Dao","year":"2023"},{"key":"ref21","volume-title":"Phi-4-mini technical report: Compact yet powerful multimodal language models via mixture-of-loras","year":"2025"},{"key":"ref22","volume-title":"Minicpm: Unveiling the potential of small language models with scalable training strategies","author":"Hu","year":"2024"},{"key":"ref23","volume-title":"Gpt-4v(ision) system card","year":"2023"},{"key":"ref24","volume-title":"Gpt-4 technical report","year":"2024"},{"key":"ref25","volume-title":"Gemini: A family of highly capable multimodal models","year":"2025"},{"key":"ref26","volume-title":"Qwen3 technical report","author":"Yang","year":"2025"}],"event":{"name":"2025 IEEE International Conference on Data Mining Workshops (ICDMW)","location":"Washington, DC, USA","start":{"date-parts":[[2025,11,12]]},"end":{"date-parts":[[2025,11,15]]}},"container-title":["2025 IEEE International Conference on Data Mining Workshops (ICDMW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11415623\/11415713\/11415742.pdf?arnumber=11415742","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T05:34:58Z","timestamp":1773207298000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11415742\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,12]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icdmw69685.2025.00167","relation":{},"subject":[],"published":{"date-parts":[[2025,11,12]]}}}