{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T06:45:37Z","timestamp":1767077137309,"version":"3.48.0"},"reference-count":47,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T00:00:00Z","timestamp":1764720000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T00:00:00Z","timestamp":1764720000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,3]]},"DOI":"10.1109\/dicta68720.2025.11302424","type":"proceedings-article","created":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T18:36:22Z","timestamp":1767033382000},"page":"1-10","source":"Crossref","is-referenced-by-count":0,"title":["Rethinking Agentic and End-to-End Large Multimodal Models for Vision Tasks"],"prefix":"10.1109","author":[{"given":"Yixin","family":"Wang","sequence":"first","affiliation":[{"name":"The University of Adelaide,Adelaide,SA,Australia,5005"}]},{"given":"Xinyu","family":"Wang","sequence":"additional","affiliation":[{"name":"The University of Adelaide,Adelaide,SA,Australia,5005"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023","journal-title":"arXiv preprint"},{"key":"ref2","first-page":"23716","article-title":"Flamingo: a visual language model for few-shot learning","volume-title":"Proc. Advances in Neural Inf. Process. Syst.","volume":"35","author":"Alayrac","year":"2022"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1547"},{"key":"ref4","article-title":"Microsoft coco captions: Data collection and evaluation server","author":"Chen","year":"2015","journal-title":"arXiv preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02283"},{"volume-title":"Grounded-sam-2: Assembling open-world models for diverse visual tasks","year":"2024","author":"R.","key":"ref6"},{"key":"ref7","article-title":"Ocrbench v2: An improved benchmark for evaluating large multimodal models on visual text localization and reasoning","author":"Fu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Assistgpt: A general multi-modal assistant that can plan, execute, inspect, and learn","author":"Gao","year":"2023","journal-title":"arXiv preprint"},{"volume-title":"Google. Gemini 2.0","year":"2025","key":"ref9"},{"volume-title":"Google DeepMind. Gemini 2.0 flash","year":"2024","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01436"},{"key":"ref13","article-title":"Fastreid: A pytorch toolbox for general instance reidentification","author":"He","year":"2020","journal-title":"arXiv preprint"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1086"},{"key":"ref15","article-title":"Seedbench: Benchmarking multimodal llms with generative comprehension","author":"Li","year":"2023","journal-title":"arXiv preprint"},{"key":"ref16","first-page":"19730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"International conference on machine learning","author":"Li","year":"2023"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.27"},{"key":"ref18","first-page":"34892","article-title":"Visual instruction tuning","volume-title":"Proc. Advances in Neural Inf. Process. Syst.","volume":"36","author":"Liu","year":"2023"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46475-6_53"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72658-3_13"},{"key":"ref22","first-page":"26439","article-title":"Unified-io2: Scaling autoregressive multimodal models with vision language audio and action","volume-title":"Proc. IEEE Conf. Comp. Vis. Patt. Recogn.","author":"Lu","year":"2024"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20044-1_20"},{"volume-title":"Introducing gpt-4o and more tools to chatgpt free users","year":"2024","key":"ref24"},{"volume-title":"Internvl3\u201314b","year":"2025","key":"ref25"},{"volume-title":"Paddleocr: A practical ocr toolkit based on paddlepaddle","year":"2021","key":"ref26"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547942"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/tse.2022.3197063"},{"journal-title":"Kosmos-2: Grounding multimodal large language models to the world","year":"2023","author":"Peng","key":"ref29"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00340"},{"key":"ref31","article-title":"Meta releases two llama 4 ai models","author":"Robison","year":"2025","journal-title":"The Verge"},{"key":"ref32","first-page":"68539","article-title":"Toolformer: Language models can teach themselves to use tools","volume-title":"Proc. Advances in Neural Inf. Process. Syst.","volume":"36","author":"Schick","year":"2023"},{"key":"ref33","first-page":"38154","article-title":"Hugginggpt: Solving ai tasks with chatgpt and its friends in hugging face","volume":"36","author":"Shen","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2020.3035969"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01092"},{"key":"ref36","article-title":"Chameleon: Mixed-modal early-fusion foundation models","author":"Team.","year":"2024","journal-title":"arXiv preprint"},{"key":"ref37","article-title":"Gemini: a family of highly capable multimodal models","author":"Team","year":"2023","journal-title":"arXiv preprint"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01014"},{"key":"ref39","first-page":"26606","article-title":"Modaverse: Efficiently transforming modalities with llms","volume-title":"Proc. IEEE Conf. Comp. Vis. Patt. Recogn.","author":"Wang","year":"2024"},{"key":"ref40","article-title":"Are large vision language models good game players","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wang","year":"2025"},{"volume-title":"xAI. Grok 2 vision 1212","year":"2024","key":"ref41"},{"key":"ref42","article-title":"On the tool manipulation capability of open-source large language models","author":"Xu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref43","article-title":"Mm-react: Prompting chatgpt for multimodal reasoning and action","author":"Yang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.70"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.405"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.540"}],"event":{"name":"2025 International Conference on Digital Image Computing: Techniques and Applications (DICTA)","start":{"date-parts":[[2025,12,3]]},"location":"Adelaide, Australia","end":{"date-parts":[[2025,12,5]]}},"container-title":["2025 International Conference on Digital Image Computing: Techniques and Applications (DICTA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11302408\/11302416\/11302424.pdf?arnumber=11302424","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T06:40:52Z","timestamp":1767076852000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11302424\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,3]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/dicta68720.2025.11302424","relation":{},"subject":[],"published":{"date-parts":[[2025,12,3]]}}}