{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T07:16:30Z","timestamp":1767078990354,"version":"3.48.0"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T00:00:00Z","timestamp":1764720000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T00:00:00Z","timestamp":1764720000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,3]]},"DOI":"10.1109\/dicta68720.2025.11302429","type":"proceedings-article","created":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T18:36:22Z","timestamp":1767033382000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Exploring Primitive Visual Measurement Understanding and the Role of Output Format in Learning in Vision-Language Models"],"prefix":"10.1109","author":[{"given":"Ankit","family":"Yadav","sequence":"first","affiliation":[{"name":"The University of Adelaide,Adelaide,Australia"}]},{"given":"Lingqiao","family":"Liu","sequence":"additional","affiliation":[{"name":"The University of Adelaide,Adelaide,Australia"}]},{"given":"Yuankai","family":"Qi","sequence":"additional","affiliation":[{"name":"Macquarie University,Sydney,Australia"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Phi-3 technical report: A highly capable language model locally on your phone","author":"Abdin","year":"2024","journal-title":"arXiv preprint arXiv"},{"key":"ref2","first-page":"23716","article-title":"Flamingo: a visual language model for few-shot learning","volume":"35","author":"Alayrac","year":"2022","journal-title":"Advances in neural information processing systems"},{"issue":"2","key":"ref3","first-page":"3","article-title":"Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond","volume":"1","author":"Bai","year":"2023","journal-title":"arXiv preprint arXiv"},{"key":"ref4","article-title":"Paligemma: A versatile 3b vlm for transfer","author":"Beyer","year":"2024","journal-title":"arXiv preprint arXiv"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TAES.2016.140952"},{"key":"ref7","article-title":"The llama 3 herd of models","author":"Dubey","year":"2024","journal-title":"arXiv preprint arXiv"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00686"},{"volume-title":"Explaining and extending the bit-parallel approximate string matching algorithm of myers","year":"2001","author":"Hyyr\u00f6","key":"ref9"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58601-0_2"},{"key":"ref11","article-title":"Visual instruction tuning","volume":"36","author":"Liu","year":"2024","journal-title":"Advances in neural information processing systems"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2015.10.013"},{"key":"ref13","article-title":"Prism: A framework for decoupling and assessing the capabilities of vlms","author":"Qiao","year":"2024","journal-title":"arXiv preprint arXiv"},{"issue":"8","key":"ref14","first-page":"9","article-title":"Language models are unsupervised multitask 
learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-96-0917-8_17"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.17487\/rfc1321"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-04083-2_5"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73242-3_25"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.78"},{"key":"ref20","article-title":"Guiding visionlanguage model selection for visual question-answering across tasks, domains, and knowledge types","author":"Sinha","year":"2024","journal-title":"arXiv preprint arXiv"},{"key":"ref21","article-title":"Qwen2-vl: Enhancing vision-language model\u2019s perception of the world at any resolution","author":"Wang","year":"2024","journal-title":"arXiv preprint arXiv"},{"key":"ref22","article-title":"Qwen2 technical report","author":"Yang","year":"2024","journal-title":"arXiv preprint arXiv"},{"key":"ref23","article-title":"Minicpm-v: A gpt-4v level mllm on your phone","author":"Yao","year":"2024","journal-title":"arXiv preprint arXiv"}],"event":{"name":"2025 International Conference on Digital Image Computing: Techniques and Applications (DICTA)","start":{"date-parts":[[2025,12,3]]},"location":"Adelaide, Australia","end":{"date-parts":[[2025,12,5]]}},"container-title":["2025 International Conference on Digital Image Computing: Techniques and Applications (DICTA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11302408\/11302416\/11302429.pdf?arnumber=11302429","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T07:14:34Z","timestamp":1767078874000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11302429\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,3]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/dicta68720.2025.11302429","relation":{},"subject":[],"published":{"date-parts":[[2025,12,3]]}}}