{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T18:36:45Z","timestamp":1772822205608,"version":"3.50.1"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,3,31]],"date-time":"2025-03-31T00:00:00Z","timestamp":1743379200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,3,31]],"date-time":"2025-03-31T00:00:00Z","timestamp":1743379200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,3,31]]},"DOI":"10.23919\/date64628.2025.10992791","type":"proceedings-article","created":{"date-parts":[[2025,5,21]],"date-time":"2025-05-21T17:36:35Z","timestamp":1747848995000},"page":"1-6","source":"Crossref","is-referenced-by-count":1,"title":["ChipVQA: Benchmarking Visual Language Models for Chip Design"],"prefix":"10.23919","author":[{"given":"Haoyu","family":"Yang","sequence":"first","affiliation":[{"name":"NVIDIA Corp."}]},{"given":"Qijing","family":"Huang","sequence":"additional","affiliation":[{"name":"NVIDIA Corp."}]},{"given":"Nathaniel","family":"Pinckney","sequence":"additional","affiliation":[{"name":"NVIDIA Corp."}]},{"given":"Walker","family":"Turner","sequence":"additional","affiliation":[{"name":"NVIDIA Corp."}]},{"given":"Wenfei","family":"Zhou","sequence":"additional","affiliation":[{"name":"NVIDIA Corp."}]},{"given":"Yanqing","family":"Zhang","sequence":"additional","affiliation":[{"name":"NVIDIA Corp."}]},{"given":"Chia-Tung","family":"Ho","sequence":"additional","affiliation":[{"name":"NVIDIA Corp."}]},{"given":"Chen-Chia","family":"Chang","sequence":"additional","affiliation":[{"name":"NVIDIA Corp."}]},{"given":"Haoxing","family":"Ren","sequence":"additional","affiliation":[{"name":"NVIDIA 
Corp."}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3240765.3240843"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3061639.3062270"},{"key":"ref3","doi-asserted-by":"crossref","DOI":"10.1145\/3400302.3415661","article-title":"Hotspot detection via attention-based deep layout metric learning","volume-title":"IEEE\/ACM International Conference on Computer-Aided Design (ICCAD)","author":"Geng","year":"2020"},{"key":"ref4","first-page":"264","article-title":"Bufformer: A generative ml framework for scalable buffering","volume-title":"IEEE\/ACM Asia and South Pacific Design Automation Conference (ASPDAC)","author":"Liang","year":"2023"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3508352.3549442","article-title":"Transsizer: A novel transformer-based fast gate sizer","volume-title":"Proceedings of the 41st IEEE\/ACM International Conference on Computer-Aided Design","author":"Nath","year":"2022"},{"key":"ref6","article-title":"Chipnemo: Domain-adapted llms for chip design","author":"Liu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref7","article-title":"Betterv: Controlled verilog generation with discriminative guidance","author":"Pei","year":"2024","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Rtlcoder: Outperforming gpt-3.5 in design rtl generation with our open-source dataset and lightweight solution","author":"Liu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3649329.3657353"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCAD57390.2023.10323812"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00913"},{"key":"ref12","volume-title":"GPT-4o","year":"2024"},{"key":"ref13","article-title":"Visual instruction tuning","volume-title":"Conference on Neural Information Processing Systems 
(NIPS)","volume":"36","author":"Liu","year":"2024"},{"key":"ref14","volume-title":"Llava-next: Improved reasoning, ocr, and world knowledge","author":"Liu","year":"2024"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73397-0_18"},{"key":"ref16","article-title":"Autogen: Enabling next-gen llm applications via multi-agent conversation","author":"Wu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref17","volume":"abs\/2306.14824","author":"Peng","year":"2023","journal-title":"Kosmos-2: Grounding multimodal large language models to the world"},{"key":"ref18","article-title":"Neva: Nemo vision and language assistant","volume-title":"NVIDIA","year":"2024"},{"key":"ref19","article-title":"Phi-3 technical report: A highly capable language model locally on your phone","author":"Abdin","year":"2024","journal-title":"arXiv preprint"},{"key":"ref20","article-title":"Paligemma: A lightweight open vision-language model (vlm)","volume-title":"Google AI Blog","year":"2024"},{"key":"ref21","article-title":"Fuyu-8b: A multimodal architecture for ai agents","volume-title":"Adept AI","author":"Bavishi","year":"2024"},{"key":"ref22","article-title":"X-vila: Cross-modality alignment for large language model","author":"Ye","year":"2024","journal-title":"arXiv preprint"},{"key":"ref23","article-title":"Mmbench: Is your multimodal model an all-around player?","author":"Liu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref24","article-title":"Mm-vet: Evaluating large multimodal models for integrated capabilities","author":"Yu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref25","article-title":"Lamm: Language-assisted multimodal instruction-tuning dataset, framework, and benchmark","author":"Yin","year":"2023","journal-title":"arXiv preprint"},{"key":"ref26","article-title":"Seed-bench: Benchmarking multimodal llms with generative comprehension","author":"Li","year":"2023","journal-title":"arXiv 
preprint"},{"key":"ref27","article-title":"Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts","author":"Lu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02520"},{"key":"ref29","volume-title":"LLaMa-3.2","year":"2024"},{"key":"ref30","volume-title":"Ollama","year":"2024"},{"key":"ref31","volume-title":"NVIDIA NIM","year":"2024"},{"key":"ref32","volume-title":"Azure OpenAI","year":"2024"},{"key":"ref33","volume-title":"LLaMa-3","year":"2024"},{"key":"ref34","article-title":"Mistral 7b","author":"Jiang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref35","volume-title":"Fastchat","author":"Liang","year":"2023"},{"key":"ref36","volume-title":"Yi-34b"}],"event":{"name":"2025 Design, Automation &amp; Test in Europe Conference (DATE)","location":"Lyon, France","start":{"date-parts":[[2025,3,31]]},"end":{"date-parts":[[2025,4,2]]}},"container-title":["2025 Design, Automation &amp; Test in Europe Conference (DATE)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10992638\/10992588\/10992791.pdf?arnumber=10992791","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,22]],"date-time":"2025-05-22T05:32:13Z","timestamp":1747891933000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10992791\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3,31]]},"references-count":36,"URL":"https:\/\/doi.org\/10.23919\/date64628.2025.10992791","relation":{},"subject":[],"published":{"date-parts":[[2025,3,31]]}}}