{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T07:11:31Z","timestamp":1767337891809,"version":"3.27.0"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T00:00:00Z","timestamp":1725926400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T00:00:00Z","timestamp":1725926400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,9,10]]},"DOI":"10.1109\/etfa61755.2024.10711022","type":"proceedings-article","created":{"date-parts":[[2024,10,16]],"date-time":"2024-10-16T17:51:22Z","timestamp":1729101082000},"page":"1-4","source":"Crossref","is-referenced-by-count":3,"title":["Towards a Benchmark of Multimodal Large Language Models for Industrial Engineering"],"prefix":"10.1109","volume":"36","author":[{"given":"Markus Michael","family":"Geipel","sequence":"first","affiliation":[{"name":"Siemens AG Digital Industries - Factory Automation,Munich,81739"}]}],"member":"263","reference":[{"key":"ref1","article-title":"The dawn of lmms: Preliminary explorations with gpt-4v(ision)","author":"Yang","year":"2023","journal-title":"arXiv"},{"key":"ref2","article-title":"Large language model based multi-agents: A survey of progress and challenges","author":"Guo","year":"2024","journal-title":"arXiv"},{"key":"ref3","article-title":"Gpt-4 technical report","author":"OpenAI","year":"2024","journal-title":"arXiv"},{"key":"ref4","article-title":"Visual instruction tuning","author":"Liu","year":"2023","journal-title":"arXiv"},{"key":"ref5","article-title":"Seed-bench: Benchmarking multimodal llms with generative comprehension","author":"Li","year":"2023","journal-title":"arXiv"},{"key":"ref6","article-title":"Exploring the reasoning abilities of multimodal large language models (mllms): A comprehensive survey on emerging trends in multimodal reasoning","author":"Wang","year":"2024","journal-title":"arXiv"},{"key":"ref7","article-title":"Mm-bigbench: Evaluating multimodal models on multimodal content comprehension tasks","author":"Yang","year":"2023","journal-title":"arXiv"},{"key":"ref8","article-title":"Mm-react: Prompting chatgpt for multimodal reasoning and action","author":"Yang","year":"2023","journal-title":"arXiv"},{"key":"ref9","first-page":"5484","article-title":"M3exam: A multilingual, multimodal, multilevel benchmark for examining large language models","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Zhang","year":"2023"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00914"},{"key":"ref11","article-title":"Mmbench: Is your multi-modal model an all-around player?","author":"Liu","year":"2023","journal-title":"arXiv"},{"key":"ref12","article-title":"Lvlm-ehub: A comprehensive evaluation benchmark for large vision-language models","author":"Xu","year":"2023","journal-title":"arXiv"},{"key":"ref13","article-title":"Mme: A comprehensive evaluation benchmark for multimodal large language models","author":"Fu","year":"2024","journal-title":"arXiv"},{"key":"ref14","article-title":"Lamm: Language-assisted multi-modal instruction-tuning dataset, framework, and benchmark","author":"Yin","year":"2023","journal-title":"arXiv"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref18","article-title":"Comparative analysis of gpt-4vision, gpt-4 and open source llms in clinical diagnostic accuracy: A benchmark against human expertise","author":"Han","year":"2023","journal-title":"medRxiv"},{"key":"ref19","article-title":"Anomalygpt: Detecting industrial anomalies using large vision-language models","author":"Gu","year":"2023","journal-title":"arXiv"},{"key":"ref20","article-title":"Llm-based control code generation using image recognition","author":"Koziolek","year":"2023","journal-title":"arXiv preprint"},{"key":"ref21","article-title":"Incorporating large language models into production systems for enhanced task automation and flexibility","author":"Xia","year":"2024","journal-title":"arXiv preprint"},{"journal-title":"Siemens Xcelerator: Scaling roll-out of generative AI with Siemens industrial copilot","year":"2024","author":"Siemens","key":"ref22"}],"event":{"name":"2024 IEEE 29th International Conference on Emerging Technologies and Factory Automation (ETFA)","start":{"date-parts":[[2024,9,10]]},"location":"Padova, Italy","end":{"date-parts":[[2024,9,13]]}},"container-title":["2024 IEEE 29th International Conference on Emerging Technologies and Factory Automation (ETFA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10710336\/10710347\/10711022.pdf?arnumber=10711022","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,17]],"date-time":"2024-10-17T06:29:39Z","timestamp":1729146579000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10711022\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,10]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/etfa61755.2024.10711022","relation":{},"subject":[],"published":{"date-parts":[[2024,9,10]]}}}