{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,22]],"date-time":"2025-10-22T02:51:46Z","timestamp":1761101506231,"version":"3.44.0"},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,8,26]],"date-time":"2025-08-26T00:00:00Z","timestamp":1756166400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,8,26]],"date-time":"2025-08-26T00:00:00Z","timestamp":1756166400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,8,26]]},"DOI":"10.1109\/cog64752.2025.11114183","type":"proceedings-article","created":{"date-parts":[[2025,8,19]],"date-time":"2025-08-19T18:06:42Z","timestamp":1755626802000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["Can Multimodal LLMs Reason About Stability? An Exploratory Study with Insights from the LLMs4PCG Challenge"],"prefix":"10.1109","author":[{"given":"Mury F.","family":"Dewantoro","sequence":"first","affiliation":[{"name":"Graduate School of Information Science and Engineering, Ritsumeikan University,Ibaraki, Osaka,Japan"}]},{"given":"Febri","family":"Abdullah","sequence":"additional","affiliation":[{"name":"Independent researcher"}]},{"given":"Yi","family":"Xia","sequence":"additional","affiliation":[{"name":"Graduate School of Information Science and Engineering, Ritsumeikan University,Ibaraki, Osaka,Japan"}]},{"given":"Ibrahim","family":"Khan","sequence":"additional","affiliation":[{"name":"Graduate School of Information Science and Engineering, Ritsumeikan University,Ibaraki, Osaka,Japan"}]},{"given":"Ruck","family":"Thawonmas","sequence":"additional","affiliation":[{"name":"Graduate School of Information Science and Engineering, Ritsumeikan University,Ibaraki, Osaka,Japan"}]},{"given":"Wenwen","family":"Ouyang","sequence":"additional","affiliation":[{"name":"College of Information Science and Engineering, Ritsumeikan University,Ibaraki, Osaka,Japan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.324"},{"key":"ref2","article-title":"Chain of Thought Prompting Elicits Reasoning in Large Language Models","volume-title":"Advances in Neural Information Processing Systems","author":"Wei","year":"2022"},{"key":"ref3","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Advances in Neural Information Processing Systems","volume":"35","author":"Ouyang","year":"2022"},{"key":"ref4","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv preprint arXiv"},{"key":"ref5","article-title":"Advancing reasoning in large language models: Promising methods and approaches","author":"Patil","year":"2025","journal-title":"arXiv preprint arXiv"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-024-00963-y"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"133","DOI":"10.18653\/v1\/2023.findings-eacl.10","article-title":"Learning the effects of physical actions in a multi-modal environment","volume-title":"Findings of the Association for Computational Linguistics: EACL 2023","author":"Dagan","year":"2023"},{"key":"ref8","article-title":"How far are we from intelligent visual deductive reasoning?","author":"Zhang","year":"2024","journal-title":"arXiv preprint arXiv"},{"key":"ref9","article-title":"Phyx: Does your model have the \u201cwits\u201d for physical reasoning?","author":"Shen","year":"2025","journal-title":"arXiv preprint arXiv"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CoG60054.2024.10645646"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1093\/nsr\/nwae403"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1609\/aiide.v20i1.31877"},{"key":"ref13","doi-asserted-by":"crossref","first-page":"9743","DOI":"10.18653\/v1\/2023.findings-emnlp.652","article-title":"NEWTON: Are large language models capable of physical reasoning?","volume-title":"Findings of the Association for Computational Linguistics: EMNLP 2023","author":"Wang","year":"2023"},{"key":"ref14","article-title":"A little less conversation, a little more action, please: Investigating the physical common-sense of llms in a 3d embodied environment","volume-title":"arXiv preprint arXiv","author":"Mecattaf","year":"2024"},{"key":"ref15","article-title":"Llmphy: Complex physical reasoning using large language models and world models","author":"Cherian","year":"2024","journal-title":"arXiv preprint arXiv"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2025.3529117"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/InCIT63192.2024.10810536"},{"key":"ref18","article-title":"Gemma 3 technical report","author":"Kamath","year":"2025","journal-title":"CoRR"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"ref20","article-title":"Qwen2.5-vl technical report","author":"Bai","year":"2025","journal-title":"arXiv preprint arXiv"},{"key":"ref21","first-page":"13321","article-title":"Nullshot prompting: Rethinking prompting large language models with hallucination","volume-title":"Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing","author":"Taveekitworachai","year":"Nov. 2024"},{"key":"ref22","article-title":"Large Language Models are Zero-Shot Reasoners","volume-title":"Advances in Neural Information Processing Systems","author":"Kojima","year":"2022"},{"key":"ref23","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref24","article-title":"To cot or not to cot? chain-of-thought helps mainly on math and symbolic reasoning","author":"Sprague","year":"2024","journal-title":"arXiv preprint arXiv"},{"key":"ref25","article-title":"U-niah: Unified rag and llm evaluation for long context needle-in-a-haystack","author":"Gao","year":"2025","journal-title":"arXiv preprint arXiv"},{"key":"ref26","doi-asserted-by":"crossref","first-page":"157","DOI":"10.1162\/tacl_a_00638","article-title":"Lost in the middle: How language models use long contexts","volume":"12","author":"Liu","year":"2024","journal-title":"Transactions of the Association for Computational Linguistics"},{"key":"ref27","article-title":"Can mllms reason in multimodality? emma: An enhanced multimodal reasoning benchmark","author":"Hao","year":"2025","journal-title":"arXiv preprint arXiv"},{"key":"ref28","article-title":"The future of mllm prompting is adaptive: A comprehensive experimental evaluation of prompt engineering methods for robust multimodal performance","author":"Mohanty","year":"2025","journal-title":"arXiv preprint arXiv"}],"event":{"name":"2025 IEEE Conference on Games (CoG)","start":{"date-parts":[[2025,8,26]]},"location":"Lisbon, Portugal","end":{"date-parts":[[2025,8,29]]}},"container-title":["2025 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11113832\/11113841\/11114183.pdf?arnumber=11114183","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,20]],"date-time":"2025-08-20T06:19:42Z","timestamp":1755670782000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11114183\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,26]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/cog64752.2025.11114183","relation":{},"subject":[],"published":{"date-parts":[[2025,8,26]]}}}