{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T12:40:26Z","timestamp":1766061626843,"version":"3.48.0"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iros60139.2025.11246314","type":"proceedings-article","created":{"date-parts":[[2025,11,27]],"date-time":"2025-11-27T18:54:45Z","timestamp":1764269685000},"page":"329-334","source":"Crossref","is-referenced-by-count":0,"title":["Vision-Language Guided Adaptive Robot Action Planning: Responding to Intermediate Results and Implicit Human Intentions"],"prefix":"10.1109","author":[{"given":"Weihao","family":"Cai","sequence":"first","affiliation":[{"name":"Ritsumeikan University,College of Information Science and Engineering,Osaka,Japan"}]},{"given":"Yoshiki","family":"Mori","sequence":"additional","affiliation":[{"name":"The University of Osaka,Graduate School of Engineering Science,Osaka,Japan"}]},{"given":"Nobutaka","family":"Shimada","sequence":"additional","affiliation":[{"name":"Ritsumeikan University,College of Information Science and Engineering,Osaka,Japan"}]}],"member":"263","reference":[{"key":"ref1","first-page":"465","article-title":"Pilco: A model-based and data-efficient approach to policy search","volume-title":"Proceedings of the 28th International Conference on machine learning (ICML-11)","author":"Deisenroth"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/9481.003.0013"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/s41315-019-00103-5"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-control-100819-063206"},{"key":"ref5","first-page":"19730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"International conference on machine learning","author":"Li"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3310935"},{"article-title":"Video chatcaptioner: Towards enriched spatiotemporal descriptions","year":"2023","author":"Chen","key":"ref7"},{"key":"ref8","article-title":"Chatgpt asks, blip-2 answers: automatic questioning towards enriched visual descriptions","volume":"2024","author":"Zhu","year":"2024","journal-title":"Transactions on Machine Learning Research"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.755"},{"article-title":"Llm as a robotic brain: Unifying egocentric memory and control","year":"2023","author":"Mai","key":"ref10"},{"article-title":"V2pe: Improving multimodal long-context capability of vision-language models with variable visual position encoding","year":"2024","author":"Ge","key":"ref11"},{"article-title":"Prism: A framework for decoupling and assessing the capabilities of vlms","year":"2024","author":"Qiao","key":"ref12"},{"article-title":"Ferret: Refer and ground anything anywhere at any granularity","year":"2023","author":"You","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487173"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3187614"},{"volume-title":"Social learning: an introduction to mechanisms, methods, and models","year":"2013","author":"Hoppitt","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/IWIS62722.2024.10706068"},{"key":"ref18","first-page":"24993","article-title":"Fine-grained visual prompting","volume":"36","author":"Yang","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Set-of-mark prompting unleashes extraordinary visual grounding in gpt-4v","year":"2023","author":"Yang","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00721"},{"article-title":"Moka: Open-vocabulary robotic manipulation through mark-based visual prompting","volume-title":"First Workshop on Vision-Language Models for Navigation and Manipulation at ICRA 2024","author":"Liu","key":"ref21"},{"article-title":"Look before you leap: Unveiling the power of gpt-4v in robotic vision-language planning","year":"2023","author":"Hu","key":"ref22"},{"key":"ref23","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume":"35","author":"Wei","year":"2022","journal-title":"Advances in neural information processing systems"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.82"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.20"}],"event":{"name":"2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2025,10,19]]},"location":"Hangzhou, China","end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11245651\/11245652\/11246314.pdf?arnumber=11246314","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T12:36:10Z","timestamp":1766061370000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11246314\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/iros60139.2025.11246314","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}