{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T07:02:22Z","timestamp":1775199742478,"version":"3.50.1"},"reference-count":38,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434641","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-7","source":"Crossref","is-referenced-by-count":0,"title":["Robot Confirmation Generation and Action Planning Using Long-context Q-Former Integrated with Multimodal LLM"],"prefix":"10.1109","author":[{"given":"Chiori","family":"Hori","sequence":"first","affiliation":[{"name":"Mitsubishi Electric Research Laboratories (MERL),Cambridge,MA,USA"}]},{"given":"Yoshiki","family":"Masuyama","sequence":"additional","affiliation":[{"name":"Mitsubishi Electric Research Laboratories (MERL),Cambridge,MA,USA"}]},{"given":"Siddarth","family":"Jain","sequence":"additional","affiliation":[{"name":"Mitsubishi Electric Research Laboratories (MERL),Cambridge,MA,USA"}]},{"given":"Radu","family":"Corcodel","sequence":"additional","affiliation":[{"name":"Mitsubishi Electric Research Laboratories (MERL),Cambridge,MA,USA"}]},{"given":"Devesh","family":"Jha","sequence":"additional","affiliation":[{"name":"Mitsubishi Electric Research Laboratories 
(MERL),Cambridge,MA,USA"}]},{"given":"Diego","family":"Romeres","sequence":"additional","affiliation":[{"name":"Mitsubishi Electric Research Laboratories (MERL),Cambridge,MA,USA"}]},{"given":"Jonathan Le","family":"Roux","sequence":"additional","affiliation":[{"name":"Mitsubishi Electric Research Laboratories (MERL),Cambridge,MA,USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2013.6697215"},{"key":"ref2","first-page":"67","article-title":"A cognitive system for understanding human manipulation actions","volume":"3","author":"Yang","year":"2014","journal-title":"Advances in Cognitive Systems"},{"key":"ref3","article-title":"Robot learning manipulation action plans by","volume-title":"unconstrained videos from the world wide web","author":"Yang"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8462891"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2022.XVIII.026"},{"key":"ref6","article-title":"R3M: A universal visual representation for robot manipulation","author":"Nair","year":"2022","journal-title":"arXiv preprint arXiv:2203.12601"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460857"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561308"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1983"},{"key":"ref10","article-title":"CLIPort: What and where pathways for robotic manipulation","volume-title":"Proc. 
CoRL","author":"Shridhar"},{"key":"ref11","article-title":"Do as I can, not as I say: Grounding language in robotic affordances","author":"Ahn","year":"2022","journal-title":"arXiv preprint arXiv:2204.01691"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10161317"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10610981"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s10514-023-10133-5"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2024.3410155"},{"key":"ref16","article-title":"Planning with large language models via corrective re-prompting","volume-title":"NeurIPS Foundation Models for Decision Making Workshop","author":"Raman"},{"key":"ref17","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. ICML","author":"Li"},{"key":"ref18","article-title":"Human action understanding-based robot planning using multimodal LLM","volume-title":"Proc. ICRA Workshop for \u201cCooking Robotics: Perception and motion planning","author":"Kambara"},{"key":"ref19","article-title":"Towards LLM-centric multimodal fusion: A survey on integration strategies and techniques","author":"An"},{"key":"ref20","article-title":"The evolution of multimodal model architectures","author":"Wadekar"},{"key":"ref21","article-title":"Interactive robot action replanning using multimodal LLM trained from human demonstration videos","volume-title":"Proc. ICASSP","author":"Hori"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2018.2882140"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2011.2172420"},{"key":"ref24","article-title":"Kitchenvla: Iterative vision-language corrections for robotic execution of human tasks","volume-title":"Proc. 
ICRA Workshop for \u201c Safely Leveraging Vision-Language Foundation Models in Robotics: Challenges and Opportunities","author":"Lu"},{"key":"ref25","article-title":"Maniskill3: GPU parallelized simulation and rendering for generalizable embodied AI","volume-title":"Proc. ICRA Workshop for \u201c7th Robot Learning Workshop: Towards Robots with Human-Level Abilities","author":"Tao"},{"key":"ref26","article-title":"Flare: Robot learning with implicit world modeling","author":"Zheng","year":"2025","journal-title":"arXiv preprint arXiv:2505.15659"},{"key":"ref27","article-title":"WorldEval: World model as real-world robot policies evaluator","author":"Li","year":"2025","journal-title":"arXiv preprint arXiv:2505.19017"},{"key":"ref28","article-title":"VideoLLaMA 3: Frontier multimodal foundation models for image and video understanding","author":"Zhang","year":"2025","journal-title":"arXiv preprint arXiv:2501.13106"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref31","article-title":"OPT: Open Pre-trained Transformer Language Models","author":"Zhang","year":"2022","journal-title":"arXiv preprint arXiv:2205.01068"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"key":"ref33","article-title":"video-SALMONN: Speech-enhanced audio-visual large language models","volume-title":"Proc. ICML","author":"Sun"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01563"},{"key":"ref35","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. 
ICML","author":"Radford"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-698"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1162"},{"key":"ref38","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv preprint arXiv:1810.04805"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434641.pdf?arnumber=11434641","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:57:41Z","timestamp":1775192261000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434641\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":38,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434641","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}