{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T20:08:18Z","timestamp":1766088498877,"version":"3.44.0"},"reference-count":62,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,19]],"date-time":"2025-05-19T00:00:00Z","timestamp":1747612800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,5,19]]},"DOI":"10.1109\/icra55743.2025.11128807","type":"proceedings-article","created":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T17:28:56Z","timestamp":1756834136000},"page":"8972-8979","source":"Crossref","is-referenced-by-count":3,"title":["In-Context Learning Enables Robot Action Prediction in LLMs"],"prefix":"10.1109","author":[{"given":"Yida","family":"Yin","sequence":"first","affiliation":[{"name":"University of California,Berkeley"}]},{"given":"Zekai","family":"Wang","sequence":"additional","affiliation":[{"name":"University of California,Berkeley"}]},{"given":"Yuvan","family":"Sharma","sequence":"additional","affiliation":[{"name":"University of California,Berkeley"}]},{"given":"Dantong","family":"Niu","sequence":"additional","affiliation":[{"name":"University of California,Berkeley"}]},{"given":"Trevor","family":"Darrell","sequence":"additional","affiliation":[{"name":"University of California,Berkeley"}]},{"given":"Roei","family":"Herzig","sequence":"additional","affiliation":[{"name":"University of California,Berkeley"}]}],"member":"263","reference":[{"journal-title":"arXiv preprint","article-title":"Gpt-4 technical report","year":"2024","key":"ref1"},{"volume-title":"Claude-3.5-sonnet","year":"2024","key":"ref2"},{"journal-title":"arXiv preprint","article-title":"The llama 3 herd of models","year":"2024","key":"ref3"},{"key":"ref4","article-title":"Language models are few-shot learners","author":"Brown","year":"2020","journal-title":"NeurIPS"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.759"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.78"},{"key":"ref7","article-title":"Chain-of-thought prompting elicits reasoning in large language models","author":"Wei","year":"2022","journal-title":"NeurIPS"},{"key":"ref8","article-title":"What makes good in-context examples for gpt-3?","author":"Liu","year":"2021","journal-title":"ACL"},{"key":"ref9","article-title":"Calibrate before use: Improving few-shot performance of language models","author":"Zhao","year":"2021","journal-title":"ICML"},{"key":"ref10","article-title":"Long-context llms struggle with long in-context learning","author":"Li","year":"2024","journal-title":"TMLR"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00638"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01692"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2974707"},{"journal-title":"arXiv preprint","article-title":"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context","year":"2024","key":"ref14"},{"key":"ref15","article-title":"Visual instruction 
tuning","author":"Liu","year":"2024","journal-title":"NeurIPS"},{"key":"ref16","article-title":"Moka: Open-vocabulary robotic manipulation through mark-based visual prompting","author":"Liu","year":"2024","journal-title":"RSS"},{"key":"ref17","article-title":"Look before you leap: Unveiling the power of gpt-4v in robotic vision-language planning","author":"Hu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref18","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021","journal-title":"I CML"},{"key":"ref19","article-title":"Simple open-vocabulary object detection with vision transformers","author":"Minderer","year":"2022","journal-title":"ECCV"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref22","article-title":"Language models as zero-shot planners: Extracting actionable knowledge for embodied agents","author":"Huang","year":"2022","journal-title":"ICML"},{"key":"ref23","article-title":"Voxposer: Composable 3d value maps for robotic manipulation with language models","author":"Huang","year":"2023","journal-title":"CoRL"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10161317"},{"key":"ref25","article-title":"Do as i can, not as i say: Grounding language in robotic affordances","author":"Ahn","year":"2023","journal-title":"CoRL"},{"key":"ref26","article-title":"Socratic models: Composing zero-shot multimodal reasoning with language","author":"Zeng","year":"2023","journal-title":"ICLR"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10161534"},{"key":"ref28","article-title":"Manipulate-anything: Automating real-world robots using vision-language models","author":"Duan","year":"2024","journal-title":"CoRL"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160591"},{"key":"ref30","article-title":"Voyager: An open-ended embodied agent with large language models","author":"Wang","year":"2024","journal-title":"TMLR"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160969"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/s10514-023-10131-7"},{"key":"ref33","article-title":"Language to rewards for robotic skill synthesis","author":"Yu","year":"2023","journal-title":"CoRL"},{"key":"ref34","article-title":"Rekep: Spatio-temporal reasoning of relational keypoint constraints for robotic manipulation","author":"Huang","year":"2024","journal-title":"oRL"},{"key":"ref35","article-title":"Mt-opt: Continuous multi-task robotic reinforcement learning at scale","author":"Kalashnikov","year":"2021","journal-title":"arXiv preprint"},{"key":"ref36","article-title":"Bc-z: Zero-shot task generalization with robotic imitation learning","author":"Jang","year":"2022","journal-title":"CoRL"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2024.xx.096"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/cdc56724.2024.10885862"},{"key":"ref39","article-title":"Rt-2: Vision-language-action models transfer web knowledge to robotic control","author":"Brohan","year":"2023","journal-title":"oRL"},{"journal-title":"ICRA","article-title":"Open x-embodiment: Robotic learning datasets and rt-x models","year":"2024","key":"ref40"},{"volume-title":"Intro-ducing rfm-l: Giving robots human-like reasoning 
capabil-ities","year":"2024","author":"Sohn","key":"ref41"},{"key":"ref42","article-title":"Llarva: Vision-action instruction tuning enhances robot learning","author":"Niu","year":"2024","journal-title":"CoRL"},{"key":"ref43","article-title":"Openvla: An open-source vision-language-action model","author":"Kim","year":"2024","journal-title":"oRL"},{"key":"ref44","article-title":"Dinov2: Learning robust visual features without supervision","author":"Oquab","year":"2024","journal-title":"arXiv preprint"},{"key":"ref45","article-title":"Llara: Supercharging robot learning data for vision-language policy","author":"Li","year":"2024","journal-title":"CoRL"},{"key":"ref46","article-title":"Scaling proprioceptive-visual learning with heterogeneous pre-trained transformers","author":"Wang","year":"2024","journal-title":"NeurIPS"},{"key":"ref47","article-title":"Self-generated in-context learning: Leveraging auto-regressive language models as a demonstration generator","volume-title":"NAACL workshop","author":"Kim","year":"2022"},{"key":"ref48","article-title":"Evaluating large language models trained on code","author":"Chen","year":"2021","journal-title":"ICML"},{"key":"ref49","article-title":"Teaching algorithmic reasoning via in-context learning","author":"Zhou","year":"2023","journal-title":"NeurIPS"},{"key":"ref50","article-title":"Multimodal task vectors enable many-shot multimodal in-context learning","author":"Huang","year":"2024","journal-title":"NeurIPS"},{"key":"ref51","article-title":"Gpt-driver: Learning to drive with gpt","volume-title":"NeurIPS workshop","author":"Mao","year":"2023"},{"key":"ref52","article-title":"Large language models as general pattern machines","author":"Mirchandani","year":"2023","journal-title":"CoRL"},{"key":"ref53","article-title":"Incoro: In-context learning for robotics control with feedback loops","author":"Zhu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3140817"},{"key":"ref55","article-title":"Perceiver-actor: A multi-task transformer for robotic manipulation","author":"Shridhar","year":"2022","journal-title":"CoRL"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.090"},{"key":"ref57","article-title":"Pre-training auto-regressive robotic models with 4d representations","author":"Niu","year":"2025","journal-title":"arXiv preprint"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.055"},{"key":"ref59","article-title":"Act3d: 3d feature field transformers for multi-task robotic manipulation","author":"Gervet","year":"2023","journal-title":"CORL"},{"key":"ref60","article-title":"Robot learning with sensorimotor pre-training","author":"Radosavovic","year":"2023","journal-title":"CoRL"},{"volume-title":"Polymetis","year":"2021","author":"Lin","key":"ref61"},{"key":"ref62","article-title":"Qwen2 technical report","author":"Yang","year":"2024","journal-title":"arXiv preprint"}],"event":{"name":"2025 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2025,5,19]]},"location":"Atlanta, GA, USA","end":{"date-parts":[[2025,5,23]]}},"container-title":["2025 IEEE International Conference on Robotics and Automation 
(ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11127273\/11127223\/11128807.pdf?arnumber=11128807","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T06:09:05Z","timestamp":1756879745000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11128807\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,19]]},"references-count":62,"URL":"https:\/\/doi.org\/10.1109\/icra55743.2025.11128807","relation":{},"subject":[],"published":{"date-parts":[[2025,5,19]]}}}