{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:28:08Z","timestamp":1766068088493,"version":"3.40.3"},"publisher-location":"Cham","reference-count":66,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031732317"},{"type":"electronic","value":"9783031732324"}],"license":[{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73232-4_2","type":"book-chapter","created":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T06:01:53Z","timestamp":1727589713000},"page":"20-38","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":18,"title":["Octopus: Embodied Vision-Language Programmer from\u00a0Environmental Feedback"],"prefix":"10.1007","author":[{"given":"Jingkang","family":"Yang","sequence":"first","affiliation":[]},{"given":"Yuhao","family":"Dong","sequence":"additional","affiliation":[]},{"given":"Shuai","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Bo","family":"Li","sequence":"additional","affiliation":[]},{"given":"Ziyue","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Haoran","family":"Tan","sequence":"additional","affiliation":[]},{"given":"Chencheng","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Jiamu","family":"Kang","sequence":"additional","affiliation":[]},{"given":"Yuanhan","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Kaiyang","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Ziwei","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,30]]},"reference":[{"key":"2_CR1","unstructured":"Grand theft auto v (2014)"},{"key":"2_CR2","unstructured":"Minecraft (2023)"},{"key":"2_CR3","unstructured":"Ahn, M., et al.: Do as i can and not as i say: grounding language in robotic affordances. arXiv preprint arXiv:2204.01691 (2022)"},{"key":"2_CR4","first-page":"23716","volume":"35","author":"JB Alayrac","year":"2022","unstructured":"Alayrac, J.B., et al.: Flamingo: a visual language model for few-shot learning. Adv. Neural. Inf. Process. Syst. 35, 23716\u201323736 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"2_CR5","unstructured":"Awadalla, A., et al.: OpenFlamingo: an open-source framework for training large autoregressive vision-language models. arXiv preprint arXiv:2308.01390 (2023)"},{"key":"2_CR6","unstructured":"Baker, B., et al.: Video pretraining (VPT): learning to act by watching unlabeled online videos (2022)"},{"key":"2_CR7","doi-asserted-by":"publisher","unstructured":"Bellemare, M.G., Naddaf, Y., Veness, J., Bowling, M.: The arcade learning environment: an evaluation platform for general agents. J. Artif. Intell. Res. 47, 253\u2013279 (2013). 
https:\/\/doi.org\/10.1613\/jair.3912","DOI":"10.1613\/jair.3912"},{"key":"2_CR8","doi-asserted-by":"crossref","unstructured":"Billard, A., Kragic, D.: Trends and challenges in robot manipulation. Science 364(6446), eaat8414 (2019)","DOI":"10.1126\/science.aat8414"},{"key":"2_CR9","unstructured":"Brockman, G., et al.: OpenAI gym. arXiv preprint arXiv:1606.01540 (2016)"},{"key":"2_CR10","unstructured":"Brohan, A., et\u00a0al.: RT-2: vision-language-action models transfer web knowledge to robotic control. arXiv preprint arXiv:2307.15818 (2023)"},{"key":"2_CR11","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown, T.: Language models are few-shot learners. Adv. Neural. Inf. Process. Syst. 33, 1877\u20131901 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"2_CR12","doi-asserted-by":"crossref","unstructured":"Chang, A., et al.: Matterport3D: learning from RGB-D data in indoor environments. arXiv preprint arXiv:1709.06158 (2017)","DOI":"10.1109\/3DV.2017.00081"},{"key":"2_CR13","unstructured":"Chen, J., et al.: RoboScript: code generation for free-form manipulation tasks across real and simulation (2024)"},{"key":"2_CR14","unstructured":"Chen, L., et al.: Language models are visual reasoning coordinators. In: ICLR 2023 Workshop on Mathematical and Empirical Understanding of Foundation Models (2023)"},{"key":"2_CR15","unstructured":"Chiang, W.L., et\u00a0al.: Vicuna: an open-source chatbot impressing GPT-4 with 90%* ChatGPT quality (2023). See https:\/\/vicunalmsys.org. Accessed 14 Apr 2023"},{"key":"2_CR16","unstructured":"Dai, W., et al.: InstructBLIP: towards general-purpose vision-language models with instruction tuning. arXiv preprint arXiv:2305.06500 (2023)"},{"key":"2_CR17","doi-asserted-by":"crossref","unstructured":"Das, A., Datta, S., Gkioxari, G., Lee, S., Parikh, D., Batra, D.: Embodied question answering (2017)","DOI":"10.1109\/CVPR.2018.00008"},{"key":"2_CR18","unstructured":"Driess, D., et al.: PaLM-E: an embodied multimodal language model. arXiv preprint arXiv:2303.03378 (2023)"},{"key":"2_CR19","doi-asserted-by":"publisher","first-page":"255","DOI":"10.1146\/annurev.psych.59.103006.093629","volume":"59","author":"JSB Evans","year":"2008","unstructured":"Evans, J.S.B.: Dual-processing accounts of reasoning, judgment, and social cognition. Annu. Rev. Psychol. 59, 255\u2013278 (2008)","journal-title":"Annu. Rev. Psychol."},{"key":"2_CR20","unstructured":"Fan, L., et al.: MineDojo: building open-ended embodied agents with internet-scale knowledge (2022)"},{"key":"2_CR21","doi-asserted-by":"crossref","unstructured":"Fu, H., et al.: RFUniverse: a multiphysics simulation platform for embodied AI (2023)","DOI":"10.15607\/RSS.2023.XIX.087"},{"key":"2_CR22","doi-asserted-by":"crossref","unstructured":"Fu, Z., Zhao, T.Z., Finn, C.: Mobile ALOHA: learning bimanual mobile manipulation with low-cost whole-body teleoperation. arXiv (2024)","DOI":"10.15607\/RSS.2023.XIX.016"},{"key":"2_CR23","unstructured":"Gao, X., Gong, R., Shu, T., Xie, X., Wang, S., Zhu, S.C.: VRKitchen: an interactive 3D virtual environment for task-oriented learning (2019)"},{"key":"2_CR24","doi-asserted-by":"crossref","unstructured":"Gu, S., Holly, E., Lillicrap, T., Levine, S.: Deep reinforcement learning for robotic manipulation with asynchronous off-policy updates. In: 2017 IEEE International Conference on Robotics and Automation (ICRA), pp. 3389\u20133396. 
IEEE (2017)","DOI":"10.1109\/ICRA.2017.7989385"},{"key":"2_CR25","doi-asserted-by":"crossref","unstructured":"Gupta, T., Kembhavi, A.: Visual programming: compositional visual reasoning without training. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14953\u201314962 (2023)","DOI":"10.1109\/CVPR52729.2023.01436"},{"key":"2_CR26","unstructured":"Huang, S., Jiang, Z., Dong, H., Qiao, Y., Gao, P., Li, H.: Instruct2Act: mapping multi-modality instructions to robotic actions with large language model. arXiv preprint arXiv:2305.11176 (2023)"},{"key":"2_CR27","unstructured":"Huang, W., Wang, C., Zhang, R., Li, Y., Wu, J., Fei-Fei, L.: VoxPoser: composable 3D value maps for robotic manipulation with language models. arXiv preprint arXiv:2307.05973 (2023)"},{"issue":"2","key":"2_CR28","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3054912","volume":"50","author":"A Hussein","year":"2017","unstructured":"Hussein, A., Gaber, M.M., Elyan, E., Jayne, C.: Imitation learning: a survey of learning methods. ACM Comput. Surv. (CSUR) 50(2), 1\u201335 (2017)","journal-title":"ACM Comput. Surv. (CSUR)"},{"key":"2_CR29","unstructured":"Kahneman, D.: Thinking, Fast and Slow. Macmillan, New York (2011)"},{"key":"2_CR30","unstructured":"Kolve, E., et\u00a0al.: AI2-THOR: an interactive 3D environment for visual AI. arXiv preprint arXiv:1712.05474 (2017)"},{"key":"2_CR31","unstructured":"Li, B., Zhang, Y., Chen, L., Wang, J., Yang, J., Liu, Z.: Otter: a multi-modal model with in-context instruction tuning. arXiv preprint arXiv:2305.03726 (2023)"},{"key":"2_CR32","unstructured":"Li, C., et\u00a0al.: BEHAVIOR-1K: a benchmark for embodied AI with 1,000 everyday activities and realistic simulation. In: Conference on Robot Learning, pp. 80\u201393. PMLR (2023)"},{"key":"2_CR33","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023)"},{"key":"2_CR34","doi-asserted-by":"crossref","unstructured":"Lin, K., Agia, C., Migimatsu, T., Pavone, M., Bohg, J.: Text2Motion: from natural language instructions to feasible plans. arXiv preprint arXiv:2303.12153 (2023)","DOI":"10.1007\/s10514-023-10131-7"},{"key":"2_CR35","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning (2023)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"2_CR36","unstructured":"Liu, H., et al.: LLaVA-NeXT: improved reasoning, OCR, and world knowledge, January 2024. https:\/\/llava-vl.github.io\/blog\/2024-01-30-llava-next\/"},{"key":"2_CR37","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023)"},{"key":"2_CR38","unstructured":"Liu, Z., Dong, Y., Rao, Y., Zhou, J., Lu, J.: Chain-of-Spot: interactive reasoning improves large vision-language models. arXiv preprint arXiv:2403.12966 (2024)"},{"key":"2_CR39","doi-asserted-by":"crossref","unstructured":"Mao, H., et al.: SEIHAI: a sample-efficient hierarchical AI for the MineRL competition (2021)","DOI":"10.1007\/978-3-030-94662-3_3"},{"key":"2_CR40","unstructured":"MosaicML: MPT-7B (2023). https:\/\/www.mosaicml.com\/blog\/mpt-7b. Accessed 23 May 2023"},{"key":"2_CR41","unstructured":"Mu, Y., et al.: EmbodiedGPT: vision-language pre-training via embodied chain of thought. 
arXiv preprint arXiv:2305.15021 (2023)"},{"key":"2_CR42","first-page":"27730","volume":"35","author":"L Ouyang","year":"2022","unstructured":"Ouyang, L., et al.: Training language models to follow instructions with human feedback. Adv. Neural. Inf. Process. Syst. 35, 27730\u201327744 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"2_CR43","doi-asserted-by":"crossref","unstructured":"Park, J.S., O\u2019Brien, J.C., Cai, C.J., Morris, M.R., Liang, P., Bernstein, M.S.: Generative agents: interactive simulacra of human behavior. arXiv preprint arXiv:2304.03442 (2023)","DOI":"10.1145\/3586183.3606763"},{"key":"2_CR44","doi-asserted-by":"crossref","unstructured":"Puig, X., et al.: VirtualHome: simulating household activities via programs. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8494\u20138502 (2018)","DOI":"10.1109\/CVPR.2018.00886"},{"key":"2_CR45","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"issue":"8","key":"2_CR46","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., et al.: Language models are unsupervised multitask learners. OpenAI Blog 1(8), 9 (2019)","journal-title":"OpenAI Blog"},{"key":"2_CR47","doi-asserted-by":"crossref","unstructured":"Ramakrishnan, S.K., Jayaraman, D., Grauman, K.: An exploration of embodied visual exploration (2020)","DOI":"10.1007\/s11263-021-01437-z"},{"key":"2_CR48","unstructured":"Rana, K., Haviland, J., Garg, S., Abou-Chakra, J., Reid, I., Suenderhauf, N.: SayPlan: grounding large language models using 3D scene graphs for scalable task planning. arXiv preprint arXiv:2307.06135 (2023)"},{"key":"2_CR49","doi-asserted-by":"crossref","unstructured":"Savva, M., et\u00a0al.: Habitat: a platform for embodied AI research. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9339\u20139347 (2019)","DOI":"10.1109\/ICCV.2019.00943"},{"key":"2_CR50","unstructured":"Schick, T., et al.: ToolFormer: language models can teach themselves to use tools. arXiv preprint arXiv:2302.04761 (2023)"},{"key":"2_CR51","unstructured":"Schulman, J., Wolski, F., Dhariwal, P., Radford, A., Klimov, O.: Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347 (2017)"},{"key":"2_CR52","unstructured":"Shen, Y., Song, K., Tan, X., Li, D., Lu, W., Zhuang, Y.: HuggingGPT: solving AI tasks with ChatGPT and its friends in HuggingFace. arXiv preprint arXiv:2303.17580 (2023)"},{"key":"2_CR53","doi-asserted-by":"crossref","unstructured":"Sur\u00eds, D., Menon, S., Vondrick, C.: ViperGPT: visual inference via python execution for reasoning. arXiv preprint arXiv:2303.08128 (2023)","DOI":"10.1109\/ICCV51070.2023.01092"},{"key":"2_CR54","unstructured":"Touvron, H., et\u00a0al.: LLaMA: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"2_CR55","unstructured":"Wang, G., et al.: Voyager: an open-ended embodied agent with large language models (2023)"},{"key":"2_CR56","unstructured":"Wang, H., Liang, W., Gool, L.V., Wang, W.: Towards versatile embodied navigation (2022)"},{"key":"2_CR57","unstructured":"Wu, Y., Wu, Y., Gkioxari, G., Tian, Y.: Building generalizable agents with a realistic and rich 3D environment. 
arXiv preprint arXiv:1801.02209 (2018)"},{"key":"2_CR58","unstructured":"Wu, Z., Wang, Z., Xu, X., Lu, J., Yan, H.: Embodied task planning with large language models. arXiv preprint arXiv:2307.01848 (2023)"},{"key":"2_CR59","doi-asserted-by":"crossref","unstructured":"Xiang, F., et al.: SAPIEN: a simulated part-based interactive environment (2020)","DOI":"10.1109\/CVPR42600.2020.01111"},{"key":"2_CR60","doi-asserted-by":"crossref","unstructured":"Xie, B., et al.: FunQA: towards surprising video comprehension. arXiv preprint arXiv:2306.14899 (2023)","DOI":"10.1007\/978-3-031-73232-4_3"},{"key":"2_CR61","unstructured":"Yan, C., Misra, D., Bennett, A., Walsman, A., Bisk, Y., Artzi, Y.: CHALET: Cornell house agent learning environment (2019)"},{"key":"2_CR62","unstructured":"Yu, W., et\u00a0al.: Language to rewards for robotic skill synthesis. arXiv preprint arXiv:2306.08647 (2023)"},{"key":"2_CR63","unstructured":"Yuan, H., et al.: Skill reinforcement learning and planning for open-world long-horizon tasks (2023)"},{"key":"2_CR64","unstructured":"Zheng, S., Liu, J., Feng, Y., Lu, Z.: Steve-Eye: equipping LLM-based embodied agents with visual perception in open worlds (2023)"},{"key":"2_CR65","doi-asserted-by":"publisher","unstructured":"Zhou, X., Girdhar, R., Joulin, A., Kr\u00e4henb\u00fchl, P., Misra, I.: Detecting twenty-thousand classes using image-level supervision. In: Avidan, S., Brostow, G., Cisse, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022. ECCV 2022. LNCS, vol. 13669, pp. 350\u2013368. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20077-9_21","DOI":"10.1007\/978-3-031-20077-9_21"},{"key":"2_CR66","unstructured":"Zhu, Y., et al.: robosuite: a modular simulation framework and benchmark for robot learning. 
arXiv preprint arXiv:2009.12293 (2020)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73232-4_2","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T21:15:14Z","timestamp":1732828514000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73232-4_2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,30]]},"ISBN":["9783031732317","9783031732324"],"references-count":66,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73232-4_2","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,9,30]]},"assertion":[{"value":"30 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
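The object above is a Crossref works-API response (message-type "work") for the chapter DOI 10.1007/978-3-031-73232-4_2. As a minimal illustrative sketch only: it assumes the public Crossref endpoint https://api.crossref.org/works/{DOI} and the third-party requests package, neither of which is part of the record itself; the field names it reads (title, container-title, DOI, references-count, reference) are the ones that appear verbatim in the JSON above.

# Minimal sketch: fetch the same Crossref record and read its main fields.
# Assumptions: public Crossref REST API and the `requests` package.
import requests

DOI = "10.1007/978-3-031-73232-4_2"

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]          # same shape as the "message" object above

print(work["title"][0])                # chapter title
print(work["container-title"])         # book / series titles
print(work["DOI"], work["type"])       # identifier and record type ("book-chapter")
print(work["references-count"])        # 66 for this chapter

# Each entry in "reference" is a dict; "unstructured" holds the raw citation text.
for ref in work.get("reference", [])[:5]:
    print(ref["key"], ref.get("DOI") or ref.get("unstructured", ""))

The same code can also be pointed at a locally saved copy of this record (json.load on the file, then index into "message") instead of making the HTTP call.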