{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T16:53:40Z","timestamp":1775667220967,"version":"3.50.1"},"publisher-location":"Cham","reference-count":58,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729690","type":"print"},{"value":"9783031729706","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,23]],"date-time":"2024-11-23T00:00:00Z","timestamp":1732320000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,23]],"date-time":"2024-11-23T00:00:00Z","timestamp":1732320000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72970-6_8","type":"book-chapter","created":{"date-parts":[[2024,11,22]],"date-time":"2024-11-22T10:49:55Z","timestamp":1732272595000},"page":"126-142","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":30,"title":["LLaVA-Plus: Learning to\u00a0Use Tools for\u00a0Creating Multimodal Agents"],"prefix":"10.1007","author":[{"given":"Shilong","family":"Liu","sequence":"first","affiliation":[]},{"given":"Hao","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Haotian","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Hao","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Feng","family":"Li","sequence":"additional","affiliation":[]},{"given":"Tianhe","family":"Ren","sequence":"additional","affiliation":[]},{"given":"Xueyan","family":"Zou","sequence":"additional","affiliation":[]},{"given":"Jianwei","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Hang","family":"Su","sequence":"additional","affiliation":[]},{"given":"Jun","family":"Zhu","sequence":"additional","affiliation":[]},{"given":"Lei","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Jianfeng","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Chunyuan","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,23]]},"reference":[{"key":"8_CR1","unstructured":"Langchain (2022). https:\/\/github.com\/hwchase17\/langchain"},{"key":"8_CR2","unstructured":"Alayrac, J.B., et\u00a0al.: Flamingo: a visual language model for few-shot learning. arXiv preprint arXiv:2204.14198 (2022)"},{"key":"8_CR3","unstructured":"Askell, A., et\u00a0al.: A general language assistant as a laboratory for alignment. arXiv preprint arXiv:2112.00861 (2021)"},{"key":"8_CR4","doi-asserted-by":"publisher","unstructured":"Awadalla, A., et al.: OpenFlamingo (2023). https:\/\/doi.org\/10.5281\/zenodo.7733589","DOI":"10.5281\/zenodo.7733589"},{"key":"8_CR5","unstructured":"Bitton, Y., et al.: Visit-bench: a benchmark for vision-language instruction following inspired by real-world use (2023)"},{"key":"8_CR6","doi-asserted-by":"crossref","unstructured":"Brooks, T., Holynski, A., Efros, A.A.: InstructPix2Pix: learning to follow image editing instructions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
18392\u201318402 (2023)","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"8_CR7","unstructured":"Chen, K., Zhang, Z., Zeng, W., Zhang, R., Zhu, F., Zhao, R.: Shikra: unleashing multimodal llm\u2019s referential dialogue magic. arXiv preprint arXiv:2306.15195 (2023)"},{"key":"8_CR8","doi-asserted-by":"crossref","unstructured":"Chen, Y., et al.: Can pre-trained vision and language models answer visual information-seeking questions? (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.925"},{"key":"8_CR9","unstructured":"Dai, W., et al.: InstructBLIP: towards general-purpose vision-language models with instruction tuning. arXiv preprint arXiv:2305.06500 (2023)"},{"key":"8_CR10","doi-asserted-by":"crossref","unstructured":"Gan, Z., Li, L., Li, C., Wang, L., Liu, Z., Gao, J.: Vision-language pre-training: basics, recent advances, and future trends. Found. Trends\u00ae Comput. Graph. Vision (2022)","DOI":"10.1561\/9781638281337"},{"key":"8_CR11","unstructured":"Gao, P., et\u00a0al.: LLaMA-adapter v2: parameter-efficient visual instruction model. arXiv preprint arXiv:2304.15010 (2023)"},{"key":"8_CR12","doi-asserted-by":"crossref","unstructured":"Gupta, T., Kembhavi, A.: Visual programming: compositional visual reasoning without training. arXiv preprint arXiv:2211.11559 (2022)","DOI":"10.1109\/CVPR52729.2023.01436"},{"key":"8_CR13","unstructured":"JaidedAI: EasyOCR (2023). https:\/\/github.com\/JaidedAI\/EasyOCR"},{"key":"8_CR14","unstructured":"Kirillov, A., et\u00a0al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)"},{"key":"8_CR15","unstructured":"Koh, J.Y., Fried, D., Salakhutdinov, R.: Generating images with multimodal language models. arXiv preprint arXiv:2305.17216 (2023)"},{"key":"8_CR16","doi-asserted-by":"crossref","unstructured":"Lai, X., et al.: LISA: reasoning segmentation via large language model. arXiv preprint arXiv:2308.00692 (2023)","DOI":"10.1109\/CVPR52733.2024.00915"},{"key":"8_CR17","unstructured":"Li, B., Zhang, Y., Chen, L., Wang, J., Yang, J., Liu, Z.: Otter: a multi-modal model with in-context instruction tuning. arXiv preprint arXiv:2305.03726 (2023)"},{"key":"8_CR18","doi-asserted-by":"crossref","unstructured":"Li, B., Wang, R., Wang, G., Ge, Y., Ge, Y., Shan, Y.: Seed-bench: benchmarking multimodal llms with generative comprehension. arXiv preprint arXiv:2307.16125 (2023)","DOI":"10.1109\/CVPR52733.2024.01263"},{"key":"8_CR19","doi-asserted-by":"crossref","unstructured":"Li, C., Gan, Z., Yang, Z., Yang, J., Li, L., Wang, L., Gao, J.: Multimodal foundation models: From specialists to general-purpose assistants. arXiv preprint arXiv:2309.10020 (2023)","DOI":"10.1561\/9781638283379"},{"key":"8_CR20","unstructured":"Li, C., et al.: ELEVATER: a benchmark and toolkit for evaluating language-augmented visual models. In: NeurIPS Track on Datasets and Benchmarks (2022)"},{"key":"8_CR21","unstructured":"Li, F., et al.: Semantic-SAM: segment and recognize anything at any granularity. arXiv preprint arXiv:2307.04767 (2023)"},{"key":"8_CR22","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023)"},{"key":"8_CR23","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning (2023)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"8_CR24","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. 
arXiv preprint arXiv:2304.08485 (2023)"},{"key":"8_CR25","unstructured":"Liu, S., Fan, L., Johns, E., Yu, Z., Xiao, C., Anandkumar, A.: Prismer: a vision-language model with an ensemble of experts. arXiv preprint arXiv:2303.02506 (2023)"},{"key":"8_CR26","doi-asserted-by":"crossref","unstructured":"Liu, S., et\u00a0al.: Grounding DINO: marrying DINO with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499 (2023)","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"8_CR27","doi-asserted-by":"crossref","unstructured":"Long, S., Qin, S., Panteleev, D., Bissacco, A., Fujii, Y., Raptis, M.: Towards end-to-end unified scene text detection and layout analysis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2022)","DOI":"10.1109\/CVPR52688.2022.00112"},{"key":"8_CR28","doi-asserted-by":"crossref","unstructured":"Long, S., Qin, S., Panteleev, D., Bissacco, A., Fujii, Y., Raptis, M.: ICDAR 2023 competition on hierarchical text detection and recognition. arXiv preprint arXiv:2305.09750 (2023)","DOI":"10.1007\/978-3-031-41679-8_28"},{"key":"8_CR29","doi-asserted-by":"crossref","unstructured":"Minsky, M.: Society of Mind. Simon and Schuster (1988)","DOI":"10.21236\/ADA200313"},{"key":"8_CR30","unstructured":"OpenAI: ChatGPT (2023). https:\/\/openai.com\/blog\/chatgpt\/"},{"key":"8_CR31","unstructured":"OpenAI: ChatGPT plugins (2023). https:\/\/openai.com\/blog\/chatgpt-plugins"},{"key":"8_CR32","unstructured":"OpenAI: GPT-4 technical report (2023)"},{"key":"8_CR33","unstructured":"OpenAI: GPT-4v(ision) system card (2023). https:\/\/cdn.openai.com\/papers\/GPTV_System_Card.pdf"},{"key":"8_CR34","unstructured":"Pan, J., et al.: JourneyDB: a benchmark for generative image understanding (2023)"},{"key":"8_CR35","unstructured":"Patil, S.G., Zhang, T., Wang, X., Gonzalez, J.E.: Gorilla: large language model connected with massive APIs. arXiv preprint arXiv:2305.15334 (2023)"},{"key":"8_CR36","unstructured":"Peng, B., Li, C., He, P., Galley, M., Gao, J.: Instruction tuning with GPT-4. arXiv preprint arXiv:2304.03277 (2023)"},{"key":"8_CR37","unstructured":"Peng, Z., et al.: Kosmos-2: grounding multimodal large language models to the world. arXiv preprint arXiv:2306.14824 (2023)"},{"key":"8_CR38","doi-asserted-by":"crossref","unstructured":"Pi, R., et al.: DetGPT: detect what you need via reasoning. arXiv preprint arXiv:2305.14167 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.876"},{"key":"8_CR39","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. arXiv preprint arXiv:2103.00020 (2021)"},{"key":"8_CR40","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models (2021)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"8_CR41","unstructured":"Schick, T., et al.: ToolFormer: language models can teach themselves to use tools. arXiv preprint arXiv:2302.04761 (2023)"},{"key":"8_CR42","unstructured":"Sun, Q., et al.: Generative pretraining in multimodality. arXiv preprint arXiv:2307.05222 (2023)"},{"key":"8_CR43","doi-asserted-by":"crossref","unstructured":"Sur\u00eds, D., Menon, S., Vondrick, C.: ViperGPT: visual inference via python execution for reasoning. arXiv preprint arXiv:2303.08128 (2023)","DOI":"10.1109\/ICCV51070.2023.01092"},{"key":"8_CR44","unstructured":"Vicuna: Vicuna: An open-source chatbot impressing GPT-4 with 90%* ChatGPT quality (2023). 
https:\/\/vicuna.lmsys.org\/"},{"key":"8_CR45","unstructured":"Wu, C., Yin, S., Qi, W., Wang, X., Tang, Z., Duan, N.: Visual ChatGPT: talking, drawing and editing with visual foundation models. arXiv preprint arXiv:2303.04671 (2023)"},{"key":"8_CR46","unstructured":"Yang, R., et al.: GPT4Tools: teaching large language model to use tools via self-instruction. arXiv preprint arXiv:2305.18752 (2023)"},{"key":"8_CR47","unstructured":"Yang, Z., et al.: MM-REACT: prompting ChatGPT for multimodal reasoning and action. arXiv preprint arXiv:2303.11381 (2023)"},{"key":"8_CR48","unstructured":"Yao, S., et al.: REACT: synergizing reasoning and acting in language models. arXiv preprint arXiv:2210.03629 (2022)"},{"key":"8_CR49","unstructured":"Yu, L., et\u00a0al: Scaling autoregressive multi-modal models: pretraining and instruction tuning (2023)"},{"key":"8_CR50","unstructured":"Yu, W., et al.: MM-Vet: evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490 (2023)"},{"key":"8_CR51","unstructured":"Zhang, H., et al.: A simple framework for open-vocabulary segmentation and detection. arXiv preprint arXiv:2303.08131 (2023)"},{"key":"8_CR52","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"8_CR53","unstructured":"Zhang, S., et al.: GPT4ROI: instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601 (2023)"},{"key":"8_CR54","unstructured":"Zhang, Y., et\u00a0al.: Recognize anything: a strong image tagging model. arXiv preprint arXiv:2306.03514 (2023)"},{"key":"8_CR55","unstructured":"Zhao, Y., Lin, Z., Zhou, D., Huang, Z., Feng, J., Kang, B.: BuboGPT: enabling visual grounding in multi-modal LLMs. arXiv preprint arXiv:2307.08581 (2023)"},{"key":"8_CR56","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: MiniGPT-4: enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592 (2023)"},{"key":"8_CR57","doi-asserted-by":"crossref","unstructured":"Zou, X., et al.: Generalized decoding for pixel, image, and language. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15116\u201315127 (2023)","DOI":"10.1109\/CVPR52729.2023.01451"},{"key":"8_CR58","unstructured":"Zou, X., et al.: Segment everything everywhere all at once. 
arXiv preprint arXiv:2304.06718 (2023)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72970-6_8","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T20:50:16Z","timestamp":1733086216000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72970-6_8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,23]]},"ISBN":["9783031729690","9783031729706"],"references-count":58,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72970-6_8","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,23]]},"assertion":[{"value":"23 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
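The record above is a work response from the Crossref REST API. As a minimal sketch of how the same record can be fetched and a few fields pulled out, the snippet below uses Crossref's public endpoint https://api.crossref.org/works/{DOI}, which returns the "status"/"message-type"/"message" envelope shown above; the variable names and the placeholder mailto contact are illustrative, not part of the record.

```python
import json
import urllib.request

# Fetch the work record shown above from the Crossref REST API.
# Crossref serves works at https://api.crossref.org/works/{DOI}; supplying a
# mailto in the User-Agent opts the request into Crossref's "polite" pool.
DOI = "10.1007/978-3-031-72970-6_8"
req = urllib.request.Request(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "metadata-demo/0.1 (mailto:you@example.org)"},  # placeholder contact
)

with urllib.request.urlopen(req) as resp:
    record = json.load(resp)

# The envelope matches the record above: "status", "message-type", "message".
assert record["status"] == "ok" and record["message-type"] == "work"
work = record["message"]

print(work["title"][0])                  # chapter title
print(work["DOI"], "pp.", work["page"])  # DOI and page range
print(len(work.get("reference", [])), "references")  # 58 for this chapter

# Authors in deposit order, e.g. "Shilong Liu, Hao Cheng, ..."
print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))
```

The same envelope wraps every work record, so only the contents of "message" vary by DOI; fields such as "reference" and "assertion" are optional and should be read with defaults, as done for "reference" above.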