{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T07:36:15Z","timestamp":1757576175523},"reference-count":58,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,7,15]],"date-time":"2024-07-15T00:00:00Z","timestamp":1721001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,7,15]],"date-time":"2024-07-15T00:00:00Z","timestamp":1721001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,7,15]]},"DOI":"10.1109\/icme57554.2024.10687511","type":"proceedings-article","created":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T17:24:16Z","timestamp":1727717056000},"page":"1-5","source":"Crossref","is-referenced-by-count":3,"title":["COCO is \u201cALL\u201d You Need for Visual Instruction Fine-tuning"],"prefix":"10.1109","author":[{"given":"Xiaotian","family":"Han","sequence":"first","affiliation":[{"name":"ByteDance Inc"}]},{"given":"Yiqi","family":"Wang","sequence":"additional","affiliation":[{"name":"ByteDance Inc"}]},{"given":"Bohan","family":"Zhai","sequence":"additional","affiliation":[]},{"given":"Quanzeng","family":"You","sequence":"additional","affiliation":[{"name":"ByteDance Inc"}]},{"given":"Hongxia","family":"Yang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"year":"2023","key":"ref1","article-title":"GPT-4 Technical Report"},{"article-title":"Llama 2: Open Foundation and Fine-Tuned Chat Models","year":"2023","author":"Touvron","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-55560-2_5"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.33540\/2168"},{"article-title":"InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning","year":"2023","author":"Dai","key":"ref5"},{"article-title":"MIMIC-IT: Multi-Modal In-Context Instruction Tuning","year":"2023","author":"Li","key":"ref6"},{"article-title":"LLaVAR: Enhanced Visual Instruction Tuning for Text-Rich Image Understanding","year":"2023","author":"Zhang","key":"ref7"},{"article-title":"SVIT: Scaling up Visual Instruction Tuning","year":"2023","author":"Zhao","key":"ref8"},{"article-title":"LAMM: Language-Assisted Multi-Modal Instruction-Tuning Dataset, Framework, and Benchmark","year":"2023","author":"Yin","key":"ref9"},{"article-title":"To See is to Believe: Prompting GPT-4V for Better Visual Instruction Tuning","year":"2023","author":"Wang","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00904"},{"article-title":"Microsoft COCO Captions: Data Collection and Evaluation Server","year":"2015","author":"Chen","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00686"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00380"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"article-title":"MME: A Comprehensive Evaluation Benchmark for Multimodal Large Language Models","year":"2023","author":"Fu","key":"ref17"},{"article-title":"SEED-Bench-2: Benchmarking Multimodal Large Language Models","year":"2023","author":"Li","key":"ref18"},{"article-title":"MMMU: A Massive Multi-discipline Multimodal Understanding and Reasoning Benchmark for Expert AGI","year":"2023","author":"Yue","key":"ref19"},{"article-title":"MM-Vet: Evaluating Large Multimodal Models for Integrated Capabilities","year":"2023","author":"Yu","key":"ref20"},{"article-title":"InfiMM-Eval: Complex Open-Ended Reasoning Evaluation For Multi-Modal Large Language Models","year":"2023","author":"Han","key":"ref21"},{"article-title":"Improved Baselines with Visual Instruction Tuning","year":"2023","author":"Liu","key":"ref22"},{"article-title":"Microsoft COCO: Common Objects in Context","year":"2015","author":"Lin","key":"ref23"},{"article-title":"Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations","year":"2016","author":"Krishna","key":"ref24"},{"article-title":"MM-REACT: Prompting ChatGPT for Multimodal Reasoning and Action","year":"2023","author":"Yang","key":"ref25"},{"article-title":"HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face","year":"2023","author":"Shen","key":"ref26"},{"article-title":"Visual ChatGPT: Talking, Drawing and Editing with Visual Foundation Models","year":"2023","author":"Wu","key":"ref27"},{"article-title":"Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond","year":"2023","author":"Bai","key":"ref28"},{"article-title":"Flamingo: a Visual Language Model for Few-Shot Learning","year":"2022","author":"Alayrac","key":"ref29"},{"article-title":"BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models","year":"2023","author":"Li","key":"ref30"},{"article-title":"MiniGPT-4: Enhancing Vision-Language Understanding with Advanced Large Language Models","year":"2023","author":"Zhu","key":"ref31"},{"article-title":"LAION-5B: An open large-scale dataset for training next generation image-text models","year":"2022","author":"Schuhmann","key":"ref32"},{"article-title":"COYO-700M: Image-Text Pair Dataset","year":"2022","author":"Byeon","key":"ref33"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/2812802"},{"article-title":"Multimodal C4: An Open, Billion-scale Corpus of Images Interleaved With Text","year":"2023","author":"Zhu","key":"ref35"},{"article-title":"CogVLM: Visual Expert for Pretrained Language Models","year":"2023","author":"Wang","key":"ref36"},{"article-title":"MMICL: Empowering Vision-language Model with Multi-Modal In-Context Learning","year":"2023","author":"Zhao","key":"ref37"},{"article-title":"ShareGPT4V: Improving Large Multi-Modal Models with Better Captions","year":"2023","author":"Chen","key":"ref38"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.356"},{"article-title":"Exploring Models and Data for Image Question Answering","year":"2015","author":"Ren","key":"ref40"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.540"},{"article-title":"The Color of the Cat is Gray: 1 Million Full-Sentences Visual Question Answering (FSVQA)","year":"2016","author":"Shin","key":"ref42"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00566"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00331"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20074-8_9"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.217"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3463259"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1145\/3219819.3220036"},{"article-title":"Interpretable Counting for Visual Question Answering","year":"2018","author":"Trott","key":"ref49"},{"article-title":"TallyQA: Answering Complex Counting Questions","year":"2018","author":"Acharya","key":"ref50"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_34"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00439"},{"article-title":"Point and Ask: Incorporating Pointing into Visual Question Answering","year":"2022","author":"Mani","key":"ref53"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1152"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1184"},{"article-title":"Sparkles: Unlocking Chats Across Multiple Images for Multimodal Instruction-Following Models","year":"2023","author":"Huang","key":"ref56"},{"article-title":"Flickr30k Entities: Collecting Region-to-Phrase Correspondences for Richer Image-to-Sentence Models","year":"2016","author":"Plummer","key":"ref57"},{"article-title":"Chain-of-Thought Prompting Elicits Reasoning in Large Language Models","year":"2023","author":"Wei","key":"ref58"}],"event":{"name":"2024 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2024,7,15]]},"location":"Niagara Falls, ON, Canada","end":{"date-parts":[[2024,7,19]]}},"container-title":["2024 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10685847\/10687354\/10687511.pdf?arnumber=10687511","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T05:56:16Z","timestamp":1727762176000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10687511\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,15]]},"references-count":58,"URL":"https:\/\/doi.org\/10.1109\/icme57554.2024.10687511","relation":{},"subject":[],"published":{"date-parts":[[2024,7,15]]}}}