{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T02:25:13Z","timestamp":1769912713178,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":31,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819626403","type":"print"},{"value":"9789819626410","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-96-2641-0_19","type":"book-chapter","created":{"date-parts":[[2025,3,31]],"date-time":"2025-03-31T00:54:47Z","timestamp":1743382487000},"page":"281-292","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Enhancing Visual Question Answering with\u00a0Pre-trained Vision-Language Models: An Ensemble Approach at\u00a0the\u00a0LAVA Challenge 2024"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2823-3861","authenticated-orcid":false,"given":"Trong-Hieu","family":"Nguyen-Mau","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0007-3769-7955","authenticated-orcid":false,"given":"Nhu-Binh Nguyen","family":"Truc","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0009-7200-9403","authenticated-orcid":false,"given":"Nhu-Vinh","family":"Hoang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3046-3041","authenticated-orcid":false,"given":"Minh-Triet","family":"Tran","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0888-8908","authenticated-orcid":false,"given":"Hai-Dang","family":"Nguyen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,3,29]]},"reference":[{"key":"19_CR1","unstructured":"OpenVLM leaderboard. https:\/\/huggingface.co\/spaces\/opencompass\/open_vlm_leaderboard"},{"key":"19_CR2","unstructured":"ACCV workshop on large vision \u2013 language model learning and applications (2024). https:\/\/lava-workshop.github.io\/"},{"key":"19_CR3","unstructured":"Gopalkrishnan, A., Greer, R., Trivedi, M.: Multi-frame, lightweight & efficient vision-language models for question answering in autonomous driving. arXiv preprint arXiv:2403.19838 (2024)"},{"key":"19_CR4","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2425\u20132433 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"19_CR5","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. arXiv preprint arXiv:1505.00468 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"19_CR6","unstructured":"Chen, L., et\u00a0al.: Are we on the right way for evaluating large vision-language models? 
arXiv preprint arXiv:2403.20330 (2024)"},{"key":"19_CR7","doi-asserted-by":"crossref","unstructured":"Chen, Z., Wang, W., Tian, H., et al.: How far are we to GPT-4v? Closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821 (2024)","DOI":"10.1007\/s11432-024-4231-5"},{"key":"19_CR8","doi-asserted-by":"crossref","unstructured":"Cheng, S., et al.: EgoThink: evaluating first-person perspective thinking capability of vision-language models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14291\u201314302 (2024)","DOI":"10.1109\/CVPR52733.2024.01355"},{"key":"19_CR9","unstructured":"Emanuilov, S.: Qwen2-VL \u2014 a new milestone in vision-language AI. https:\/\/unfoldai.com\/qwen2-vl\/"},{"key":"19_CR10","doi-asserted-by":"crossref","unstructured":"Fukui, A., Park, D.H., Yang, D., Rohrbach, A., Darrell, T., Rohrbach, M.: Multimodal compact bilinear pooling for visual question answering and visual grounding. arXiv preprint arXiv:1606.01847 (2016)","DOI":"10.18653\/v1\/D16-1044"},{"key":"19_CR11","doi-asserted-by":"crossref","unstructured":"Han, X., Wang, S., Su, C., Huang, Q., Tian, Q.: Greedy gradient ensemble for robust visual question answering. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1584\u20131593 (2021)","DOI":"10.1109\/ICCV48922.2021.00161"},{"issue":"1","key":"19_CR12","doi-asserted-by":"publisher","first-page":"17","DOI":"10.1007\/s44267-024-00050-1","volume":"2","author":"Y Jiang","year":"2024","unstructured":"Jiang, Y., et al.: Effectiveness assessment of recent large vision-language models. Vis. Intell. 2(1), 17 (2024)","journal-title":"Vis. Intell."},{"key":"19_CR13","unstructured":"Kim, J., Jun, J., Zhang, B.: Bilinear attention networks. arXiv preprint arXiv:1805.07932 (2018)"},{"key":"19_CR14","unstructured":"Le, B.H., Nguyen-Mau, T.H., Nguyen-Vu, D.K., Ho-Ngoc, V.P., Nguyen, H.D., Tran, M.T.: Leveraging large vision-language models for visual question answering in VizWiz grand challenge (2024)"},{"key":"19_CR15","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Machine Learning, pp. 12888\u201312900. PMLR (2022)"},{"key":"19_CR16","doi-asserted-by":"publisher","first-page":"51","DOI":"10.1016\/j.patrec.2018.04.031","volume":"111","author":"V Lioutas","year":"2018","unstructured":"Lioutas, V., Passalis, N., Tefas, A.: Explicit ensemble attention learning for improving visual question answering. Pattern Recogn. Lett. 111, 51\u201357 (2018)","journal-title":"Pattern Recogn. Lett."},{"key":"19_CR17","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. In: NeurIPS (2023)"},{"key":"19_CR18","unstructured":"Lu, J., Batra, D., Parikh, D., Lee, S.: ViLBERT: pretraining task-agnostic Visiolinguistic representations for vision-and-language tasks. arXiv preprint arXiv:1908.02265 (2019)"},{"key":"19_CR19","doi-asserted-by":"publisher","first-page":"275","DOI":"10.1007\/s10462-012-9338-y","volume":"42","author":"S Masoudnia","year":"2014","unstructured":"Masoudnia, S., Ebrahimpour, R.: Mixture of experts: a literature survey. Artif. Intell. Rev. 42, 275\u2013293 (2014)","journal-title":"Artif. Intell. Rev."},{"key":"19_CR20","unstructured":"Meta: Llama 3.2: revolutionizing edge AI and vision with open, customizable models. 
https:\/\/ai.meta.com\/blog\/llama-3-2-connect-2024-vision-edge-mobile-devices\/"},{"key":"19_CR21","unstructured":"Saxena, S., Sharma, M., Kroemer, O.: MResT: multi-resolution sensing for real-time control with vision-language models. arXiv preprint arXiv:2401.14502 (2024)"},{"key":"19_CR22","doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: LXMERT: learning cross-modality encoder representations from transformers. arXiv preprint arXiv:1908.07490 (2019)","DOI":"10.18653\/v1\/D19-1514"},{"key":"19_CR23","unstructured":"Team, O.: InternVL2: better than the best\u2014expanding performance boundaries of open-source multimodal models with the progressive scaling strategy (2024). https:\/\/internvl.github.io\/blog\/2024-07-02-InternVL-2.0\/"},{"key":"19_CR24","unstructured":"Wang, P., et\u00a0al.: Qwen2-VL: enhancing vision-language model\u2019s perception of the world at any resolution. arXiv preprint arXiv:2409.12191 (2024)"},{"key":"19_CR25","unstructured":"Wang, W., et\u00a0al.: VisionLLM: large language model is also an open-ended decoder for vision-centric tasks. Adv. Neural Inf. Process. Syst. 36 (2024)"},{"key":"19_CR26","first-page":"24824","volume":"35","author":"J Wei","year":"2022","unstructured":"Wei, J., et al.: Chain-of-thought prompting elicits reasoning in large language models. Adv. Neural. Inf. Process. Syst. 35, 24824\u201324837 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"19_CR27","doi-asserted-by":"crossref","unstructured":"Yang, A., Miech, A., Sivic, J., Laptev, I., Schmid, C.: Just ask: learning to answer questions from millions of narrated videos. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1686\u20131697 (2021)","DOI":"10.1109\/ICCV48922.2021.00171"},{"key":"19_CR28","doi-asserted-by":"crossref","unstructured":"Yang, Z., He, X., Gao, J., Deng, L., Smola, A.J.: Stacked attention networks for image question answering. arXiv preprint arXiv:1511.02274 (2015)","DOI":"10.1109\/CVPR.2016.10"},{"key":"19_CR29","unstructured":"Yao, Y., Yu, T., Zhang, et al.: MiniCPM-v: a GPT-4v level MLLM on your phone. arXiv preprint arXiv:2408.01800 (2024)"},{"key":"19_CR30","doi-asserted-by":"crossref","unstructured":"Zhang, J., Huang, J., Jin, S., Lu, S.: Vision-language models for vision tasks: a survey. IEEE Trans. Pattern Anal. Mach. Intell. (2024)","DOI":"10.1109\/TPAMI.2024.3369699"},{"key":"19_CR31","doi-asserted-by":"crossref","unstructured":"Zhou, L., Palangi, H., Zhang, L., Hu, H., Corso, J., Gao, J.: Unified vision-language pre-training for image captioning and VQA. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a034, pp. 
13041\u201313049 (2020)","DOI":"10.1609\/aaai.v34i07.7005"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ACCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-2641-0_19","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,31]],"date-time":"2025-03-31T00:55:06Z","timestamp":1743382506000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-2641-0_19"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9789819626403","9789819626410"],"references-count":31,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-2641-0_19","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"29 March 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ACCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Asian Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Hanoi","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vietnam","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 December 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"accv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
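The object above is a Crossref "work" record for DOI 10.1007/978-981-96-2641-0_19. As a minimal sketch, assuming the record is fetched from the public Crossref REST API with the requests package (the api.crossref.org endpoint, the script name, and the mailto placeholder in the User-Agent are assumptions, not part of the record), the following Python snippet retrieves the same record and prints the fields shown here.

# Minimal sketch: fetch the Crossref work record shown above and summarise it.
# Assumptions: public Crossref REST API at api.crossref.org, the `requests`
# package, and a placeholder mailto address for the polite-pool User-Agent.
import requests

DOI = "10.1007/978-981-96-2641-0_19"

resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "crossref-check/0.1 (mailto:you@example.org)"},
    timeout=30,
)
resp.raise_for_status()
work = resp.json()["message"]  # same structure as the "message" object above

print(work["title"][0])                        # chapter title
print(work["page"], work["publisher"])         # 281-292, Springer Nature Singapore
print("references:", work["reference-count"])  # 31
for author in work["author"]:                  # five authors, each with an ORCID
    print(" -", author.get("given", ""), author.get("family", ""))

Note that a live query may differ slightly from the snapshot above, since counters such as is-referenced-by-count and the indexed timestamp are updated over time.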