{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,7]],"date-time":"2026-05-07T16:24:52Z","timestamp":1778171092655,"version":"3.51.4"},"publisher-location":"Cham","reference-count":67,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726835","type":"print"},{"value":"9783031726842","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72684-2_9","type":"book-chapter","created":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:02:45Z","timestamp":1730574165000},"page":"146-164","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":16,"title":["CAT: Enhancing Multimodal Large Language Model to Answer Questions in Dynamic Audio-Visual Scenarios"],"prefix":"10.1007","author":[{"given":"Qilang","family":"Ye","sequence":"first","affiliation":[]},{"given":"Zitong","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Rui","family":"Shao","sequence":"additional","affiliation":[]},{"given":"Xinyu","family":"Xie","sequence":"additional","affiliation":[]},{"given":"Philip","family":"Torr","sequence":"additional","affiliation":[]},{"given":"Xiaochun","family":"Cao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,3]]},"reference":[{"key":"9_CR1","doi-asserted-by":"crossref","unstructured":"AlAmri, H., et al.: Audio visual scene-aware dialog. In: IEEE Conference on Computer Vision and Pattern Recognition, CVPR, pp. 7558\u20137567 (2019)","DOI":"10.1109\/CVPR.2019.00774"},{"key":"9_CR2","doi-asserted-by":"crossref","unstructured":"Antol, S., et al.: VQA: visual question answering. In: IEEE International Conference on Computer Vision, ICCV, pp. 2425\u20132433. IEEE Computer Society (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"9_CR3","doi-asserted-by":"crossref","unstructured":"Bain, M., Nagrani, A., Varol, G., Zisserman, A.: Frozen in time: a joint video and image encoder for end-to-end retrieval. In: IEEE\/CVF International Conference on Computer Vision, ICCV, pp. 1708\u20131718. IEEE (2021)","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"9_CR4","unstructured":"Chen, G., Shen, L., Shao, R., Deng, X., Nie, L.: LION: empowering multimodal large language model with dual-level visual knowledge. CoRR abs\/2311.11860 (2023)"},{"key":"9_CR5","doi-asserted-by":"crossref","unstructured":"Chen, H., Xie, W., Vedaldi, A., Zisserman, A.: VGGSound: a large-scale audio-visual dataset. In: IEEE International Conference on Acoustics, Speech and Signal Processing, ICASSP, pp. 721\u2013725. IEEE (2020)","DOI":"10.1109\/ICASSP40776.2020.9053174"},{"key":"9_CR6","unstructured":"Chen, S., et al.: VALOR: vision-audio-language omni-perception pretraining model and dataset. CoRR abs\/2304.08345 (2023)"},{"key":"9_CR7","unstructured":"Chen, S., et alJ.: VAST: A vision-audio-subtitle-text omni-modality foundation model and dataset. 
CoRR abs\/2305.18500 (2023)"},{"key":"9_CR8","unstructured":"Dai, W., et al.: InstructBLIP: towards general-purpose vision-language models with instruction tuning. CoRR abs\/2305.06500 (2023)"},{"key":"9_CR9","unstructured":"Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: Burstein, J., Doran, C., Solorio, T. (eds.) Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT, pp. 4171\u20134186. Association for Computational Linguistics (2019)"},{"key":"9_CR10","doi-asserted-by":"crossref","unstructured":"Fan, C., Zhang, X., Zhang, S., Wang, W., Zhang, C., Huang, H.: Heterogeneous memory enhanced multimodal attention model for video question answering. In: IEEE Conference on Computer Vision and Pattern Recognition, CVPR, pp. 1999\u20132007. Computer Vision Foundation\/IEEE (2019)","DOI":"10.1109\/CVPR.2019.00210"},{"key":"9_CR11","doi-asserted-by":"crossref","unstructured":"Fayek, H.M., Johnson, J.: Temporal reasoning via audio question answering. IEEE\/ACM Trans. Audio Speech Lang. Process. 28, 2283\u20132294 (2020)","DOI":"10.1109\/TASLP.2020.3010650"},{"key":"9_CR12","unstructured":"Gao, P., et al.: LLaMA-Adapter V2: parameter-efficient visual instruction model. CoRR abs\/2304.15010 (2023)"},{"key":"9_CR13","doi-asserted-by":"crossref","unstructured":"Girdhar, R., et al.: ImageBind: one embedding space to bind them all. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pp. 15180\u201315190 (2023)","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"9_CR14","unstructured":"Han, J., et al.: OneLLM: one framework to align all modalities with language. CoRR abs\/2312.03700 (2023)"},{"key":"9_CR15","unstructured":"Han, J., et al.: ImageBind-LLM: multi-modality instruction tuning. CoRR abs\/2309.03905 (2023)"},{"key":"9_CR16","unstructured":"Hendrycks, D., et al.: Measuring massive multitask language understanding. In: ICLR (2021)"},{"key":"9_CR17","unstructured":"Hu, E.J., et al.: LoRA: low-rank adaptation of large language models. In: The Tenth International Conference on Learning Representations, ICLR (2022)"},{"key":"9_CR18","doi-asserted-by":"crossref","unstructured":"Huang, B., Wang, X., Chen, H., Song, Z., Zhu, W.: VTimeLLM: empower LLM to grasp video moments. CoRR abs\/2311.18445 (2023)","DOI":"10.1109\/CVPR52733.2024.01353"},{"key":"9_CR19","doi-asserted-by":"crossref","unstructured":"Jiang, P., Han, Y.: Reasoning with heterogeneous graph alignment for video question answering. In: AAAI, pp. 11109\u201311116. AAAI Press (2020)","DOI":"10.1609\/aaai.v34i07.6767"},{"key":"9_CR20","doi-asserted-by":"crossref","unstructured":"Jin, P., Takanobu, R., Zhang, C., Cao, X., Yuan, L.: Chat-UniVi: unified visual representation empowers large language models with image and video understanding. CoRR abs\/2311.08046 (2023)","DOI":"10.1109\/CVPR52733.2024.01300"},{"key":"9_CR21","doi-asserted-by":"crossref","unstructured":"Korbar, B., Xian, Y., Tonioni, A., Zisserman, A., Tombari, F.: Text-conditioned resampler for long form video understanding. CoRR abs\/2312.11897 (2023)","DOI":"10.1007\/978-3-031-73016-0_16"},{"key":"9_CR22","doi-asserted-by":"crossref","unstructured":"Lavie, A., Agarwal, A.: METEOR: an automatic metric for MT evaluation with high levels of correlation with human judgments. In: Callison-Burch, C., Koehn, P., Fordyce, C.S., Monz, C. (eds.) 
Proceedings of the Second Workshop on Statistical Machine Translation, pp. 228\u2013231. Association for Computational Linguistics (2007)","DOI":"10.3115\/1626355.1626389"},{"key":"9_CR23","doi-asserted-by":"crossref","unstructured":"Le, H., Sahoo, D., Chen, N.F., Hoi, S.C.H.: Multimodal transformer networks for end-to-end video-grounded dialogue systems. In: Korhonen, A., Traum, D.R., M\u00e0rquez, L. (eds.) Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL, pp. 5612\u20135623. Association for Computational Linguistics (2019)","DOI":"10.18653\/v1\/P19-1564"},{"key":"9_CR24","doi-asserted-by":"crossref","unstructured":"Le, T.M., Le, V., Venkatesh, S., Tran, T.: Hierarchical conditional relation networks for video question answering. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pp. 9969\u20139978. Computer Vision Foundation\/IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.00999"},{"key":"9_CR25","doi-asserted-by":"crossref","unstructured":"Li, G., Hou, W., Hu, D.: Progressive spatio-temporal perception for audio-visual question answering. In: El-Saddik, A., et al. (eds.) ACM MM, pp. 7808\u20137816. ACM (2023)","DOI":"10.1145\/3581783.3612293"},{"key":"9_CR26","doi-asserted-by":"crossref","unstructured":"Li, G., Wei, Y., Tian, Y., Xu, C., Wen, J.R., Hu, D.: Learning to answer questions in dynamic audio-visual scenarios. In: CVPR, pp. 19086\u201319096 (2022)","DOI":"10.1109\/CVPR52688.2022.01852"},{"key":"9_CR27","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.C.H.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In: Krause, A., Brunskill, E., Cho, K., Engelhardt, B., Sabato, S., Scarlett, J. (eds.) International Conference on Machine Learning, ICML, pp. 19730\u201319742. PMLR (2023)"},{"key":"9_CR28","unstructured":"Li, K., et al.: VideoChat: chat-centric video understanding. CoRR abs\/2305.06355 (2023)"},{"key":"9_CR29","unstructured":"Li, K., et al.: MVBench: a comprehensive multi-modal video understanding benchmark. CoRR abs\/2311.17005 (2023)"},{"key":"9_CR30","doi-asserted-by":"crossref","unstructured":"Li, X., Song, J., Gao, L., Liu, X., Huang, W., He, X., Gan, C.: Beyond RNNs: positional self-attention with co-attention for video question answering. In: AAAI, pp. 8658\u20138665. AAAI Press (2019)","DOI":"10.1609\/aaai.v33i01.33018658"},{"key":"9_CR31","doi-asserted-by":"crossref","unstructured":"Li, Y., Wang, C., Jia, J.: LLaMA-VID: an image is worth 2 tokens in large language models. CoRR abs\/2311.17043 (2023)","DOI":"10.1007\/978-3-031-72952-2_19"},{"key":"9_CR32","unstructured":"Li, Z., et al.: Monkey: image resolution and text label are important things for large multi-modal models. CoRR abs\/2311.06607 (2023)"},{"key":"9_CR33","unstructured":"Lin, B., et al.: Video-LLaVA: learning united visual representation by alignment before projection. CoRR abs\/2311.10122 (2023)"},{"key":"9_CR34","doi-asserted-by":"crossref","unstructured":"Lin, Y., Sung, Y., Lei, J., Bansal, M., Bertasius, G.: Vision transformers are parameter-efficient audio-visual learners. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, CVPR, pp. 2299\u20132309. IEEE (2023)","DOI":"10.1109\/CVPR52729.2023.00228"},{"key":"9_CR35","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. CoRR abs\/2304.08485 (2023)"},{"key":"9_CR36","unstructured":"Lu, J., Yang, J., Batra, D., Parikh, D.: Hierarchical question-image co-attention for visual question answering. 
In: Lee, D.D., Sugiyama, M., von Luxburg, U., Guyon, I., Garnett, R. (eds.) Advances in Neural Information Processing Systems, NIPS, pp. 289\u2013297 (2016)"},{"key":"9_CR37","unstructured":"Luo, R., et al.: Valley: video assistant with large language model enhanced ability. CoRR abs\/2306.07207 (2023)"},{"key":"9_CR38","unstructured":"Lyu, C., et al.: Macaw-LLM: multi-modal language modeling with image, audio, video, and text integration. CoRR abs\/2306.09093 (2023)"},{"key":"9_CR39","unstructured":"Ma, F., Jin, X., Wang, H., Xian, Y., Feng, J., Yang, Y.: Vista-LLaMA: reliable video narrator via equal distance to visual tokens. CoRR abs\/2312.08870 (2023)"},{"key":"9_CR40","unstructured":"Maaz, M., Rasheed, H.A., Khan, S.H., Khan, F.S.: Video-ChatGPT: towards detailed video understanding via large vision and language models. CoRR abs\/2306.05424 (2023)"},{"key":"9_CR41","unstructured":"MacGlashan, J., et al.: Interactive learning from policy-dependent human feedback. In: Precup, D., Teh, Y.W. (eds.) Proceedings of the 34th International Conference on Machine Learning, ICML, pp. 2285\u20132294 (2017)"},{"key":"9_CR42","unstructured":"Mei, X., et al.: WavCaps: a ChatGPT-assisted weakly-labelled audio captioning dataset for audio-language multimodal research. CoRR abs\/2303.17395 (2023)"},{"key":"9_CR43","doi-asserted-by":"crossref","unstructured":"Nadeem, A., Hilton, A., Dawes, R., Thomas, G., Mustafa, A.: CAD - contextual multi-modal alignment for dynamic AVQA. CoRR abs\/2310.16754 (2023)","DOI":"10.1109\/WACV57701.2024.00709"},{"key":"9_CR44","unstructured":"Nguyen, D.T., Sharma, S., Schulz, H., Asri, L.E.: From film to video: multi-turn question answering with multi-modal context. CoRR abs\/1812.07023 (2018)"},{"key":"9_CR45","unstructured":"OpenAI: GPT-4 technical report. CoRR abs\/2303.08774 (2023)"},{"key":"9_CR46","unstructured":"Ouyang, L., et al.: Training language models to follow instructions with human feedback. In: Advances in Neural Information Processing Systems, NeurIPS (2022)"},{"key":"9_CR47","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.: BLEU: a method for automatic evaluation of machine translation. In: Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311\u2013318. ACL (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"9_CR48","doi-asserted-by":"crossref","unstructured":"Pennington, J., Socher, R., Manning, C.D.: GloVe: global vectors for word representation. In: Moschitti, A., Pang, B., Daelemans, W. (eds.) Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing, EMNLP, pp. 1532\u20131543. ACL (2014)","DOI":"10.3115\/v1\/D14-1162"},{"key":"9_CR49","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"710","DOI":"10.1007\/978-3-031-19842-7_41","volume-title":"Computer Vision \u2013 ECCV 2022","author":"H Pham","year":"2022","unstructured":"Pham, H., Le, T.M., Le, V., Phuong, T.M., Tran, T.: Video dialog as conversation about objects living in space-time. In: Avidan, S., Brostow, G.J., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13699, pp. 710\u2013726. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19842-7_41"},{"key":"9_CR50","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: Meila, M., Zhang, T. (eds.) Proceedings of the International Conference on Machine Learning, ICML, vol.\u00a0139, pp. 8748\u20138763. 
PMLR (2021)"},{"key":"9_CR51","unstructured":"Radford, A., Kim, J.W., Xu, T., Brockman, G., McLeavey, C., Sutskever, I.: Robust speech recognition via large-scale weak supervision. In: Krause, A., Brunskill, E., Cho, K., Engelhardt, B., Sabato, S., Scarlett, J. (eds.) International Conference on Machine Learning, ICML, pp. 28492\u201328518. PMLR (2023)"},{"key":"9_CR52","unstructured":"Rafailov, R., Sharma, A., Mitchell, E., Ermon, S., Manning, C.D., Finn, C.: Direct preference optimization: Your language model is secretly a reward model. CoRR abs\/2305.18290 (2023)"},{"key":"9_CR53","doi-asserted-by":"crossref","unstructured":"Sakaguchi, K., Bras, R.L., Bhagavatula, C., Choi, Y.: WinoGrande: an adversarial winograd schema challenge at scale. In: AAAI, pp. 8732\u20138740 (2020)","DOI":"10.1609\/aaai.v34i05.6399"},{"key":"9_CR54","doi-asserted-by":"crossref","unstructured":"Schwartz, I., Schwing, A.G., Hazan, T.: A simple baseline for audio-visual scene-aware dialog. In: IEEE Conference on Computer Vision and Pattern Recognition, CVPR, pp. 12548\u201312558. Computer Vision Foundation\/IEEE (2019)","DOI":"10.1109\/CVPR.2019.01283"},{"key":"9_CR55","unstructured":"Touvron, H., et al.: LLaMA 2: open foundation and fine-tuned chat models. CoRR abs\/2307.09288 (2023)"},{"key":"9_CR56","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Zitnick, C.L., Parikh, D.: CiDER: consensus-based image description evaluation. In: IEEE Conference on Computer Vision and Pattern Recognition, CVPR, pp. 4566\u20134575 (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"9_CR57","doi-asserted-by":"crossref","unstructured":"Wang, X., Wu, J., Chen, J., Li, L., Wang, Y., Wang, W.Y.: VATEX: a large-scale, high-quality multilingual dataset for video-and-language research. In: IEEE\/CVF International Conference on Computer Vision, ICCV, pp. 4580\u20134590. IEEE (2019)","DOI":"10.1109\/ICCV.2019.00468"},{"key":"9_CR58","doi-asserted-by":"crossref","unstructured":"Xu, D., et al.: Video question answering via gradually refined attention over appearance and motion. In: ACM MM, pp. 1645\u20131653 (2017)","DOI":"10.1145\/3123266.3123427"},{"key":"9_CR59","doi-asserted-by":"crossref","unstructured":"Yang, P., et al.: AVQA: a dataset for audio-visual question answering on videos. In: ACM MM, pp. 3480\u20133491. ACM (2022)","DOI":"10.1145\/3503161.3548291"},{"key":"9_CR60","doi-asserted-by":"crossref","unstructured":"Yu, Z., et al.: ActivityNet-QA: a dataset for understanding complex web videos via question answering. In: AAAI, pp. 9127\u20139134 (2019)","DOI":"10.1609\/aaai.v33i01.33019127"},{"key":"9_CR61","doi-asserted-by":"crossref","unstructured":"Yu, Z., Yu, J., Cui, Y., Tao, D., Tian, Q.: Deep modular co-attention networks for visual question answering. In: IEEE Conference on Computer Vision and Pattern Recognition, CVPR, pp. 6281\u20136290. Computer Vision Foundation\/IEEE (2019)","DOI":"10.1109\/CVPR.2019.00644"},{"key":"9_CR62","doi-asserted-by":"crossref","unstructured":"Yun, H., Yu, Y., Yang, W., Lee, K.I., Kim, G.H.: Pano-AVQA: grounded audio-visual question answering on 360$$^\\circ $$ videos. In: CVPR, pp. 2031-2041 (2021)","DOI":"10.1109\/ICCV48922.2021.00204"},{"key":"9_CR63","doi-asserted-by":"crossref","unstructured":"Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., Choi, Y.: HellaSwag: can a machine really finish your sentence? In: Proceedings of the 57th Conference of the Association for Computational Linguistics, ACL, pp. 
4791\u20134800 (2019)","DOI":"10.18653\/v1\/P19-1472"},{"key":"9_CR64","doi-asserted-by":"crossref","unstructured":"Zhang, H., Li, X., Bing, L.: Video-LLaMA: an instruction-tuned audio-visual language model for video understanding. In: Proceedings of the Empirical Methods in Natural Language Processing, EMNLP, pp. 543\u2013553 (2023)","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"key":"9_CR65","unstructured":"Zhang, R., et al.: LLaMA-Adapter: efficient fine-tuning of language models with zero-init attention. CoRR abs\/2303.16199 (2023)"},{"key":"9_CR66","unstructured":"Zhang, S., et al.: Instruction tuning for large language models: a survey. CoRR abs\/2308.10792 (2023)"},{"key":"9_CR67","unstructured":"Zhao, Z., et al.: ChatBridge: bridging modalities with large language model as a language catalyst. CoRR abs\/2305.16103 (2023)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72684-2_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:04:44Z","timestamp":1730574284000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72684-2_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,3]]},"ISBN":["9783031726835","9783031726842"],"references-count":67,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72684-2_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,3]]},"assertion":[{"value":"3 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}