{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,11]],"date-time":"2026-04-11T13:12:37Z","timestamp":1775913157988,"version":"3.50.1"},"reference-count":261,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T00:00:00Z","timestamp":1770336000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T00:00:00Z","timestamp":1770336000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1007\/s11263-025-02667-1","type":"journal-article","created":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T03:27:20Z","timestamp":1770348440000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":11,"title":["Multimodal Alignment and Fusion: A Survey"],"prefix":"10.1007","volume":"134","author":[{"given":"Songtao","family":"Li","sequence":"first","affiliation":[]},{"given":"Hao","family":"Tang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,2,6]]},"reference":[{"key":"2667_CR1","doi-asserted-by":"crossref","unstructured":"Ak, K. E., Lee, G., Xu, Y., & Shen, M. (2023). Leveraging efficient training and feature fusion in transformers for multimodal classification. In 2023 IEEE International Conference on Image Processing (ICIP), (pp. 1420\u20131424).","DOI":"10.1109\/ICIP49359.2023.10223098"},{"key":"2667_CR2","unstructured":"Akaho, S. (2001). A kernel method for canonical correlation analysis. In: Proceedings of the International Meeting on Psychometric Society."},{"key":"2667_CR3","first-page":"1105916\u20131105916","volume":"11059","author":"A Akhmerov","year":"2019","unstructured":"Akhmerov, A., Vasilev, A., & Vasileva, A. V. (2019). Research of spatial alignment techniques for multimodal image fusion. In Proceedings of the SPIE,11059, 1105916-1105916\u20139.","journal-title":"In Proceedings of the SPIE"},{"key":"2667_CR4","doi-asserted-by":"crossref","unstructured":"Alayrac, J.-B., Donahue, J., Luc, P., Miech, A., Barr, I., Hasson, Y., Lenc, K., Mensch, A., Millican, K., Reynolds, M., Ring, R., Rutherford, E., Cabi, S., Han, T., Gong, Z., Samangooei, S., Monteiro, M., Menick, J., Borgeaud, S., Zisserman, A. (2022). and Karen Simonyan. Flamingo: A visual language model for few-shot learning. In NeurIPS.","DOI":"10.52202\/068431-1723"},{"key":"2667_CR5","unstructured":"Allaire, D. L., & Willcox, K.E. (2012). Fusing information from multifidelity computer models of physical systems. 2012 15th International Conference on Information Fusion, (pp. 2458\u20132465)."},{"key":"2667_CR6","unstructured":"Andrew, G., Arora, R., Bilmes, J., & Livescu, K. (2013). Deep canonical correlation analysis."},{"key":"2667_CR7","doi-asserted-by":"crossref","unstructured":"Arany, A., Bolg\u00e1r, B., Balogh, B., Antal, P., & M\u00e1tyus, P. (2012). Multi-aspect candidates for repositioning: data fusion methods using heterogeneous information sources. Current medicinal, chemistry, 20(1), 95\u2013107.","DOI":"10.2174\/0929867311302010010"},{"key":"2667_CR8","unstructured":"Asai, M. (2018). 
Set cross entropy: Likelihood-based permutation invariant loss function for probability distributions."},{"key":"2667_CR9","unstructured":"Globerson, A., Mokady, R., & Nukrai, D. (2022). Association for Computational Linguistics, Text-only training for image captioning using noise-injected clip."},{"key":"2667_CR10","unstructured":"Ayache, S., Qu\u00e9not, G., & Gensel, J. (2007). Classifier fusion for svm-based multimedia semantic indexing. In: European Conference on Information Retrieval."},{"key":"2667_CR11","first-page":"1","volume":"3","author":"FR Bach","year":"2002","unstructured":"Bach, F. R., & Jordan, M. I. (2002). Kernel independent component analysis. Journal of Machine Learning Research,3, 1\u201348.","journal-title":"Journal of Machine Learning Research"},{"key":"2667_CR12","doi-asserted-by":"crossref","unstructured":"Bachmann, R., Kar, O. F., Mizrahi, D., Garjani, A., Gao, M., Griffiths, D., Hu, J., Dehghan, A., & Zamir, A. (2024). 4m-21: An any-to-any vision model for tens of tasks and modalities.","DOI":"10.52202\/079017-1977"},{"key":"2667_CR13","unstructured":"Badrinarayanan, V., Kendall, A., & Cipolla, R. (2015). Segnet: A deep convolutional encoder-decoder architecture for image segmentation. IEEE TPAMI."},{"key":"2667_CR14","unstructured":"Bai, J., Bai, S., Yang, S., Wang, S., Tan, S., Wang, P., Lin, J., Zhou, C., & Zhou, J. (2023). Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond."},{"key":"2667_CR15","doi-asserted-by":"crossref","unstructured":"Baltrusaitis, T., Ahuja, C., & Morency, L.-P. (2018). Multimodal machine learning: A survey and taxonomy. IEEE TPAMI, 41(2).","DOI":"10.1109\/TPAMI.2018.2798607"},{"key":"2667_CR16","doi-asserted-by":"publisher","first-page":"91","DOI":"10.1007\/978-3-319-13105-4_14","volume-title":"Ambient Assisted Living and Daily Activities","author":"O Banos","year":"2014","unstructured":"Banos, O., Garcia, R., Holgado-Terriza, J. A., Damas, M., Pomares, H., Rojas, I., Saez, A., & Villalonga, C. (2014). mhealthdroid: A novel framework for agile development of mobile health applications. In L. Pecchia, L. L. Chen, C. Nugent, & J. Bravo (Eds.), Ambient Assisted Living and Daily Activities (pp. 91\u201398). Cham: Springer International Publishing."},{"key":"2667_CR17","unstructured":"Bao, H., Wang, W., Dong, L., Liu, Q., Mohammed, O. K., Aggarwal, K., Som, S., & Wei, F. (2022). Vlmo: Unified vision-language pre-training with mixture-of-modality-experts."},{"key":"2667_CR18","doi-asserted-by":"publisher","first-page":"14804","DOI":"10.1109\/ACCESS.2023.3243854","volume":"11","author":"A Barua","year":"2023","unstructured":"Barua, A., Ahmed, M. U., & Begum, S. (2023). A systematic literature review on multimodal machine learning: Applications, challenges, gaps and future directions. IEEE Access,11, 14804\u201314831.","journal-title":"IEEE Access"},{"key":"2667_CR19","doi-asserted-by":"crossref","unstructured":"Blasch, E., Levchuk, G. M., Staskevich, G., Burke, D., & Aved, A. (2014). Visualization of graphical information fusion results. In: Defense + Security Symposium.","DOI":"10.1117\/12.2052892"},{"key":"2667_CR20","doi-asserted-by":"publisher","first-page":"114","DOI":"10.1038\/s41568-021-00408-3","volume":"22","author":"K Boehm","year":"2021","unstructured":"Boehm, K., Khosravi, P., Vanguri, R., Gao, J., & Shah, S. (2021). Harnessing multimodal data integration to advance precision oncology. 
Nature Reviews Cancer,22, 114\u2013126.","journal-title":"Nature Reviews Cancer"},{"key":"2667_CR21","unstructured":"Bousetouane, F. (2025). Generative ai for vision: A comprehensive study of frameworks and applications."},{"key":"2667_CR22","doi-asserted-by":"crossref","unstructured":"Bradshaw, T. J., Tie, X., Warner, J., Hu, J., Li, Q., & Li, X. (2025). Large language models and large multimodal models in medical imaging: A primer for physicians. Journal of Nuclear Medicine.","DOI":"10.2967\/jnumed.124.268072"},{"key":"2667_CR23","doi-asserted-by":"crossref","unstructured":"Broedermann, T., Sakaridis, C., Fu, Y., & Gool, L. V. (2024). Condition-aware multimodal fusion for robust semantic perception of driving scenes.","DOI":"10.1109\/LRA.2025.3536218"},{"key":"2667_CR24","unstructured":"Brown, T.B., Mann, B., Ryder, N., Subbiah, M., Kaplan, J., Dhariwal, P., et\u00a0al. Language models are few-shot learners."},{"key":"2667_CR25","unstructured":"Byeon, M., Park, B., Kim, H., Lee, S., Baek, W., & Kim, S. (2022). Coyo-700m: Image-text pair dataset."},{"issue":"1","key":"2667_CR26","doi-asserted-by":"publisher","first-page":"1","DOI":"10.70393\/6a69656173.323633","volume":"3","author":"Y Cao","year":"2025","unstructured":"Cao, Y., Yang, X., & Sun, R. (2025). Generative ai models: Theoretical foundations and algorithmic practices. Journal of Industrial Engineering and Applied Science,3(1), 1\u20139.","journal-title":"Journal of Industrial Engineering and Applied Science"},{"key":"2667_CR27","doi-asserted-by":"crossref","unstructured":"Changpinyo, S., Sharma, P., Ding, N., & Soricut, R. (2021). Conceptual 12m: Pushing web-scale image-text pre-training to recognize long-tail visual concepts.","DOI":"10.1109\/CVPR46437.2021.00356"},{"key":"2667_CR28","doi-asserted-by":"crossref","unstructured":"Chen, D., Chen, J., Yang, L., & Shang, F. (2024). Mix-tower: Light visual question answering framework based on exclusive self-attention mechanism. Neurocomputing,","DOI":"10.1016\/j.neucom.2024.127686"},{"key":"2667_CR29","unstructured":"Chen, F., Han, M., Zhao, H., Zhang, Q., Shi, J., Xu, S., & Xu, B. (2023). X-llm: Bootstrapping advanced large language models by treating multi-modalities as foreign languages."},{"key":"2667_CR30","unstructured":"Chen, H., & Xu, T. (2023). Instructblip 2: Extending vision-language models with fine-grained instruction tuning. IEEE Transactions on Pattern Analysis and Machine Intelligence."},{"key":"2667_CR31","doi-asserted-by":"crossref","unstructured":"Chen, J., & Zhang, A. (2020). Hgmf: Heterogeneous graph-based fusion for multimodal data with incompleteness.","DOI":"10.1145\/3394486.3403182"},{"key":"2667_CR32","unstructured":"Chen, J., Zhu, D., Shen, X., Li, X., Liu, Z., Zhang, P., Krishnamoorthi, R., Chandra, V., Xiong, Y., & Elhoseiny, M. (2023). Minigpt-v2: large language model as a unified interface for vision-language multi-task learning."},{"key":"2667_CR33","unstructured":"Chen, K., & Sun, Y. (2023). Llava-med: Medical image understanding with large language models. IEEE TNNLS."},{"key":"2667_CR34","doi-asserted-by":"crossref","unstructured":"Chen, S., Li, H., Wang, Q., Zhao, Z., Sun, M., Zhu, X., & Liu, J. (2023). Vast: A vision-audio-subtitle-text omni-modality foundation model and dataset.","DOI":"10.52202\/075280-3185"},{"key":"2667_CR35","doi-asserted-by":"crossref","unstructured":"Chen, Y.-C., Li, L., Licheng, Yu., El Kholy, A., Ahmed, F., Gan, Z., Cheng, Y., & Liu, J. (2020). 
Uniter: Universal image-text representation learning.","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"2667_CR36","unstructured":"Chen, Y., Qiao, X., Sun, Z., & Li, X. (2024). Comkd-clip: Comprehensive knowledge distillation for contrastive language-image pre-traning model."},{"key":"2667_CR37","doi-asserted-by":"crossref","unstructured":"Chen, Y., Yuan, J., Tian, Y., Geng, S., Li, X., Zhou, D., Metaxas, D. N., & Yang, H. (2023). Revisiting multimodal representation in contrastive learning: From patch and token embeddings to finite discrete tokens. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), (pp. 15095\u201315104).","DOI":"10.1109\/CVPR52729.2023.01449"},{"key":"2667_CR38","unstructured":"Chu, Y., Jin, X., Zhou, X., Yang, Q., Zhang, S., Yan, Z., Zhou, C., & Zhou, J. (2023). Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models."},{"key":"2667_CR39","doi-asserted-by":"crossref","unstructured":"Cui, Y., Liang, S., & Zhang, Y. Y. (2024). Multimodal representation learning for tourism recommendation with two-tower architecture. PLoS One.","DOI":"10.1371\/journal.pone.0299370"},{"key":"2667_CR40","doi-asserted-by":"crossref","unstructured":"Dai, W., Li, J., Li, D., Tiong, A. M. H., Zhao, J., Wang, W., Li, B., Fung, P., & Hoi, S. (2023). Instructblip: Towards general-purpose vision-language models with instruction tuning.","DOI":"10.52202\/075280-2142"},{"key":"2667_CR41","doi-asserted-by":"publisher","DOI":"10.1002\/mp.14539","volume-title":"Multimodal mri synthesis using unified generative adversarial networks","author":"X Dai","year":"2020","unstructured":"Dai, X., Lei, Y., Fu, Y., Curran, W., Liu, T., Mao, H., & Yang, X. (2020). Multimodal mri synthesis using unified generative adversarial networks. Medical Physics."},{"key":"2667_CR42","doi-asserted-by":"crossref","unstructured":"Danapal, G., Santos, G. A., Jo\u00e3o, P. C., da Costa, L., Praciano, B. J. G., & Pinheiro, G. P. M. (2020). Sensor fusion of camera and lidar raw data for vehicle detection. In 2020 Workshop on Communication Networks and Power Systems (WCNPS), pp. 1\u20136.","DOI":"10.1109\/WCNPS50723.2020.9263724"},{"key":"2667_CR43","unstructured":"Desai, K., Kaul, G., Aysola, Z., & Johnson, J. (2021). Redcaps: web-curated image-text data created by the people, for the people."},{"key":"2667_CR44","unstructured":"Devlin, J., Chang, M.-W., Lee, K., & Toutanova, K. (2019). Bert: Pre-training of deep bidirectional transformers for language understanding."},{"key":"2667_CR45","first-page":"1","volume":"60","author":"L Ding","year":"2022","unstructured":"Ding, L., Lin, D., Lin, S., Zhang, J., Cui, X., Wang, Y., Tang, H., & Bruzzone, L. (2022). Looking outside the window: Wide-context transformer for the semantic segmentation of high-resolution remote sensing images. IEEE TGRS,60, 1\u201313.","journal-title":"IEEE TGRS"},{"key":"2667_CR46","doi-asserted-by":"crossref","unstructured":"Ding, Y., Rich, A., Wang, M., Stier, N., Turk, M., Sen, P., & H\u00f6llerer, T. (2021). Sparse fusion for multimodal transformers.","DOI":"10.31219\/osf.io\/f7jvn"},{"key":"2667_CR47","doi-asserted-by":"publisher","first-page":"6406","DOI":"10.1109\/TSP.2016.2605068","volume":"64","author":"D Dov","year":"2016","unstructured":"Dov, D., Talmon, R., & Cohen, I. (2016). Kernel-based sensor fusion with application to audio-visual voice activity detection. 
IEEE Transactions on Signal Processing,64, 6406\u20136416.","journal-title":"IEEE Transactions on Signal Processing"},{"key":"2667_CR48","unstructured":"Driess, D., Xia, F., Sajjadi, M. S. M., Lynch, C., Chowdhery, A., Ichter, B., Wahid, A., Tompson, J., Vuong, Q., Yu, T., Huang, W., Chebotar, Y., Sermanet, P., Duckworth, D., Levine, S., Vanhoucke, V., Hausman, K., Toussaint, M., Greff, K., Zeng, A., Mordatch, I., & Florence, P. (2023). Palm-e: An embodied multimodal language model."},{"key":"2667_CR49","doi-asserted-by":"crossref","unstructured":"Duan, B., Tang, H., Wang, W., Zong, Z., Yang, G., & Yan, Y. (2021). Audio-visual event localization via recursive fusion by joint co-attention. In WACV.","DOI":"10.1109\/WACV48630.2021.00406"},{"key":"2667_CR50","unstructured":"Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., et\u00a0al. The llama 3 herd of models."},{"key":"2667_CR51","doi-asserted-by":"crossref","unstructured":"Duque, A. F., Wolf, G., & Moon, K. R. (2022). Diffusion transport alignment.","DOI":"10.1007\/978-3-031-30047-9_10"},{"key":"2667_CR52","unstructured":"D\u00e9fossez, A., Mazar\u00e9, L., Orsini, M., Royer, A., P\u00e9rez, P., J\u00e9gou, H., Grave, E., & Zeghidour, N. (2024). Moshi: a speech-text foundation model for real-time dialogue."},{"key":"2667_CR53","doi-asserted-by":"crossref","unstructured":"Han, et al. (2024). Onellm: One framework to align all modalities with language. In CVPR","DOI":"10.1109\/CVPR52733.2024.02510"},{"key":"2667_CR54","doi-asserted-by":"crossref","unstructured":"Fei, N., Lu, Z., Gao, Y., Yang, G., Huo, Y., Wen, J., & Lu, H. (2022). Towards artificial general intelligence via a multimodal foundation model. Nature Communications.","DOI":"10.1038\/s41467-022-30761-2"},{"key":"2667_CR55","doi-asserted-by":"crossref","unstructured":"Gabeur, V., Sun, C., Alahari, K., & Schmid, C. (2020). Multi-modal transformer for video retrieval. In ECCV, pp. 214\u2013229.","DOI":"10.1007\/978-3-030-58548-8_13"},{"key":"2667_CR56","unstructured":"Gadre, S. Y., Ilharco, G., Fang, A., Hayase, J., Smyrnis, G., Nguyen, T., Marten, R., Wortsman, M., Ghosh, D., Zhang, J., Orgad, E., Entezari, R., Daras, G., Pratt, S., Ramanujan, V., Bitton, Y., Marathe, K., Mussmann, S., Vencu, R., Cherti, M., Krishna, R., Koh, P. W., Saukh, O., Ratner, A., Song, S., Hajishirzi, H., Farhadi, A., Beaumont, R., Oh, S., Dimakis, A., Jitsev, J., Carmon, Y., Shankar, V., & Schmidt, L. (2023). Datacomp: In search of the next generation of multimodal datasets."},{"key":"2667_CR57","unstructured":"Geng, S., Yuan, J., Tian, Y., Chen, Y., & Zhang, Y. (2023). Hiclip: Contrastive language-image pretraining with hierarchy-aware attention."},{"key":"2667_CR58","doi-asserted-by":"crossref","unstructured":"Girdhar, R., El-Nouby, A., Liu, Z., Singh, M., Alwala, K. V., Joulin, A., & Misra, I. (2023). Imagebind: One embedding space to bind them all.","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"2667_CR59","doi-asserted-by":"crossref","unstructured":"Gondal, M. W., Gast, J., Ruiz, I. A., Droste, R., Macri, T., Kumar, S., & Staudigl, L. (2024). Domain aligned clip for few-shot classification. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), pp. 5721\u20135730.","DOI":"10.1109\/WACV57701.2024.00562"},{"key":"2667_CR60","doi-asserted-by":"crossref","unstructured":"Grail, Q., Perez, J., & Gaussier, E. (2021). Globalizing BERT-based transformer architectures for long document summarization. 
In: Merlo, P., Tiedemann, J., & Tsarfaty, R., (Eds.), Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, (pp. 1792\u20131810), Online, April 2021. Association for Computational Linguistics.","DOI":"10.18653\/v1\/2021.eacl-main.154"},{"key":"2667_CR61","doi-asserted-by":"crossref","unstructured":"Guo, C., & Zhang, L. (2023). A model-level fusion-based multi-modal object detection and recognition method, In: 2023 7th Asian Conference on Artificial Intelligence Technology (ACAIT). (pp. 34\u201338)","DOI":"10.1109\/ACAIT60137.2023.10528389"},{"key":"2667_CR62","doi-asserted-by":"crossref","unstructured":"Guzhov, A., Raue, F., Hees, J., & Dengel, A. (2022). Audioclip: Extending clip to image, text and audio. In ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), (pp. 976\u2013980)","DOI":"10.1109\/ICASSP43922.2022.9747631"},{"key":"2667_CR63","doi-asserted-by":"crossref","unstructured":"G\u00f4lo, M., Moraes, M. I. D., Goularte, R., & Marcacini, R. (2023). On the use of early fusion operators on heterogeneous graph neural networks for one-class learning. In Proceedings of the 29th Brazilian Symposium on Multimedia and the Web.","DOI":"10.1145\/3617023.3617041"},{"key":"2667_CR64","doi-asserted-by":"crossref","unstructured":"Liu, Z., Shen, Y., Lakshminarasimhan, V.B., Liang, P.P., Bagher\u00a0Zadeh, A. A., & Morency, L.-P. (2018). Efficient low-rank multimodal fusion with modality-specific factors. In: Gurevych, I., Miyao, Y., (Ed.), Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2247\u20132256, Melbourne, Australia. Association for Computational Linguistics.","DOI":"10.18653\/v1\/P18-1209"},{"issue":"12","key":"2667_CR65","doi-asserted-by":"publisher","first-page":"2639","DOI":"10.1162\/0899766042321814","volume":"16","author":"DR Hardoon","year":"2004","unstructured":"Hardoon, D. R., Szedmak, S., & Shawe-Taylor, J. (2004). Canonical correlation analysis: An overview with application to learning methods. Neural Computation,16(12), 2639\u20132664.","journal-title":"Neural Computation"},{"key":"2667_CR66","unstructured":"In Hong, Y., Zhen, H., Chen, P., Zheng, S., Yilun, D., Chen, Z., & Gan, C. (2023). 3d-llm: Injecting the 3d world into large language models."},{"issue":"3\u20134","key":"2667_CR67","doi-asserted-by":"publisher","first-page":"321","DOI":"10.1093\/biomet\/28.3-4.321","volume":"28","author":"H Hotelling","year":"1936","unstructured":"Hotelling, H. (1936). Relations between two sets of variates. Biometrika,28(3\u20134), 321\u2013377.","journal-title":"Biometrika"},{"key":"2667_CR68","unstructured":"Zhizhang, H., Zhu, X., Tran, S., Vidal, R., & Dhua, A. (2023). Provla: Compositional image search with progressive vision-language alignment and multimodal fusion. (pp. 2772\u20132777)"},{"key":"2667_CR69","unstructured":"Islam, S., Elmekki, H., Elsebai, A., Bentahar, J., Drawel, N., Rjoub, G., & Pedrycz, W. A comprehensive survey on applications of transformers for deep learning tasks."},{"key":"2667_CR70","unstructured":"Jaegle, A., Gimeno, F., Brock, A., Zisserman, A., Vinyals, O., & Carreira, J. (2021). Perceiver: General perception with iterative attention."},{"key":"2667_CR71","doi-asserted-by":"crossref","unstructured":"Jaiswal, S., Mart\u00ednez, B., & Valstar, M. F. (2015). Learning to combine local models for facial action unit detection. 
2015 11th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), 06:1\u20136.","DOI":"10.1109\/FG.2015.7284872"},{"key":"2667_CR72","unstructured":"Jia, C., Yang, Y., Xia, Y., Chen, Y.-T., Parekh, Z., Pham, H., Le, Q. V., Sung, Y., Li, Z., & Duerig, T. (2021). Scaling up visual and vision-language representation learning with noisy text supervision."},{"key":"2667_CR73","unstructured":"Jia, D., Guo, J., Han, K., Han, W., Zhang, C., Chang, X., & Chen, C. (2024) Geminifusion: Efficient pixel-wise multimodal fusion for vision transformer."},{"key":"2667_CR74","doi-asserted-by":"crossref","unstructured":"Jiang, Y., Zheng, Y., Hou, S., Chang, Y., & Gee, J. (2017). Multimodal image alignment via linear mapping between feature modalities. Journal of Healthcare Engineering.","DOI":"10.1155\/2017\/8625951"},{"key":"2667_CR75","doi-asserted-by":"crossref","unstructured":"Karpathy, A., & Li, F.-F. (2015). Deep visual-semantic alignments for generating image descriptions. In 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR). (pp. 3128\u20133137)","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"2667_CR76","unstructured":"Kelly, C., Hu, L., Yang, C., Tian, Y., Yang, D., Yang, B., Huang, Z., Li, Z., & Zou, Y. (2023). Unifiedvisiongpt: Streamlining vision-oriented ai through generalized multimodal framework."},{"key":"2667_CR77","doi-asserted-by":"crossref","unstructured":"Kim, J., Oh, C., Do, H., Kim, S., & Sohn, K. (2024). Diffusion-driven gan inversion for multi-modal face image generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), (pp. 10403\u201310412)","DOI":"10.1109\/CVPR52733.2024.00990"},{"key":"2667_CR78","doi-asserted-by":"crossref","unstructured":"Kim, W., Chun, S., Kim, T., Han, D., & Yun, S. (2024). Hype: Hyperbolic entailment filtering for underspecified images and texts In: European Conference on Computer Vision. (pp. 247\u2013265). Springer.","DOI":"10.1007\/978-3-031-73661-2_14"},{"key":"2667_CR79","unstructured":"Kim, W., Son, B., & Kim, I. (2021) ViLT: Vision-and-language transformer without convolution or region supervision."},{"key":"2667_CR80","doi-asserted-by":"crossref","unstructured":"Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A. C., Lo, W.-Y., Doll\u00e1r, P., & Girshick, R. (2023) Segment anything.","DOI":"10.1109\/ICCV51070.2023.00371"},{"issue":"1","key":"2667_CR81","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/T-AFFC.2011.15","volume":"3","author":"S Koelstra","year":"2012","unstructured":"Koelstra, S., Muhl, C., Soleymani, M., Lee, J.-S., Yazdani, A., Ebrahimi, T., Pun, T., Nijholt, A., & Patras, I. (2012). Deap: A database for emotion analysis; using physiological signals. IEEE Transactions on Affective Computing,3(1), 18\u201331.","journal-title":"IEEE Transactions on Affective Computing"},{"key":"2667_CR82","doi-asserted-by":"publisher","first-page":"144","DOI":"10.1186\/1752-0509-6-144","volume":"6","author":"M Kol\u00e1\u0159","year":"2012","unstructured":"Kol\u00e1\u0159, M., Meier, J., Mustonen, V., L\u00e4ssig, M., & Berg, J. (2012). Graphalignment: Bayesian pairwise alignment of biological networks. BMC Syst Biol,6, 144.","journal-title":"BMC Syst Biol"},{"key":"2667_CR83","doi-asserted-by":"crossref","unstructured":"Kong, Z., Xu, D., Li, Z., Dong, P., Tang, H., Wang, Y., & Mukherjee, S. (2025). 
Autovit: Achieving real-time vision transformers on mobile via latency-aware coarse-to-fine search. Springer IJCV, pp. 1\u201317.","DOI":"10.1007\/s11263-025-02480-w"},{"key":"2667_CR84","doi-asserted-by":"crossref","unstructured":"Krishna, R., Zhu, Y., Groth, O., Johnson, J., Hata, K., Kravitz, J., Chen, S., Kalantidis, Y., Li, L.-J., Shamma, D. A., Bernstein, M. S., & Li, F.-F. (2016). Visual genome: Connecting language and vision using crowdsourced dense image annotations.","DOI":"10.1007\/s11263-016-0981-7"},{"issue":"2","key":"2667_CR85","doi-asserted-by":"publisher","first-page":"201","DOI":"10.1137\/1025045","volume":"25","author":"JB Kruskal","year":"1983","unstructured":"Kruskal, J. B. (1983). An overview of sequence comparison: Time warps, string edits, and macromolecules. SIAM Rev.,25(2), 201\u2013237.","journal-title":"SIAM Rev."},{"key":"2667_CR86","unstructured":"Lee, H., Park, S., Lee, J., & Choi, E. (2022). Unconditional image-text pair generation with multimodal cross quantizer. ArXiv."},{"key":"2667_CR87","doi-asserted-by":"crossref","unstructured":"Li, B., Ge, Y., Ge, Y., Wang, G., Wang, R., Zhang, R., & Shan, Y. (2023). Seed-bench-2: Benchmarking multimodal large language models.","DOI":"10.1109\/CVPR52733.2024.01263"},{"key":"2667_CR88","unstructured":"Li, H., Zhou, Y., Zeng, Y., Xu, H., & Liang, X. (2024). Gs-clip: Gaussian splatting for contrastive language-image-3d pretraining from real-world data."},{"key":"2667_CR89","first-page":"101498","volume-title":"Advances in Neural Information Processing Systems","author":"H Li","year":"2024","unstructured":"Li, H., Zhang, Y., Chen, P., Shui, Z., Zhu, C., & Yang, L. (2024). Rethinking transformer for long contextual histopathology whole slide image analysis. In A. Globerson, L. Mackey, D. Belgrave, A. Fan, U. Paquet, J. Tomczak, & C. Zhang (Eds.), Advances in Neural Information Processing Systems (Vol. 37, pp. 101498\u2013101528). Curran Associates Inc."},{"issue":"5","key":"2667_CR90","doi-asserted-by":"publisher","first-page":"3673","DOI":"10.1109\/TPAMI.2025.3535617","volume":"47","author":"H Li","year":"2025","unstructured":"Li, H., Yang, Z., Zhang, Y., Jia, W., Zhengtao, Yu., & Liu, Yu. (2025). Mulfs-cap: Multimodal fusion-supervised cross-modality alignment perception for unregistered infrared-visible image fusion. IEEE Transactions on Pattern Analysis and Machine Intelligence,47(5), 3673\u20133690.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2667_CR91","doi-asserted-by":"publisher","first-page":"9053","DOI":"10.18653\/v1\/2024.findings-acl.537","volume-title":"Findings of the Association for Computational Linguistics: ACL 2024","author":"H Li","year":"2024","unstructured":"Li, H., Li, S., Cai, D., Wang, L., Liu, L., Watanabe, T., Yang, Y., & Shi, S. (2024). TextBind: Multi-turn interleaved multimodal instruction-following in the wild. In K. Lun-Wei, A. Martins, & V. Srikumar (Eds.), Findings of the Association for Computational Linguistics: ACL 2024 (pp. 9053\u20139076). Bangkok: Thailand, Association for Computational Linguistics."},{"key":"2667_CR92","unstructured":"Li, H., & Wu, X. (2018). Densefuse: A fusion approach to infrared and visible images. IEEE TIP."},{"key":"2667_CR93","doi-asserted-by":"crossref","unstructured":"Li, J., Dai, H., & Ding, Y. (2022). Self-distillation for robust LiDAR semantic segmentation in autonomous driving. In: ECCV, pp. 
659\u2013676.","DOI":"10.1007\/978-3-031-19815-1_38"},{"key":"2667_CR94","doi-asserted-by":"crossref","unstructured":"Li, J., Dai, H., Han, H., & Ding, Y. (2023). Mseg3d: Multi-modal 3d semantic segmentation for autonomous driving. In: CVPR, pp. 21694\u201321704.","DOI":"10.1109\/CVPR52729.2023.02078"},{"key":"2667_CR95","unstructured":"Li, J., Li, D., Savarese, S., & Hoi, S. (2023). Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models."},{"key":"2667_CR96","unstructured":"Li, J., Li, D., Xiong, C., & Hoi, S. (2022). BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In Proceedings of the 39th International Conference on Machine Learning, pages 12888\u201312900. PMLR."},{"key":"2667_CR97","first-page":"9694","volume":"34","author":"J Li","year":"2021","unstructured":"Li, J., Selvaraju, R., Gotmare, A., Joty, S., Xiong, C., & Hoi, S. C. H. (2021). Align before fuse: Vision and language representation learning with momentum distillation. In Advances in Neural Information Processing Systems,34, 9694\u20139705.","journal-title":"In Advances in Neural Information Processing Systems"},{"key":"2667_CR98","doi-asserted-by":"crossref","unstructured":"Li, W., Zhou, H., Junqing, Yu., Song, Z., & Yang, W. (2024). Coupled mamba: Enhanced multi-modal fusion with coupled state space model.","DOI":"10.52202\/079017-1910"},{"key":"2667_CR99","doi-asserted-by":"crossref","unstructured":"Li, X., Yin, X., Li, C., Zhang, P., Xiaowei, H., Zhang, L., Wang, L., Houdong, H., Dong, L., Wei, F., Choi, Y., & Gao, J. (2020). Oscar: Object-semantics aligned pre-training for vision-language tasks.","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"2667_CR100","doi-asserted-by":"publisher","first-page":"9165","DOI":"10.1109\/TIP.2020.3023774","volume":"29","author":"X Li","year":"2020","unstructured":"Li, X., Song, D., & Dong, Y. (2020). Hierarchical feature fusion network for salient object detection. IEEE Transactions on Image Processing,29, 9165\u20139175.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2667_CR101","doi-asserted-by":"crossref","unstructured":"Li, Y., Quan, R., Zhu, L., & Yang, Y. (2023). Efficient multimodal fusion via interactive prompting. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2604\u20132613.","DOI":"10.1109\/CVPR52729.2023.00256"},{"key":"2667_CR102","doi-asserted-by":"crossref","unstructured":"Li, Y., Ding, H., & Chen, H. (2024). Data processing techniques for modern multimodal models. arXiv preprint arXiv:2407.19180.","DOI":"10.1109\/IPTA62886.2024.10755555"},{"key":"2667_CR103","doi-asserted-by":"crossref","unstructured":"Li, Z., Xiyang, W., Hongyang, D., Liu, F., Nghiem, H., & Shi, G. (2025). A survey of state of the art large vision language models: Alignment, benchmark, evaluations and challenges.","DOI":"10.1109\/CVPRW67362.2025.00147"},{"key":"2667_CR104","doi-asserted-by":"crossref","unstructured":"Liang, P. P., Zadeh, A., & Morency, L.-P. (2024). Foundations & trends in multimodal machine learning: Principles, challenges, and open questions. ACM Comput. Surv., 56(10).","DOI":"10.1145\/3656580"},{"key":"2667_CR105","first-page":"17612","volume":"35","author":"VW Liang","year":"2022","unstructured":"Liang, V. W., Zhang, Y., Kwon, Y., Yeung, S., & Zou, J. Y. (2022). Mind the gap: Understanding the modality gap in multi-modal contrastive representation learning. 
Advances in Neural Information Processing Systems,35, 17612\u201317625.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2667_CR106","doi-asserted-by":"crossref","unstructured":"Lin, J., Yin, H., Ping, W., Yao, L., Molchanov, P., Tao, A., Mao, H., Kautz, J., Shoeybi, M., & Han, S. (2024). Vila: On pre-training for visual language models.","DOI":"10.1109\/CVPR52733.2024.02520"},{"key":"2667_CR107","doi-asserted-by":"publisher","first-page":"111","DOI":"10.1016\/j.aiopen.2022.10.001","volume":"3","author":"T Lin","year":"2022","unstructured":"Lin, T., Wang, Y., Liu, X., & Qiu, X. (2022). A survey of transformers. AI Open,3, 111\u2013132.","journal-title":"AI Open"},{"key":"2667_CR108","doi-asserted-by":"crossref","unstructured":"Lin, T.-Y., Maire, M., Belongie, S., Bourdev, L., Girshick, R., Hays, J., Perona, P., Ramanan, D., Zitnick, C.\u00a0L., & Doll\u00e1r, P. (2015). Microsoft coco: Common objects in context.","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"2667_CR109","doi-asserted-by":"crossref","unstructured":"Lin, X., Bertasius, G., Wang, J., Chang, S.-F., Parikh, D., & Torresani, L. (2021). Vx2text: End-to-end learning of video-based text generation from multimodal inputs. (pp. 7001\u20137011)","DOI":"10.1109\/CVPR46437.2021.00693"},{"key":"2667_CR110","unstructured":"Lin, Y., Luo, L., Chen, Y., Zhang, X., Wang, Z., Yang, W., Tong, M., & Yu, R. (2024). St-align: A multimodal foundation model for image-gene alignment in spatial transcriptomics."},{"key":"2667_CR111","doi-asserted-by":"crossref","unstructured":"Liu, C., Liu, H., Chen, H., & Du, W. (2023). Touchformer: A transformer-based two-tower architecture for tactile temporal signal classification. IEEE Transactions on Multimedia.","DOI":"10.1109\/TOH.2023.3346956"},{"key":"2667_CR112","unstructured":"Liu, H., Chen, Z., Yuan, Y., Mei, X., Liu, X., Mandic, D., Wang, W., & Plumbley, M.D. (2023). Audioldm: Text-to-audio generation with latent diffusion models."},{"key":"2667_CR113","unstructured":"Liu, H., Li, C., Wu, Q., & Lee, Y. J. (2024). Visual instruction tuning. Advances in neural information processing systems (p. 36)"},{"key":"2667_CR114","doi-asserted-by":"crossref","unstructured":"Liu, Y., Zhu, G., Zhu, B., Song, Q., Ge, G., Chen, H., Qiao, G. H., Peng, R., Wu, L., & Wang, J. (2022). Taisu: A 166m large-scale high-quality dataset for chinese vision-language pre-training. In S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, & A. Oh (Eds.), Advances in Neural Information Processing Systems (Vol. 35, pp. 16705\u201316717). Curran Associates Inc.","DOI":"10.52202\/068431-1215"},{"key":"2667_CR115","doi-asserted-by":"crossref","unstructured":"Liu, Z., Shen, Y., Lakshminarasimhan, V.B., Liang, P.P., Bagher\u00a0Zadeh, A. A., & Morency, L.-P. (2018). Efficient low-rank multimodal fusion with modality-specific factors. In: Gurevych, I., Miyao, Y., (Ed.), Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2247\u20132256, Melbourne, Australia. Association for Computational Linguistics.","DOI":"10.18653\/v1\/P18-1209"},{"key":"2667_CR116","unstructured":"Jiasen, L., Batra, D., Parikh, D., & Lee, S. (2019). Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks."},{"key":"2667_CR117","unstructured":"Jiasen, L., Clark, C., Zellers, R., Mottaghi, R., & Kembhavi, A. (2022). 
Unified-io: A unified model for vision, language, and multi-modal tasks."},{"key":"2667_CR118","first-page":"29615","volume-title":"Advances in Neural Information Processing Systems","author":"G Luo","year":"2023","unstructured":"Luo, G., Zhou, Y., Ren, T., Chen, S., Sun, X., & Ji, R. (2023). Cheap and quick: Efficient vision-language instruction tuning for large language models. In A. Oh, T. Naumann, A. Globerson, K. Saenko, M. Hardt, & S. Levine (Eds.), Advances in Neural Information Processing Systems (Vol. 36, pp. 29615\u201329627). Curran Associates Inc."},{"key":"2667_CR119","unstructured":"Lyu, C., Minghao, W., Wang, L., Huang, X., Liu, B., Zefeng, D., & Shi, S., & Tu, Z. (2023). Macaw-llm: Multi-modal language modeling with image, audio, video, and text integration."},{"key":"2667_CR120","doi-asserted-by":"crossref","unstructured":"Ma, Q., Zhang, M., Tang, Y., & Huang, Z. (2023). Att-sinkhorn: Multimodal alignment with sinkhorn-based deep attention architecture.","DOI":"10.1109\/ICAC57885.2023.10275301"},{"key":"2667_CR121","unstructured":"Ma, W., Li, S., Cai, L., & Kang, J. (2024). Learning modality knowledge alignment for cross-modality transfer. Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pp. 33777\u201333793. PMLR, 21\u201327."},{"key":"2667_CR122","unstructured":"Ma, Z., Furong, X., Liu, J., Yang, M., & Guo, Q. (2024). Sycoca: Symmetrizing contrastive captioners with attentive masking for multimodal alignment."},{"key":"2667_CR123","doi-asserted-by":"publisher","first-page":"2063","DOI":"10.1109\/TNNLS.2018.2790388","volume":"29","author":"M Mahmud","year":"2017","unstructured":"Mahmud, M., Kaiser, M. S., Hussain, A., & Vassanelli, S. (2017). Applications of deep learning and reinforcement learning to biological data. IEEE Transactions on Neural Networks and Learning Systems,29, 2063\u20132079.","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"2667_CR124","doi-asserted-by":"crossref","unstructured":"Mai, S., Hu, H., & Xing, S. (2019). Divide, conquer and combine: Hierarchical feature fusion network with local and global perspectives for multimodal affective computing.","DOI":"10.18653\/v1\/P19-1046"},{"key":"2667_CR125","doi-asserted-by":"crossref","unstructured":"Mai, S., & Haifeng, H., & Xing, S. (2020). Modality to modality translation: An adversarial representation learning and graph fusion network for multimodal fusion.","DOI":"10.1609\/aaai.v34i01.5347"},{"key":"2667_CR126","doi-asserted-by":"publisher","first-page":"594","DOI":"10.1016\/j.imavis.2011.07.001","volume":"29","author":"A Makris","year":"2011","unstructured":"Makris, A., Kosmopoulos, D. I., Perantonis, S. J., & Theodoridis, S. (2011). A hierarchical feature fusion framework for adaptive visual tracking. Image Vis. Comput.,29, 594\u2013606.","journal-title":"Image Vis. Comput."},{"key":"2667_CR127","doi-asserted-by":"crossref","unstructured":"Melzer, T., Reiter, M., & Bischof, H. (2001). Nonlinear feature extraction using generalized canonical correlation analysis.","DOI":"10.1007\/3-540-44668-0_50"},{"key":"2667_CR128","doi-asserted-by":"crossref","unstructured":"Missaoui, O., Frigui, H., & Gader, P. D. (2010). Model level fusion of edge histogram descriptors and gabor wavelets for landmine detection with ground penetrating radar. (pp. 3378\u20133381)","DOI":"10.1109\/IGARSS.2010.5650350"},{"key":"2667_CR129","doi-asserted-by":"crossref","unstructured":"Morelli, V. G., Barbato, M. 
P., Piccoli, F., & Napoletano, P. (2023). Multimodal fusion methods with vision transformers for remote sensing semantic segmentation. In: 2023 13th Workshop on Hyperspectral Imaging and Signal Processing: Evolution in Remote Sensing (WHISPERS), pp. 1\u20135.","DOI":"10.1109\/WHISPERS61460.2023.10430788"},{"key":"2667_CR130","doi-asserted-by":"crossref","unstructured":"Morvant, E., Habrard, A., & Ayache, S. (2014). Majority vote of diverse classifiers for late fusion. Structural, Syntactic, and Statistical Pattern Recognition (pp. 153\u2013162). Springer.","DOI":"10.1007\/978-3-662-44415-3_16"},{"key":"2667_CR131","doi-asserted-by":"crossref","unstructured":"Mu\u00f1oz, A., & Gonz\u00e1lez, J. (2008). Functional learning of kernels for information fusion purposes. In: Iberoamerican Congress on Pattern Recognition.","DOI":"10.1007\/978-3-540-85920-8_34"},{"key":"2667_CR132","unstructured":"Nagrani, A., Yang, S., Arnab, A., Jansen, A., Schmid, C., & Sun, C. (2022). Attention bottlenecks for multimodal fusion."},{"key":"2667_CR133","doi-asserted-by":"crossref","unstructured":"Nassar, H., & Gleich, D. (2017). Multimodal network alignment. ArXiv.","DOI":"10.1137\/1.9781611974973.69"},{"key":"2667_CR134","unstructured":"Nguyen, T., Gadre, S. Y., Ilharco, G., Oh, S., & Schmidt, L. (2023). Improving multimodal datasets with image captioning."},{"key":"2667_CR135","unstructured":"Ni, J., Tang, H., Haque, S. T., Yan, Y., & Ngu, A. H. H. (2024). A survey on multimodal wearable sensor-based human action recognition. arXiv preprint arXiv:2404.15349."},{"key":"2667_CR136","doi-asserted-by":"crossref","unstructured":"Ning, M., Zhou, F., Wang, W., Wang, S., Zhang, P., & Wang, J. (2024). Abftnet: An efficient transformer network with alignment before fusion for multimodal automatic modulation recognition. Electronics, 13(18).","DOI":"10.3390\/electronics13183725"},{"issue":"1","key":"2667_CR137","doi-asserted-by":"publisher","first-page":"60","DOI":"10.1109\/TAFFC.2017.2713783","volume":"10","author":"F Noroozi","year":"2019","unstructured":"Noroozi, F., Marjanovic, M., Njegus, A., Escalera, S., & Anbarjafari, G. (2019). Audio-visual emotion recognition in video clips. IEEE Transactions on Affective Computing,10(1), 60\u201375.","journal-title":"IEEE Transactions on Affective Computing"},{"key":"2667_CR138","unstructured":"OpenAI, Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, and et\u00a0al. GPT-4 technical report."},{"key":"2667_CR139","unstructured":"Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., & Assran, M. (2024). Dinov2: Learning robust visual features without supervision."},{"key":"2667_CR140","volume-title":"Advances in Neural Information Processing Systems","author":"V Ordonez","year":"2011","unstructured":"Ordonez, V., Kulkarni, G., & Berg, T. (2011). Im2text: Describing images using 1 million captioned photographs. In J. Shawe-Taylor, R. Zemel, P. Bartlett, F. Pereira, & K. Q. Weinberger (Eds.), Advances in Neural Information Processing Systems.  (Vol. 24). Curran Associates Inc."},{"key":"2667_CR141","doi-asserted-by":"publisher","first-page":"416","DOI":"10.1109\/TASLP.2019.2957889","volume":"28","author":"S Parekh","year":"2020","unstructured":"Parekh, S., Essid, S., Ozerov, A., Duong, N. Q. K., P\u00e9rez, P., & Richard, G. (2020). Weakly supervised representation learning for audio-visual scene analysis. 
IEEE\/ACM Transactions on Audio, Speech, and Language Processing,28, 416\u2013428.","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"key":"2667_CR142","unstructured":"Pattnayak, P., Patel, H. L., Kumar, B., Agarwal, A., Banerjee, I., Panda, S., Kumar, T. (2024). Survey of large multimodal model datasets, application categories and taxonomy."},{"key":"2667_CR143","unstructured":"Peng, Z., Wang, W., Dong, L., Hao, Y., Huang, S., Ma, S., Wei, F. (2023). Kosmos-2: Grounding multimodal large language models to the world."},{"key":"2667_CR144","doi-asserted-by":"crossref","unstructured":"Plummer, B. A., Wang, L., Cervantes, C. M., Caicedo, J. C., Hockenmaier, J., & Lazebnik, S. (2016). Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models.","DOI":"10.1109\/ICCV.2015.303"},{"key":"2667_CR145","doi-asserted-by":"publisher","first-page":"Article 120363","DOI":"10.1016\/j.eswa.2023.120363","volume":"228","author":"Y Qian","year":"2023","unstructured":"Qian, Y., & Pan, L. (2023). Leveraging multimodal features for knowledge graph entity alignment based on dynamic self-attention networks. Expert Systems with Applications,228, Article 120363.","journal-title":"Expert Systems with Applications"},{"key":"2667_CR146","unstructured":"Qin, J., Yitao, X., Luo, Z., Liu, C., Zong, L., & Zhang, X. (2023). Alternative telescopic displacement: An efficient multimodal alignment method"},{"key":"2667_CR147","unstructured":"Qiu, L., Zhang, R., Guo, Z., Zeng, Z., Guo, Z., Li, Y., & Zhang, G. (2023). Vt-clip: Enhancing vision-language models with visual-guided texts."},{"key":"2667_CR148","unstructured":"Radford, A., Kim, W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., Krueger, G., & Sutskever, I. (2021). Learning transferable visual models from natural language supervision. In ICML, pp. 8748\u20138763."},{"key":"2667_CR149","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., & Sutskever, I. (2019). Language models are unsupervised multitask learners."},{"key":"2667_CR150","doi-asserted-by":"publisher","first-page":"203","DOI":"10.1016\/j.inffus.2021.12.003","volume":"81","author":"A Rahate","year":"2022","unstructured":"Rahate, A., Walambe, R., Ramanna, S., & Kotecha, K. (2022). Multimodal co-learning: Challenges, applications with datasets, recent advances and future directions. Information Fusion,81, 203\u2013239.","journal-title":"Information Fusion"},{"key":"2667_CR151","doi-asserted-by":"crossref","unstructured":"Reiss, A., & Stricker, D. (2012). Introducing a new benchmarked dataset for activity monitoring. In: 2012 16th International Symposium on Wearable Computers, (pp 108\u2013109).","DOI":"10.1109\/ISWC.2012.13"},{"key":"2667_CR152","unstructured":"Roger, A., A\u00efmeur, E., & Rish, I. (2024). Towards ethical multimodal systems."},{"key":"2667_CR153","doi-asserted-by":"crossref","unstructured":"R\u00f6vid, A., & Remeli, V. (2019). Towards raw sensor fusion in 3d object detection. 2019 IEEE 17th World Symposium on Applied Machine Intelligence and Informatics (SAMI), pp. 293\u2013298.","DOI":"10.1109\/SAMI.2019.8782779"},{"key":"2667_CR154","unstructured":"Rubenstein, P. K., Asawaroengchai, C., Nguyen, D. D., Bapna, A., Borsos, Z., de Chaumont Quitry, F., Chen, P., Badawy, D. E., Han, W., Kharitonov, E., Muckenhirn, H., Padfield, D., Qin, J., Rozenberg, D., Sainath, T., Schalkwyk, J., Sharifi, M., Ramanovich, M. 
T., Tagliasacchi, M., Tudor, A., Velimirovi\u0107, M., Vincent, D., Yu, J., Wang, Y., Zayats, V., Zeghidour, N., Zhang, Y., Zhang, Z., Zilka, L., & Frank, C. (2023). Audiopalm: A large language model that can speak and listen."},{"key":"2667_CR155","doi-asserted-by":"crossref","unstructured":"Sarker, Md. I., & Milanova, M. (2022). Deep learning-based multimodal image retrieval combining image and text. In: 2022 International Conference on Computational Science and Computational Intelligence (CSCI), pp. 1543\u20131546.","DOI":"10.1109\/CSCI58124.2022.00274"},{"key":"2667_CR156","doi-asserted-by":"crossref","unstructured":"Scalzo, F., Bebis, G., Nicolescu, M., Loss, L. A., & Tavakkoli, A. (2008). Feature fusion hierarchies for gender classification. 2008 19th International Conference on Pattern Recognition. (pp. 1\u20134)","DOI":"10.1109\/ICPR.2008.4761234"},{"key":"2667_CR157","doi-asserted-by":"crossref","unstructured":"Schuhmann, C., Beaumont, R., Vencu, R., Gordon, C., Wightman, R., Cherti, M., Coombes, T., Katta, A., Mullis, C., Wortsman, M., Schramowski, P., Kundurthy, S., Crowson, K., Schmidt, L., Kaczmarczyk, R., & Jitsev, J. (2022). Laion-5b: An open large-scale dataset for training next generation image-text models.","DOI":"10.52202\/068431-1833"},{"key":"2667_CR158","unstructured":"Shankar, S., Thompson, L., & Fiterau, M. (2022). Progressive fusion for multimodal integration."},{"key":"2667_CR159","doi-asserted-by":"crossref","unstructured":"Shen, S., Yao, Z., Li, C., Darrell, T., Keutzer, K., & He, Y. (2023). Scaling vision-language models with sparse mixture of experts.","DOI":"10.18653\/v1\/2023.findings-emnlp.758"},{"key":"2667_CR160","doi-asserted-by":"crossref","unstructured":"Shi, D., Diao, X., Shi, L., Tang, H., Chi, Y., Li, C., & Xu, H. (2022). Charformer: A glyph fusion based attentive framework for high-precision character image denoising. In: ACM MM.","DOI":"10.1145\/3503161.3548208"},{"key":"2667_CR161","unstructured":"Shi, G., Zhu, Y., Liu, W., Yao, Q., & Li, X. (2022). Heterogeneous graph-based multimodal brain network learning."},{"key":"2667_CR162","doi-asserted-by":"crossref","unstructured":"Singh, A., Ronghang, H., Goswami, V., Couairon, G., Galuba, W., Rohrbach, M., & Kiela, D. (2022). Flava: A foundational language and vision alignment model.","DOI":"10.1109\/CVPR52688.2022.01519"},{"key":"2667_CR163","doi-asserted-by":"crossref","unstructured":"Snoek, C. G. M., Worring, M., & Smeulders, A. W. M. (2005). ACM MM: M. Smeulders. Early versus late fusion in semantic video analysis.","DOI":"10.1145\/1101149.1101236"},{"issue":"1","key":"2667_CR164","first-page":"45","volume":"39","author":"Y Song","year":"2023","unstructured":"Song, Y., Li, Z., & Song, W. (2023). Scene-driven multimodal knowledge graph construction for embodied ai. IEEE Transactions on Robotics,39(1), 45\u201360.","journal-title":"IEEE Transactions on Robotics"},{"key":"2667_CR165","unstructured":"Song, Z., Zang, Z., Wang, Y., Yang, G., yu, K., Chen, W., Wang, M., & Li, S. Z. (2024). Set-clip: Exploring aligned semantic from low-alignment multimodal data through a distribution view."},{"key":"2667_CR166","doi-asserted-by":"crossref","unstructured":"Srinivasan, K., Raman, K., Chen, J., Bendersky, M., & Najork, M. (2021). Wit: Wikipedia-based image text dataset for multimodal multilingual machine learning. In: Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR \u201921. 
ACM.","DOI":"10.1145\/3404835.3463257"},{"key":"2667_CR167","first-page":"2949","volume":"15","author":"N Srivastava","year":"2012","unstructured":"Srivastava, N., & Salakhutdinov, R. (2012). Multimodal learning with deep boltzmann machines. Journal of Machine Learning Research,15, 2949\u20132980.","journal-title":"Journal of Machine Learning Research"},{"key":"2667_CR168","doi-asserted-by":"crossref","unstructured":"Stappen, L., Baird, A., Schumann, L., & Schuller, B. (2021). The multimodal sentiment analysis in car reviews (muse-car) dataset: Collection. insights and improvements.","DOI":"10.1145\/3475957.3484450"},{"key":"2667_CR169","doi-asserted-by":"crossref","unstructured":"Steinbaeck, J., Steger, C., Holweg, G., & Druml, N. (2018). Design of a low-level radar and time-of-flight sensor fusion framework. 2018 21st Euromicro Conference on Digital System Design (DSD), (pp. 268\u2013275)","DOI":"10.1109\/DSD.2018.00056"},{"key":"2667_CR170","doi-asserted-by":"crossref","unstructured":"Su, L., Yan, F., Zhu, J., Xiao, X., & Duan, H. (2023). Beyond two-tower matching: Learning sparse retrievable cross-interactions for recommendation.","DOI":"10.1145\/3539618.3591643"},{"key":"2667_CR171","doi-asserted-by":"publisher","first-page":"424","DOI":"10.1016\/j.neucom.2018.11.038","volume":"331","author":"H Tang","year":"2019","unstructured":"Tang, H., Liu, H., Xiao, W., & Sebe, N. (2019). Fast and robust dynamic hand gesture recognition via key frames extraction and feature fusion. Elsevier Neurocomputing,331, 424\u2013433.","journal-title":"Elsevier Neurocomputing"},{"issue":"4","key":"2667_CR172","first-page":"1972","volume":"34","author":"H Tang","year":"2021","unstructured":"Tang, H., Liu, H., Dan, X., Torr, P. H. S., & Sebe, N. (2021). Attentiongan: Unpaired image-to-image translation using attention-guided generative adversarial networks. IEEE TNNLS,34(4), 1972\u20131987.","journal-title":"IEEE TNNLS"},{"issue":"6","key":"2667_CR173","doi-asserted-by":"publisher","first-page":"4298","DOI":"10.1109\/TPAMI.2024.3355248","volume":"46","author":"H Tang","year":"2024","unstructured":"Tang, H., Shao, L., Sebe, N., & Van Gool, L. (2024). Graph transformer gans with graph masked modeling for architectural layout generation. IEEE TPAMI,46(6), 4298\u20134313.","journal-title":"IEEE TPAMI"},{"key":"2667_CR174","doi-asserted-by":"crossref","unstructured":"Tang, H., Shao, L., Sebe, N., & Van\u00a0Gool, L. (2025). Enhanced multi-scale cross-attention for person image generation. IEEE TPAMI.","DOI":"10.1109\/TPAMI.2025.3531220"},{"key":"2667_CR175","doi-asserted-by":"crossref","unstructured":"Tang, H., Xu, D., Sebe, N., Wang, Y., Corso, J.J., Yan, Y. (2019). Multi-channel attention selection gan with cascaded semantic guidance for cross-view image translation. In: CVPR.","DOI":"10.1109\/CVPR.2019.00252"},{"key":"2667_CR176","doi-asserted-by":"crossref","unstructured":"Tang, H., Xu, D., Yan, Y., Torr, P.H.S., & Sebe, N. (2020). Local class-specific and global image-level generative adversarial networks for semantic-guided scene generation. In: CVPR.","DOI":"10.1109\/CVPR42600.2020.00789"},{"key":"2667_CR177","doi-asserted-by":"publisher","first-page":"1966","DOI":"10.1109\/TCSVT.2022.3218018","volume":"33","author":"J Tang","year":"2023","unstructured":"Tang, J., Liu, D., Jin, X., Peng, Y., Zhao, Q., Ding, Y., & Kong, W. (2023). Bafn: Bi-direction attention based fusion network for multimodal sentiment analysis. 
IEEE Transactions on Circuits and Systems for Video Technology,33, 1966\u20131978.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2667_CR178","first-page":"1056","volume":"23","author":"S Tang","year":"2021","unstructured":"Tang, S., Guo, D., Hong, R., & Wang, M. (2021). Graph-based multimodal sequential embedding for sign language translation. IEEE Transactions on Multimedia,23, 1056\u20131067.","journal-title":"IEEE Transactions on Multimedia"},{"key":"2667_CR179","doi-asserted-by":"publisher","first-page":"5134","DOI":"10.1109\/TIP.2022.3193288","volume":"31","author":"W Tang","year":"2022","unstructured":"Tang, W., He, F., Liu, Y., & Duan, Y. (2022). Matr: Multimodal medical image fusion via multiscale adaptive transformer. IEEE Transactions on Image Processing,31, 5134\u20135149.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2667_CR180","doi-asserted-by":"crossref","unstructured":"Tao, M., Tang, H., Wu, F., Jing, X.-Y., Bao, B.-K., & Xu, C. (2022). Df-gan: A simple and effective baseline for text-to-image synthesis. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp. 16515\u201316525.","DOI":"10.1109\/CVPR52688.2022.01602"},{"issue":"2","key":"2667_CR181","doi-asserted-by":"publisher","first-page":"805","DOI":"10.1109\/TPAMI.2023.3325770","volume":"46","author":"MK Tellamekala","year":"2024","unstructured":"Tellamekala, M. K., Amiriparian, S., Schuller, B. W., Andr\u00e9, E., Giesbrecht, T., & Valstar, M. (2024). COLD fusion: Calibrated and ordinal latent distribution fusion for uncertainty-aware multimodal emotion recognition. IEEE TPAMI,46(2), 805\u2013822.","journal-title":"IEEE TPAMI"},{"key":"2667_CR182","unstructured":"Thai, T. M., Vo, A. T., Tieu, H. K., Bui, L., & Nguyen, T. (2023). Uit-saviors at medvqa-gi 2023: Improving multimodal learning with image enhancement for gastrointestinal visual question answering."},{"issue":"2","key":"2667_CR183","doi-asserted-by":"publisher","first-page":"64","DOI":"10.1145\/2812802","volume":"59","author":"B Thomee","year":"2016","unstructured":"Thomee, B., Shamma, D. A., Friedland, G., Elizalde, B., Ni, K., Poland, D., Borth, D., & Li, L.-J. (2016). Yfcc100m: the new data in multimedia research. Communications of the ACM,59(2), 64\u201373.","journal-title":"Communications of the ACM"},{"key":"2667_CR184","doi-asserted-by":"publisher","first-page":"1325","DOI":"10.1007\/s11280-018-0548-3","volume":"22","author":"H Tian","year":"2019","unstructured":"Tian, H., Tao, Y., Pouyanfar, S., Chen, S.-C., & Shyu, M.-L. (2019). Multimodal deep representation learning for video classification. World Wide Web,22, 1325\u20131341.","journal-title":"World Wide Web"},{"key":"2667_CR185","doi-asserted-by":"crossref","unstructured":"Tong, S., Liu, Z., Zhai, Y., Ma, Y. Y., & LeCun, and Saining Xie. (2024). Eyes wide shut? exploring the visual shortcomings of multimodal llms. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), (pp. 9568\u20139578)","DOI":"10.1109\/CVPR52733.2024.00914"},{"key":"2667_CR186","doi-asserted-by":"crossref","unstructured":"Tong, T., Gray, K. R., Gao, Q., Chen, L., & Rueckert, D. (2015). 
Nonlinear graph fusion for multi-modal classification of alzheimer\u2019s disease.","DOI":"10.1007\/978-3-319-24888-2_10"},{"key":"2667_CR187","doi-asserted-by":"publisher","first-page":"171","DOI":"10.1016\/j.patcog.2016.10.009","volume":"63","author":"T Tong","year":"2017","unstructured":"Tong, T., Gray, K. R., Gao, Q., Chen, L., & Rueckert, D. (2017). Multi-modal classification of alzheimer\u2019s disease using nonlinear graph fusion. Pattern Recognit.,63, 171\u2013181.","journal-title":"Pattern Recognit."},{"key":"2667_CR188","unstructured":"Tsai, Y.-H. H., Liang, P. P., Zadeh, A., Morency, L.-P., & Salakhutdinov, R. (2019). Learning factorized multimodal representations."},{"key":"2667_CR189","doi-asserted-by":"crossref","unstructured":"Tu, J., Liu, X., Lin, R., Hong, Z., & Wang, M. (2022). Differentiable cross-modal hashing via multimodal transformers.","DOI":"10.1145\/3503161.3548187"},{"key":"2667_CR190","doi-asserted-by":"crossref","unstructured":"Uezato, T., Hong, D., Yokoya, N., & He, W. (2020). Guided deep decoder: Unsupervised image pair fusion. In ECCV.","DOI":"10.1007\/978-3-030-58539-6_6"},{"key":"2667_CR191","unstructured":"Unknown. (2007). Dynamic time warping. Information Retrieval for Music and Motion. Berlin, Heidelberg: Springer."},{"key":"2667_CR192","doi-asserted-by":"crossref","unstructured":"Varma, M., Delbrouck, J.-B., Hooper, S., Chaudhari, A., & Langlotz, C. (2023). Villa: Fine-grained vision-language representation learning from real-world data.","DOI":"10.1109\/ICCV51070.2023.02031"},{"key":"2667_CR193","unstructured":"Vasilakis, Y., Bittner, R., & Pauwels, J. (2024). I can listen but cannot read: An evaluation of two-tower multimodal systems for instrument recognition."},{"key":"2667_CR194","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., & Polosukhin, I. (2017). Attention is all you need."},{"key":"2667_CR195","doi-asserted-by":"crossref","unstructured":"Verma, Y., & Jawahar, C. V. (2014). Im2text and text2im: Associating images and texts for cross-modal retrieval. In: Proceedings of the British Machine Vision Conference (BMVC), (p. 2)","DOI":"10.5244\/C.28.97"},{"key":"2667_CR196","unstructured":"Vosylius, V., & Johns, E. (2023). In: Proceedings of The 7th Conference on Robot Learning, volume 229 of Proceedings of Machine Learning Research, (pp. 3194\u20133213)"},{"key":"2667_CR197","doi-asserted-by":"crossref","unstructured":"Vouitsis, N., Liu, Z., Gorti, S. K., Villecroze, V., Cresswell, J. C., Yu, G., Loaiza-Ganem, G., & Volkovs, M. (2024). Data-efficient multimodal fusion on a single gpu.","DOI":"10.1109\/CVPR52733.2024.02572"},{"key":"2667_CR198","unstructured":"Wan, Y., Wang, W., Zou, G., Zhang, B. (2024). Cross-modal feature alignment and fusion for composed image retrieval. In CVPRW, pp. 8384\u20138388."},{"key":"2667_CR199","doi-asserted-by":"crossref","unstructured":"Wang, A. J., Lin, K. Q., Zhang, D. J., Lei, S. W., & Shou, M. Z. (2023). Too large; data reduction for vision-language pre-training.","DOI":"10.1109\/ICCV51070.2023.00292"},{"key":"2667_CR200","doi-asserted-by":"publisher","first-page":"7466","DOI":"10.1109\/TCSVT.2023.3274545","volume":"33","author":"J Wang","year":"2023","unstructured":"Wang, J., & Tan, X. (2023). Mutually beneficial transformer for multimodal data fusion. 
IEEE Transactions on Circuits and Systems for Video Technology,33, 7466\u20137479.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2667_CR201","unstructured":"Wang, P., Bai, S., Tan, S., Wang, S., Fan, Z., Bai, J., Chen, K., Liu, X., Wang, J., Ge, W., Fan, Y., Dang, K., Du, M., Ren, X., Men, R., Liu, D., Zhou, C., Zhou, J., & Lin, J. (2024). Qwen2-vl: Enhancing vision-language model\u2019s perception of the world at any resolution."},{"key":"2667_CR202","unstructured":"Wang, P., Wang, S., Lin, J., Bai, S., Zhou, X., Zhou, J., Wang, X., & Zhou, C. (2023). One-peace: Exploring one general representation model toward unlimited modalities."},{"key":"2667_CR203","unstructured":"Wang, W., Lv, Q., Yu, W., Hong, W., Qi, J., Wang, Y., Ji, J., Yang, Z., Zhao, L., Song, X., Xu, J., Xu, B., Li, J., Dong, Y., Ding, M., & Tang, J. (2024). CogVLM: Visual expert for pretrained language models."},{"key":"2667_CR204","unstructured":"Wang, W., Bao, H., Dong, L., Bjorck, J., Peng, Z., Liu, Q., Aggarwal, K., Mohammed, O. K., Singhal, S., Som, S., & Wei, F. Image as a foreign language: BEiT pretraining for all vision and vision-language tasks."},{"key":"2667_CR205","doi-asserted-by":"crossref","unstructured":"Wang, X., Liang, J., Wang, C.-K., Deng, K., Lou, Y., Lin, M., & Yang, S. (2024). Vila: Efficient video-language alignment for video question answering.","DOI":"10.1007\/978-3-031-73033-7_11"},{"key":"2667_CR206","volume-title":"The role of spatial alignment in multimodal medical image fusion using deep learning for diagnostic problems","author":"X Wang","year":"2022","unstructured":"Wang, X., Shu, K., Kuang, H., Luo, S., Jin, R., & Liu, J. (2022). The role of spatial alignment in multimodal medical image fusion using deep learning for diagnostic problems. New York, NY, USA: Association for Computing Machinery."},{"key":"2667_CR207","doi-asserted-by":"crossref","unstructured":"Wang, Y.-C., Zhang, C., Deng, N., & Wang, Y. (2011). Kernel-based data fusion improves the drug-protein interaction prediction. Computational Biology and Chemistry, 35(6), 353\u2013362.","DOI":"10.1016\/j.compbiolchem.2011.10.003"},{"key":"2667_CR208","doi-asserted-by":"publisher","first-page":"597","DOI":"10.1109\/TMM.2012.2189550","volume":"14","author":"Y Wang","year":"2012","unstructured":"Wang, Y., Guan, L., & Venetsanopoulos, A. N. (2012). Kernel cross-modal factor analysis for information fusion with application to bimodal emotion recognition. IEEE Transactions on Multimedia,14, 597\u2013607.","journal-title":"IEEE Transactions on Multimedia"},{"key":"2667_CR209","unstructured":"Wang, Z., Yu, J., Yu, A. W., Dai, Z., Tsvetkov, Y., & Cao, Y. (2022). Simvlm: Simple visual language model pretraining with weak supervision."},{"key":"2667_CR210","doi-asserted-by":"publisher","first-page":"184","DOI":"10.1109\/TASE.2020.2964998","volume":"18","author":"Y Wei","year":"2021","unstructured":"Wei, Y., Wu, D., & Terpenny, J. P. (2021). Decision-level data fusion in quality control and predictive maintenance. IEEE Transactions on Automation Science and Engineering,18, 184\u2013194.","journal-title":"IEEE Transactions on Automation Science and Engineering"},{"key":"2667_CR211","unstructured":"Wen, H., Zhuang, H., Zamani, H., Hauptmann, A., & Bendersky, M. (2024). 
Multimodal reranking for knowledge-intensive visual question answering."},{"key":"2667_CR212","doi-asserted-by":"publisher","first-page":"116","DOI":"10.1109\/LSP.2023.3342649","volume":"31","author":"F Wu","year":"2024","unstructured":"Wu, F., Ma, Y., Jin, H., Jing, X.-Y., & Jiang, G.-P. (2024). Mfeclip: Clip with mapping-fusion embedding for text-guided image editing. IEEE Signal Processing Letters,31, 116\u2013120.","journal-title":"IEEE Signal Processing Letters"},{"key":"2667_CR213","unstructured":"Wu, M., & Goodman, N. (2019). Multimodal generative models for compositional representation learning."},{"key":"2667_CR214","doi-asserted-by":"crossref","unstructured":"Xie, Q., & Tang, H. (2025). TTTFusion: A Test-Time Training-Based Strategy for Multimodal Medical Image Fusion in Surgical Robots.","DOI":"10.1109\/IROS60139.2025.11246624"},{"issue":"5","key":"2667_CR215","first-page":"1231","volume":"22","author":"W Xiong","year":"2020","unstructured":"Xiong, W., Zhang, Y., & Li, W. (2020). Scene graph-based semantic alignment for multimodal tasks. IEEE Transactions on Multimedia,22(5), 1231\u20131243.","journal-title":"IEEE Transactions on Multimedia"},{"key":"2667_CR216","doi-asserted-by":"crossref","unstructured":"Xiong, Y., Wang, D., Zhang, Y., Feng, S., & Wang, G. (2014). Multimodal data fusion in text-image heterogeneous graph for social media recommendation. In: International Conference on Neural Information Processing, (pp. 96\u201399)","DOI":"10.1007\/978-3-319-08010-9_12"},{"key":"2667_CR217","unstructured":"Xu, J., Guo, Z., He, J., Hu, H., He, T., Bai, S., Chen, K., Wang, J., Fan, Y., Dang, K., Zhang, B., Wang, X., Chu, Y., & Lin, J. (2025). Qwen2.5-omni technical report."},{"key":"2667_CR218","doi-asserted-by":"crossref","unstructured":"Xu, X., Wu, C., Rosenman, S., Lal, V., & Che, W. (2023). Bridgetower: Building bridges between encoders in vision-language representation learning. In: Proceedings of the AAAI Conference on Artificial Intelligence","DOI":"10.1609\/aaai.v37i9.26263"},{"key":"2667_CR219","doi-asserted-by":"crossref","unstructured":"Xue, Z., & Marculescu, R. (2023). Dynamic multimodal fusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, (pp. 2575\u20132584)","DOI":"10.1109\/CVPRW59228.2023.00256"},{"key":"2667_CR220","doi-asserted-by":"crossref","unstructured":"Yan, S., Huang, D., & Soleymani, M. (2020). Mitigating biases in multimodal personality assessment. In: Proceedings of the 2020 International Conference on Multimodal Interaction, (pp. 361\u2013369)","DOI":"10.1145\/3382507.3418889"},{"key":"2667_CR221","doi-asserted-by":"publisher","first-page":"6956","DOI":"10.1109\/TMM.2024.3358086","volume":"26","author":"B Yang","year":"2024","unstructured":"Yang, B., Xiang, X., Kong, W., Zhang, J., & Peng, Y. (2024). Dmf-gan: Deep multimodal fusion generative adversarial networks for text-to-image synthesis. IEEE Transactions on Multimedia,26, 6956\u20136967.","journal-title":"IEEE Transactions on Multimedia"},{"key":"2667_CR222","first-page":"3841","volume":"25","author":"G Yang","year":"2022","unstructured":"Yang, G., Fini, E., Xu, D., Rota, P., Ding, M., Tang, H., Alameda-Pineda, X., & Ricci, E. (2022). Continual attentive fusion for incremental learning in semantic segmentation. IEEE TMM,25, 3841\u20133854.","journal-title":"IEEE TMM"},{"key":"2667_CR223","unstructured":"Yang, H., & Li, S. (2023). Videochat: Conversational agents in video understanding. 
IEEE Transactions on Neural Networks and Learning Systems."},{"key":"2667_CR224","first-page":"3839","volume":"82","author":"H Yang","year":"2023","unstructured":"Yang, H., Yifan, W., Si, Z., Zhao, Y., Liu, J., & Qin, B. (2023). Macsa: A multimodal aspect-category sentiment analysis dataset with multimodal fine-grained aligned annotations. Multimedia Tools and Applications,82, 3839\u20133858.","journal-title":"Multimedia Tools and Applications"},{"key":"2667_CR225","unstructured":"Yin, S., Fu, C., Zhao, S., Li, K., Sun, X., Xu, T., & Chen, E. A survey on multimodal large language models."},{"key":"2667_CR226","unstructured":"Young, A., Chen, B., Li, C., Huang, C., Zhang, G., Zhang, G., Li, H., Zhu, J., Chen, J., Chang, J., Yu, K., Liu, P., Liu, Q., Yue, S., Yang, S., Yang, S., Yu, T., Xie, W., Huang, W., Hu, X., Ren, X., Niu, X., Nie, P., Xu, Y., Liu, Y., Wang, Y., Cai, Y., Gu, Z., Liu, Z., & Dai, Z. (2024). Yi: Open foundation models by 01.ai."},{"key":"2667_CR227","unstructured":"Yu, J., Wang, Z., Vasudevan, V., Yeung, L., Seyedhosseini, M., & Wu, Y. CoCa: Contrastive captioners are image-text foundation models."},{"key":"2667_CR228","doi-asserted-by":"crossref","unstructured":"Yu, P., Kong, Z., Zhao, P., Dong, P., Tang, H., Sun, F., Xue, L., & Wang, Y. (2025). Q-tempfusion: Quantization-aware temporal multi-sensor fusion on bird\u2019s-eye view representation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), (pp. 5489\u20135499)","DOI":"10.1109\/WACV61041.2025.00536"},{"key":"2667_CR229","unstructured":"Yu, Q., Sun, Q., Zhang, X., Cui, Y., Zhang, F., Cao, Y., Wang, X., & Liu, J. (2024). Capsfusion: Rethinking image-text data at scale."},{"key":"2667_CR230","doi-asserted-by":"crossref","unstructured":"Yu, W., Xu, H., Meng, F., Zhu, Y., Ma, Y., Wu, J., Zou, J., & Yang, K. (2020). CH-SIMS: A Chinese multimodal sentiment analysis dataset with fine-grained annotation of modality. In: Jurafsky, D., Chai, J., Schluter, N., & Tetreault, J., (Ed.), Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 3718\u20133727, Online, July 2020. Association for Computational Linguistics.","DOI":"10.18653\/v1\/2020.acl-main.343"},{"key":"2667_CR231","doi-asserted-by":"crossref","unstructured":"Yuan, S., Bhatia, P., Celikkaya, B., Liu, H., & Choi, K. (2021). Towards user friendly medication mapping using entity-boosted two-tower neural network.","DOI":"10.1007\/978-981-16-0575-8_10"},{"key":"2667_CR232","unstructured":"Yuksekgonul, M., Bianchi, F., Kalluri, P., Jurafsky, D., & Zou, J. (2023). When and why vision-language models behave like bags-of-words, and what to do about it?"},{"key":"2667_CR233","doi-asserted-by":"crossref","unstructured":"Zadeh, A., Chen, M., Poria, S., Cambria, E., & Morency, L.-P. (2017). Tensor fusion network for multimodal sentiment analysis. In: Palmer, M., Hwa, R., & Riedel, S., (Ed.) Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pp. 1103\u20131114, Copenhagen, Denmark, September 2017. Association for Computational Linguistics.","DOI":"10.18653\/v1\/D17-1115"},{"key":"2667_CR234","doi-asserted-by":"crossref","unstructured":"Zang, Y., Li, W., Han, J., Zhou, K., & Loy, C. C. (2024). Contextual object detection with multimodal large language models.","DOI":"10.1007\/s11263-024-02214-4"},{"key":"2667_CR235","doi-asserted-by":"crossref","unstructured":"Zhai, X., Mustafa, B., Kolesnikov, A., & Beyer, L. (2023). 
Sigmoid loss for language image pre-training.","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"2667_CR236","doi-asserted-by":"publisher","first-page":"478","DOI":"10.1109\/JSTSP.2020.2987728","volume":"14","author":"C Zhang","year":"2020","unstructured":"Zhang, C., Yang, Z., He, X., & Deng, L. (2020). Multimodal intelligence: Representation learning, information fusion, and applications. IEEE Journal of Selected Topics in Signal Processing,14, 478\u2013493.","journal-title":"IEEE Journal of Selected Topics in Signal Processing"},{"key":"2667_CR237","doi-asserted-by":"crossref","unstructured":"Zhang, D., Yu, Y., Dong, J., Li, C., Su, D., Chu, C., & Yu, D. (2024). MM-LLMs: Recent advances in MultiModal large language models. In: Ku, L.-W., Martins, A., & Srikumar, V., (Ed.), Findings of the Association for Computational Linguistics: ACL 2024, pp. 12401\u201312430, Bangkok, Thailand, August 2024. Association for Computational Linguistics.","DOI":"10.18653\/v1\/2024.findings-acl.738"},{"key":"2667_CR238","unstructured":"Zhang, H., Li, F., Liu, S., Zhang, L., Su, H., Zhu, J., Ni, L. M., & Shum, H.-Y. (2022). Dino: Detr with improved denoising anchor boxes for end-to-end object detection."},{"key":"2667_CR239","first-page":"1","volume":"71","author":"J Zhang","year":"2022","unstructured":"Zhang, J., Liu, A., Wang, D., Liu, Y., Wang, Z. J., & Chen, X. (2022). Transformer-based end-to-end anatomical and functional image fusion. IEEE Transactions on Instrumentation and Measurement,71, 1\u201311.","journal-title":"IEEE Transactions on Instrumentation and Measurement"},{"issue":"8","key":"2667_CR240","doi-asserted-by":"publisher","first-page":"5625","DOI":"10.1109\/TPAMI.2024.3369699","volume":"46","author":"J Zhang","year":"2024","unstructured":"Zhang, J., Huang, J., Jin, S., & Lu, S. (2024). Vision-language models for vision tasks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence,46(8), 5625\u20135644.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2667_CR241","unstructured":"Zhang, M. (2023). Neural attention: Enhancing qkv calculation in self-attention mechanism with neural networks."},{"key":"2667_CR242","unstructured":"Zhang, P., Dong, X., Wang, B., Cao, Y., Xu, C., Ouyang, L., Zhao, Z., Duan, H., Zhang, S., Ding, S., Zhang, W., Yan, H., Zhang, X., Li, W., Li, J., Chen, K., He, C., Zhang, X., Qiao, Y., Lin, D., & Wang, J. (2023). Internlm-xcomposer: A vision-language large model for advanced text-image comprehension and composition."},{"key":"2667_CR243","unstructured":"Zhang, P., Dong, X., Zang, Y., Cao, Y., Qian, R., Chen, L., Guo, Q., Duan, H., Wang, B., Ouyang, L., Zhang, S., Zhang, W., Li, Y., Gao, Y., Sun, P., Zhang, X., Li, W., Li, J., Wang, W., Yan, H., He, C., Zhang, X., Chen, K., Dai, J., Qiao, Y., Lin, D., & Wang, J. (2024). Internlm-xcomposer-2.5: A versatile large vision language model supporting long-contextual input and output."},{"key":"2667_CR244","doi-asserted-by":"crossref","unstructured":"Zhang, P., Li, X., Hu, X., Yang, J., Zhang, L., Wang, L., Choi, Y., & Gao, J. (2021). Vinvl: Revisiting visual representations in vision-language models.","DOI":"10.1109\/CVPR46437.2021.00553"},{"issue":"3","key":"2667_CR245","doi-asserted-by":"publisher","first-page":"1915","DOI":"10.3390\/app13031915","volume":"13","author":"R Zhang","year":"2023","unstructured":"Zhang, R., Xue, C., Qi, Q., Lin, L., Zhang, J., & Zhang, L. (2023). 
Bimodal fusion network with multi-head attention for multimodal sentiment analysis. Applied Sciences,13(3), 1915.","journal-title":"Applied Sciences"},{"key":"2667_CR246","doi-asserted-by":"crossref","unstructured":"Zhang, T., Zeng, Z., Xiao, Y., Zhuang, H., Chen, C., Foulds, J., & Pan, S. (2024). Genderalign: An alignment dataset for mitigating gender bias in large language models.","DOI":"10.18653\/v1\/2025.acl-long.553"},{"key":"2667_CR247","doi-asserted-by":"publisher","first-page":"Article 106639","DOI":"10.1016\/j.knosys.2020.106639","volume":"212","author":"W Zhang","year":"2021","unstructured":"Zhang, W., Yu, J., Wang, Y., & Wang, W. (2021). Multimodal deep fusion for image question answering. Knowledge-Based Systems,212, Article 106639.","journal-title":"Knowledge-Based Systems"},{"key":"2667_CR248","unstructured":"Zhang, X., Shen, C., Yuan, X., Yan, S., Xie, L., Wang, W., Gu, C., Tang, H., & Ye, J. (2024). From redundancy to relevance: Enhancing explainability in multimodal large language models. arXiv preprint arXiv:2406.06579."},{"key":"2667_CR249","unstructured":"Zhang, X., Xu, Z., Tang, H., Gu, C., Chen, W., Zhu, S., & Guan, X. (2023). Enlighten-your-voice: When multimodal meets zero-shot low-light image enhancement. arXiv preprint arXiv:2312.10109."},{"key":"2667_CR250","unstructured":"Zhang, Y., Latham, P. E., & Saxe, A. (2024). Understanding unimodal bias in multimodal deep linear networks."},{"key":"2667_CR251","unstructured":"Zhang, Y., Wu, J., Li, W., Li, B., Ma, Z., Liu, Z., & Li, C. (2025). Llava-video: Video instruction tuning with synthetic data."},{"issue":"10","key":"2667_CR252","first-page":"3121","volume":"33","author":"Z Zhang","year":"2021","unstructured":"Zhang, Z., Mai, W., Xiong, H., & Cheng, W. (2021). A token-wise graph-based framework for multimodal named entity recognition. IEEE Transactions on Knowledge and Data Engineering,33(10), 3121\u20133134.","journal-title":"IEEE Transactions on Knowledge and Data Engineering"},{"key":"2667_CR253","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Yang, L., & Xiang, Z. (2024). Risurconv: Rotation invariant surface attention-augmented convolutions for 3d point cloud classification and segmentation.","DOI":"10.1007\/978-3-031-73390-1_6"},{"key":"2667_CR254","first-page":"1","volume":"62","author":"Z Zhang","year":"2024","unstructured":"Zhang, Z., Zhao, T., Guo, Y., & Yin, J. (2024). Rs5m and georsclip: A large-scale vision-language dataset and a large vision-language model for remote sensing. IEEE Transactions on Geoscience and Remote Sensing,62, 1\u201323.","journal-title":"IEEE Transactions on Geoscience and Remote Sensing"},{"issue":"9","key":"2667_CR255","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3649447","volume":"56","author":"F Zhao","year":"2024","unstructured":"Zhao, F., Zhang, C., & Geng, B. (2024). Deep multimodal data fusion. ACM Computing Surveys,56(9), 1\u201336.","journal-title":"ACM Computing Surveys"},{"key":"2667_CR256","doi-asserted-by":"crossref","first-page":"1172","DOI":"10.1109\/TNNLS.2022.3182882","volume":"35","author":"L Zhao","year":"2024","unstructured":"Zhao, L., & Wang, H. (2024). Deep multimodal learning with vision, audio, and text: Challenges and innovations. IEEE Transactions on Neural Networks and Learning Systems,35, 1172\u20131184.","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"2667_CR257","doi-asserted-by":"crossref","unstructured":"Zhou, T., Cao, J., Zhu, X., Liu, B., & Li, S. (2021). 
Visual-textual sentiment analysis enhanced by hierarchical cross-modality interaction. IEEE Systems Journal,15, 4303\u20134314.","DOI":"10.1109\/JSYST.2020.3026879"},{"key":"2667_CR258","doi-asserted-by":"crossref","unstructured":"Zhou, Y., & Shimada, N. (2023). Vision + language applications: A survey. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, (pp. 826\u2013842)","DOI":"10.1109\/CVPRW59228.2023.00090"},{"key":"2667_CR259","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., & Elhoseiny, M. (2023). Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592."},{"key":"2667_CR260","doi-asserted-by":"crossref","unstructured":"Zuo, R., Li, G., Choi, B., Bhowmick, S., Mah, D. N.-y., & Wong, G. L. (2023). Svp-t: A shape-level variable-position transformer for multivariate time series classification. In: Proceedings of the AAAI Conference on Artificial Intelligence, volume 37, pp. 11497\u201311505.","DOI":"10.1609\/aaai.v37i9.26359"},{"key":"2667_CR261","doi-asserted-by":"crossref","unstructured":"\u00c7etin, M., Chen, L., Fisher III, J. W., Ihler, A. T., Moses, R. L., Wainwright, M. J., & Willsky, A. S. (2006). Distributed fusion in sensor networks: A graphical models perspective.","DOI":"10.1109\/MSP.2006.1657816"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02667-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-025-02667-1","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02667-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T10:04:33Z","timestamp":1774605873000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-025-02667-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,6]]},"references-count":261,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2026,3]]}},"alternative-id":["2667"],"URL":"https:\/\/doi.org\/10.1007\/s11263-025-02667-1","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2,6]]},"assertion":[{"value":"24 February 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"30 September 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 February 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}],"article-number":"103"}}