{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,15]],"date-time":"2026-01-15T02:45:01Z","timestamp":1768445101725,"version":"3.49.0"},"reference-count":69,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T00:00:00Z","timestamp":1761955200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100013314","name":"Higher Education Discipline Innovation Project","doi-asserted-by":"publisher","award":["BP0719010"],"award-info":[{"award-number":["BP0719010"]}],"id":[{"id":"10.13039\/501100013314","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62306178"],"award-info":[{"award-number":["62306178"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003399","name":"Science and Technology Commission of Shanghai Municipality","doi-asserted-by":"publisher","award":["21DZ1100100"],"award-info":[{"award-number":["21DZ1100100"]}],"id":[{"id":"10.13039\/501100003399","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003399","name":"Science and Technology Commission of Shanghai Municipality","doi-asserted-by":"publisher","award":["22511105700"],"award-info":[{"award-number":["22511105700"]}],"id":[{"id":"10.13039\/501100003399","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003399","name":"Science and Technology Commission of Shanghai Municipality","doi-asserted-by":"publisher","award":["22511106101"],"award-info":[{"award-number":["22511106101"]}],"id":[{"id":"10.13039\/501100003399","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2022 ZD0160702"],"award-info":[{"award-number":["2022 ZD0160702"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neural 
Networks"],"published-print":{"date-parts":[[2025,11]]},"DOI":"10.1016\/j.neunet.2025.107821","type":"journal-article","created":{"date-parts":[[2025,7,15]],"date-time":"2025-07-15T15:32:07Z","timestamp":1752593527000},"page":"107821","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":2,"special_numbering":"C","title":["Redundancy-Adaptive Multimodal Learning for imperfect data"],"prefix":"10.1016","volume":"191","author":[{"ORCID":"https:\/\/orcid.org\/0009-0006-6790-6490","authenticated-orcid":false,"given":"Mengxi","family":"Chen","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6115-5194","authenticated-orcid":false,"given":"Jiangchao","family":"Yao","sequence":"additional","affiliation":[]},{"given":"Linyu","family":"Xing","sequence":"additional","affiliation":[]},{"given":"Yu","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5390-9053","authenticated-orcid":false,"given":"Ya","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3196-2347","authenticated-orcid":false,"given":"Yanfeng","family":"Wang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neunet.2025.107821_b1","doi-asserted-by":"crossref","first-page":"204","DOI":"10.1016\/j.inffus.2021.06.003","article-title":"Multimodal video sentiment analysis using deep learning approaches, a survey","volume":"76","author":"Abdu","year":"2021","journal-title":"Information Fusion"},{"key":"10.1016\/j.neunet.2025.107821_b2","series-title":"Proceedings of the IEEE\/CVF winter conference on applications of computer vision","first-page":"1","article-title":"Openface: An open source facial behavior analysis toolkit","author":"Baltru\u0161aitis","year":"2016"},{"issue":"7","key":"10.1016\/j.neunet.2025.107821_b3","doi-asserted-by":"crossref","first-page":"1152","DOI":"10.3390\/electronics9071152","article-title":"On robustness of multi-modal fusion\u2014robotics perspective","volume":"9","author":"Bednarek","year":"2020","journal-title":"Electronics"},{"key":"10.1016\/j.neunet.2025.107821_b4","series-title":"Proceedings of the European conference on computer vision","first-page":"446","article-title":"Food-101\u2013mining discriminative components with random forests","author":"Bossard","year":"2014"},{"issue":"4","key":"10.1016\/j.neunet.2025.107821_b5","doi-asserted-by":"crossref","first-page":"335","DOI":"10.1007\/s10579-008-9076-6","article-title":"IEMOCAP: Interactive emotional dyadic motion capture database","volume":"42","author":"Busso","year":"2008","journal-title":"Language Resources and Evaluation"},{"key":"10.1016\/j.neunet.2025.107821_b6","doi-asserted-by":"crossref","unstructured":"Chen, C., Dou, Q., Jin, Y., Chen, H., Qin, J., & Heng, P.-A. (2019). Robust Multimodal Brain Tumor Segmentation via Feature Disentanglement and Gated Fusion. In International conference on medical image computing and computer assisted intervention (pp. 447\u2013456).","DOI":"10.1007\/978-3-030-32248-9_50"},{"key":"10.1016\/j.neunet.2025.107821_b7","doi-asserted-by":"crossref","unstructured":"Chen, M., Xing, L., Wang, Y., & Zhang, Y. (2023). Enhanced Multimodal Representation Learning with Cross-modal KD. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 
11766\u201311775).","DOI":"10.1109\/CVPR52729.2023.01132"},{"key":"10.1016\/j.neunet.2025.107821_b8","unstructured":"Chen, M., Zhang, F., Zhao, Z., Yao, J., Zhang, Y., & Wang, Y. (2024). Probabilistic Conformal Distillation for Enhancing Missing Modality Robustness. 37, In Advances in Neural Information Processing Systems (pp. 36218\u201336242)."},{"key":"10.1016\/j.neunet.2025.107821_b9","series-title":"IEEE international conference on acoustics, speech and signal processing","first-page":"960","article-title":"COVAREP\u2014A collaborative voice analysis repository for speech technologies","author":"Degottex","year":"2014"},{"key":"10.1016\/j.neunet.2025.107821_b10","unstructured":"Devlin, J., Chang, M., Lee, K., & Toutanova, K. (2019). BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the North American chapter of the association for computational linguistics: human language technologies (pp. 4171\u20134186)."},{"key":"10.1016\/j.neunet.2025.107821_b11","first-page":"2420","article-title":"Consistent feature selection for analytic deep neural networks","volume":"33","author":"Dinh","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"7","key":"10.1016\/j.neunet.2025.107821_b12","doi-asserted-by":"crossref","first-page":"1553","DOI":"10.1109\/TMM.2013.2267205","article-title":"Multimodal saliency and fusion for movie summarization based on aural, visual, and textual attention","volume":"15","author":"Evangelopoulos","year":"2013","journal-title":"IEEE Transactions on Multimedia"},{"key":"10.1016\/j.neunet.2025.107821_b13","doi-asserted-by":"crossref","unstructured":"Eyben, F., W\u00f6llmer, M., & Schuller, B. (2010). OpenSMILE: The Munich Versatile and Fast Open-source Audio Feature Extractor. In Proceedings of ACM international conference on multimedia (pp. 1459\u20131462).","DOI":"10.1145\/1873951.1874246"},{"issue":"177","key":"10.1016\/j.neunet.2025.107821_b14","first-page":"1","article-title":"All models are wrong, but many are useful: Learning a variable\u2019s importance by studying an entire class of prediction models simultaneously","volume":"20","author":"Fisher","year":"2019","journal-title":"Journal of Machine Learning Research"},{"key":"10.1016\/j.neunet.2025.107821_b15","doi-asserted-by":"crossref","unstructured":"Han, Z., Yang, F., Huang, J., Zhang, C., & Yao, J. (2022). Multimodal Dynamics: Dynamical Fusion for Trustworthy Multimodal Classification. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 20707\u201320717).","DOI":"10.1109\/CVPR52688.2022.02005"},{"issue":"2-3","key":"10.1016\/j.neunet.2025.107821_b16","doi-asserted-by":"crossref","first-page":"74","DOI":"10.1561\/3300000043","article-title":"Trustworthy machine learning: from data to models","volume":"7","author":"Han","year":"2025","journal-title":"Foundations and Trends\u00ae in Privacy and Security"},{"key":"10.1016\/j.neunet.2025.107821_b17","unstructured":"Han, Z., Zhang, C., Fu, H., & Zhou, J. T. (2021). Trusted Multi-view Classification. In International conference on learning representations (pp. 
1\u201311)."},{"issue":"2","key":"10.1016\/j.neunet.2025.107821_b18","doi-asserted-by":"crossref","first-page":"2551","DOI":"10.1109\/TPAMI.2022.3171983","article-title":"Trusted multi-view classification with dynamic evidential fusion","volume":"45","author":"Han","year":"2022","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.neunet.2025.107821_b19","doi-asserted-by":"crossref","unstructured":"Hao, X., Zhu, Y., Appalaraju, S., Zhang, A., Zhang, W., Li, B., et al. (2023). MixGen: A New Multi-modal Data Augmentation. In Proceedings of the IEEE\/CVF winter conference on applications of computer vision (pp. 379\u2013389).","DOI":"10.1109\/WACVW58289.2023.00042"},{"key":"10.1016\/j.neunet.2025.107821_b20","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep Residual Learning for Image Recognition. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 770\u2013778).","DOI":"10.1109\/CVPR.2016.90"},{"key":"10.1016\/j.neunet.2025.107821_b21","doi-asserted-by":"crossref","unstructured":"Hoffman, J., Gupta, S., & Darrell, T. (2016). Learning with Side Information through Modality Hallucination. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 826\u2013834).","DOI":"10.1109\/CVPR.2016.96"},{"issue":"5","key":"10.1016\/j.neunet.2025.107821_b22","doi-asserted-by":"crossref","first-page":"4340","DOI":"10.1109\/TGRS.2020.3016820","article-title":"More diverse means better: Multimodal deep learning meets remote-sensing imagery classification","volume":"59","author":"Hong","year":"2021","journal-title":"IEEE Transactions on Geoscience and Remote Sensing"},{"key":"10.1016\/j.neunet.2025.107821_b23","doi-asserted-by":"crossref","DOI":"10.1109\/JSAC.2025.3559140","article-title":"Varfvv: view-adaptive real-time interactive free-view video streaming with edge computing","author":"Hu","year":"2025","journal-title":"IEEE Journal on Selected Areas in Communications"},{"key":"10.1016\/j.neunet.2025.107821_b24","doi-asserted-by":"crossref","unstructured":"Hu, M., Maillard, M., Zhang, Y., Ciceri, T., La Barbera, G., Bloch, I., et al. (2020). Knowledge Distillation from Multi-modal to Mono-modal Segmentation Networks. In International conference on medical image computing and computer assisted intervention (pp. 772\u2013781).","DOI":"10.1007\/978-3-030-59710-8_75"},{"key":"10.1016\/j.neunet.2025.107821_b25","first-page":"10944","article-title":"What makes multi-modal learning better than single (provably)","volume":"34","author":"Huang","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"10.1016\/j.neunet.2025.107821_b26","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der Maaten, L., & Weinberger, K. Q. (2017). Densely Connected Convolutional Networks. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 4700\u20134708).","DOI":"10.1109\/CVPR.2017.243"},{"issue":"3","key":"10.1016\/j.neunet.2025.107821_b27","doi-asserted-by":"crossref","first-page":"248","DOI":"10.1109\/TAI.2021.3060350","article-title":"SleepPrintNet: A multivariate multimodal neural network based on physiological time-series for automatic sleep staging","volume":"1","author":"Jia","year":"2020","journal-title":"IEEE Transactions on Artificial Intelligence"},{"key":"10.1016\/j.neunet.2025.107821_b28","doi-asserted-by":"crossref","unstructured":"Kim, Y. (2014). 
Convolutional Neural Networks for Sentence Classification. In Proceedings of conference on empirical methods in natural language processing (pp. 1746\u20131751).","DOI":"10.3115\/v1\/D14-1181"},{"key":"10.1016\/j.neunet.2025.107821_b29","doi-asserted-by":"crossref","unstructured":"Kim, J., Koh, J., Kim, Y., Choi, J., Hwang, Y., & Choi, J. W. (2019). Robust Deep Multi-modal Learning Based on Gated Information Fusion Network. In Proceedings of the Asian conference on computer vision (pp. 90\u2013106).","DOI":"10.1007\/978-3-030-20870-7_6"},{"key":"10.1016\/j.neunet.2025.107821_b30","unstructured":"Kingma, D. P., & Ba, J. (2015). Adam: A Method for Stochastic Optimization. In International conference on learning representations (pp. 1\u201315)."},{"key":"10.1016\/j.neunet.2025.107821_b31","unstructured":"Kingma, D. P., & Welling, M. (2014). Auto-Encoding Variational Bayes. In International conference on learning representations (pp. 1\u201314)."},{"key":"10.1016\/j.neunet.2025.107821_b32","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2024.123655","article-title":"MIT-FRNet: Modality-invariant temporal representation learning-based feature reconstruction network for missing modalities","volume":"249","author":"Li","year":"2024","journal-title":"Expert Systems with Applications"},{"key":"10.1016\/j.neunet.2025.107821_b33","series-title":"Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition","first-page":"6631","article-title":"Decoupled multimodal distilling for emotion recognition","author":"Li","year":"2023"},{"key":"10.1016\/j.neunet.2025.107821_b34","doi-asserted-by":"crossref","unstructured":"Liang, J., Li, R., & Jin, Q. (2020). Semi-supervised Multi-modal Emotion Recognition with Cross-modal Distribution Matching. In Proceedings of ACM international conference on multimedia (pp. 2852\u20132861).","DOI":"10.1145\/3394171.3413579"},{"key":"10.1016\/j.neunet.2025.107821_b35","doi-asserted-by":"crossref","unstructured":"Ma, M., Ren, J., Zhao, L., Testuggine, D., & Peng, X. (2022). Are Multimodal Transformers Robust to Missing Modality?. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 18156\u201318165).","DOI":"10.1109\/CVPR52688.2022.01764"},{"key":"10.1016\/j.neunet.2025.107821_b36","series-title":"Benchmarking robustness in object detection: Autonomous driving when winter is coming","author":"Michaelis","year":"2019"},{"key":"10.1016\/j.neunet.2025.107821_b37","series-title":"Interspeech","first-page":"2866","article-title":"Speaker attentive speech emotion recognition","author":"Moine","year":"2021"},{"key":"10.1016\/j.neunet.2025.107821_b38","series-title":"Interspeech","first-page":"3400","article-title":"Emotion recognition from speech using wav2vec 2.0 embeddings","author":"Pepino","year":"2021"},{"key":"10.1016\/j.neunet.2025.107821_b39","unstructured":"Poklukar, P., Vasco, M., Yin, H., Melo, F. S., Paiva, A., & Kragic, D. (2022). Geometric Multimodal Contrastive Representation Learning. In Proceedings of international conference on machine learning (pp. 
17782\u201317800)."},{"key":"10.1016\/j.neunet.2025.107821_b40","doi-asserted-by":"crossref","first-page":"443","DOI":"10.1016\/j.neunet.2023.03.003","article-title":"COM: Contrastive masked-attention model for incomplete multimodal learning","volume":"162","author":"Qian","year":"2023","journal-title":"Neural Networks"},{"issue":"6","key":"10.1016\/j.neunet.2025.107821_b41","doi-asserted-by":"crossref","first-page":"608","DOI":"10.1109\/TAI.2021.3104791","article-title":"CapsCovNet: A modified capsule network to diagnose COVID-19 from multimodal medical imaging","volume":"2","author":"Saif","year":"2021","journal-title":"IEEE Transactions on Artificial Intelligence"},{"key":"10.1016\/j.neunet.2025.107821_b42","series-title":"Long short-term memory based recurrent neural network architectures for large vocabulary speech recognition","author":"Sak","year":"2014"},{"key":"10.1016\/j.neunet.2025.107821_b43","doi-asserted-by":"crossref","unstructured":"Sanchez, E., Tellamekala, M. K., Valstar, M., & Tzimiropoulos, G. (2021). Affective Processes: Stochastic Modelling of Temporal Context for Emotion and Facial Expression Recognition. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 9074\u20139084).","DOI":"10.1109\/CVPR46437.2021.00896"},{"key":"10.1016\/j.neunet.2025.107821_b44","first-page":"15692","article-title":"Variational mixture-of-experts autoencoders for multi-modal deep generative models","volume":"32","author":"Shi","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"key":"10.1016\/j.neunet.2025.107821_b45","doi-asserted-by":"crossref","unstructured":"Subedar, M., Krishnan, R., Meyer, P. L., Tickoo, O., & Huang, J. (2019). Uncertainty-aware Audiovisual Activity Recognition Using Deep Bayesian Variational Inference. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 6301\u20136310).","DOI":"10.1109\/ICCV.2019.00640"},{"issue":"2","key":"10.1016\/j.neunet.2025.107821_b46","doi-asserted-by":"crossref","first-page":"805","DOI":"10.1109\/TPAMI.2023.3325770","article-title":"COLD fusion: Calibrated and ordinal latent distribution fusion for uncertainty-aware multimodal emotion recognition","volume":"46","author":"Tellamekala","year":"2023","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.neunet.2025.107821_b47","doi-asserted-by":"crossref","unstructured":"Tran, L., Liu, X., Zhou, J., & Jin, R. (2017). Missing Modalities Imputation via Cascaded Residual Autoencoder. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 1405\u20131414).","DOI":"10.1109\/CVPR.2017.528"},{"issue":"5","key":"10.1016\/j.neunet.2025.107821_b48","doi-asserted-by":"crossref","first-page":"1305","DOI":"10.1109\/TAI.2022.3201809","article-title":"M2R2: Missing-modality robust emotion recognition framework with iterative data augmentation","volume":"4","author":"Wang","year":"2023","journal-title":"IEEE Transactions on Artificial Intelligence"},{"key":"10.1016\/j.neunet.2025.107821_b49","doi-asserted-by":"crossref","unstructured":"Wang, H., Chen, Y., Ma, C., Avery, J., Hull, L., & Carneiro, G. (2023). Multi-modal Learning with Missing Modality via Shared-Specific Feature Modelling. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 
15878\u201315887).","DOI":"10.1109\/CVPR52729.2023.01524"},{"key":"10.1016\/j.neunet.2025.107821_b50","series-title":"N15News: A new dataset for multimodal news classification","author":"Wang","year":"2021"},{"key":"10.1016\/j.neunet.2025.107821_b51","doi-asserted-by":"crossref","unstructured":"Wang, W., Tran, D., & Feiszli, M. (2020). What Makes Training Multi-modal Classification Networks Hard?. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 12695\u201312705).","DOI":"10.1109\/CVPR42600.2020.01271"},{"key":"10.1016\/j.neunet.2025.107821_b52","doi-asserted-by":"crossref","unstructured":"Wei, S., Luo, C., & Luo, Y. (2023). MMANet: Margin-aware Distillation and Modality-aware Regularization for Incomplete Multimodal Learning. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 20039\u201320049).","DOI":"10.1109\/CVPR52729.2023.01919"},{"key":"10.1016\/j.neunet.2025.107821_b53","first-page":"5580","article-title":"Multimodal generative models for scalable weakly-supervised learning","volume":"31","author":"Wu","year":"2018","journal-title":"Advances in Neural Information Processing Systems"},{"key":"10.1016\/j.neunet.2025.107821_b54","doi-asserted-by":"crossref","unstructured":"Xaviar, S., Yang, X., & Ardakanian, O. (2023). Robust Multimodal Fusion for Human Activity Recognition. arXiv preprint arXiv:2303.04636.","DOI":"10.1109\/JSEN.2024.3388893"},{"key":"10.1016\/j.neunet.2025.107821_b55","first-page":"16416","article-title":"Drfuse: Learning disentangled representation for clinical multi-modal fusion with missing modality and modal inconsistency","volume":"Vol. 38","author":"Yao","year":"2024"},{"key":"10.1016\/j.neunet.2025.107821_b56","doi-asserted-by":"crossref","unstructured":"Ye, S., & Lu, J. (2023). Sequence unlearning for sequential recommender systems. In Australasian Joint Conference on Artificial Intelligence (pp. 403\u2013415).","DOI":"10.1007\/978-981-99-8388-9_33"},{"issue":"1","key":"10.1016\/j.neunet.2025.107821_b57","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3641285","article-title":"Robust recommender systems with rating flip noise","volume":"16","author":"Ye","year":"2024","journal-title":"ACM Transactions on Intelligent Systems and Technology"},{"key":"10.1016\/j.neunet.2025.107821_b58","doi-asserted-by":"crossref","unstructured":"Ye, S., Lu, J., & Zhang, G. (2025). Towards Safe Machine Unlearning: A Paradigm that Mitigates Performance Degradation. In Proceedings of the ACM on Web Conference (pp. 4635\u20134652).","DOI":"10.1145\/3696410.3714638"},{"key":"10.1016\/j.neunet.2025.107821_b59","first-page":"10790","article-title":"Learning modality-specific representations with self-supervised multi-task learning for multimodal sentiment analysis","volume":"Vol. 35","author":"Yu","year":"2021"},{"key":"10.1016\/j.neunet.2025.107821_b60","unstructured":"Zadeh, A. B., Liang, P. P., Poria, S., Cambria, E., & Morency, L.-P. (2018). Multimodal Language Analysis in the Wild: CMU-MOSEI Dataset and Interpretable Dynamic Fusion Graph. In Proceedings of annual meeting of the association for computational linguistics (pp. 
2236\u20132236)."},{"key":"10.1016\/j.neunet.2025.107821_b61","series-title":"Proceedings of international ACM SIGIR conference on research and development in information retrieval","first-page":"1545","article-title":"Tag-assisted multimodal sentiment analysis under uncertain missing modalities","author":"Zeng","year":"2022"},{"issue":"5","key":"10.1016\/j.neunet.2025.107821_b62","first-page":"2402","article-title":"Deep partial multi-view learning","volume":"44","author":"Zhang","year":"2022","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.neunet.2025.107821_b63","series-title":"International conference on medical image computing and computer assisted intervention","first-page":"107","article-title":"mmFormer: Multimodal medical transformer for incomplete multimodal learning of brain tumor segmentation","author":"Zhang","year":"2022"},{"key":"10.1016\/j.neunet.2025.107821_b64","doi-asserted-by":"crossref","unstructured":"Zhang, W., Shu, K., Wang, S., Liu, H., & Wang, Y. (2018). Multimodal Fusion of Brain Networks with Longitudinal Couplings. In International conference on medical image computing and computer assisted intervention (pp. 3\u201311).","DOI":"10.1007\/978-3-030-00931-1_1"},{"key":"10.1016\/j.neunet.2025.107821_b65","series-title":"International conference on medical image computing and computer assisted intervention","first-page":"589","article-title":"Modality-aware mutual learning for multi-modal medical image segmentation","author":"Zhang","year":"2021"},{"key":"10.1016\/j.neunet.2025.107821_b66","series-title":"Proceedings of conference on empirical methods in natural language processing","first-page":"8505","article-title":"Multimodal robustness for neural machine translation","author":"Zhao","year":"2022"},{"key":"10.1016\/j.neunet.2025.107821_b67","series-title":"Proceedings of annual meeting of the association for computational linguistics and international joint conference on natural language processing","first-page":"2608","article-title":"Missing modality imagination network for emotion recognition with uncertain missing modalities","author":"Zhao","year":"2021"},{"key":"10.1016\/j.neunet.2025.107821_b68","series-title":"Interspeech","first-page":"4725","article-title":"Multi-level fusion of wav2vec 2.0 and BERT for multimodal emotion recognition","author":"Zhao","year":"2022"},{"key":"10.1016\/j.neunet.2025.107821_b69","doi-asserted-by":"crossref","DOI":"10.1016\/j.ins.2024.121473","article-title":"Granular correlation-based label-specific feature augmentation for multi-label classification","volume":"689","author":"Zhao","year":"2025","journal-title":"Information Sciences"}],"container-title":["Neural 
Networks"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608025007014?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608025007014?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T21:04:05Z","timestamp":1756847045000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0893608025007014"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11]]},"references-count":69,"alternative-id":["S0893608025007014"],"URL":"https:\/\/doi.org\/10.1016\/j.neunet.2025.107821","relation":{},"ISSN":["0893-6080"],"issn-type":[{"value":"0893-6080","type":"print"}],"subject":[],"published":{"date-parts":[[2025,11]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Redundancy-Adaptive Multimodal Learning for imperfect data","name":"articletitle","label":"Article Title"},{"value":"Neural Networks","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neunet.2025.107821","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2025 Elsevier Ltd. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"107821"}}