{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,14]],"date-time":"2026-01-14T02:47:54Z","timestamp":1768358874254,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":27,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819556786","type":"print"},{"value":"9789819556793","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-5679-3_2","type":"book-chapter","created":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T18:36:35Z","timestamp":1768329395000},"page":"17-31","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Overcoming Feature Missing: Joint Reconstruction and\u00a0Prior Semantics Transmission for\u00a0Robust Multimodal Sentiment Analysis"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-2551-4425","authenticated-orcid":false,"given":"Yiyang","family":"Tang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3410-2155","authenticated-orcid":false,"given":"Ning","family":"Luo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9930-2903","authenticated-orcid":false,"given":"Qian","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0005-5729-0410","authenticated-orcid":false,"given":"Nanjie","family":"Zheng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,14]]},"reference":[{"key":"2_CR1","doi-asserted-by":"crossref","unstructured":"Baltru\u0161aitis, T., Robinson, P., Morency, L.P.: Openface: an open source facial behavior analysis toolkit. In: 2016 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 1\u201310. IEEE (2016)","DOI":"10.1109\/WACV.2016.7477553"},{"key":"2_CR2","doi-asserted-by":"crossref","unstructured":"Guo, J., Tang, J., Dai, W., Ding, Y., Kong, W.: Dynamically adjust word representations using unaligned multimodal information. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 3394\u20133402 (2022)","DOI":"10.1145\/3503161.3548137"},{"key":"2_CR3","doi-asserted-by":"crossref","unstructured":"Han, W., Chen, H., Poria, S.: Improving multimodal fusion with hierarchical mutual information maximization for multimodal sentiment analysis. arXiv preprint arXiv:2109.00412 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.723"},{"key":"2_CR4","doi-asserted-by":"crossref","unstructured":"Hazarika, D., Li, Y., Cheng, B., Zhao, S., Zimmermann, R., Poria, S.: Analyzing modality robustness in multimodal sentiment analysis. arXiv preprint arXiv:2205.15465 (2022)","DOI":"10.18653\/v1\/2022.naacl-main.50"},{"key":"2_CR5","unstructured":"Kenton, J.D.M.W.C., Toutanova, L.K.: Bert: Pre-training of deep bidirectional transformers for language understanding. In: Proceedings of naacL-HLT, vol.\u00a01, p.\u00a02. 
Minneapolis, Minnesota (2019)"},{"key":"2_CR6","doi-asserted-by":"crossref","unstructured":"Li, B., Li, C., Duan, F., Zheng, N., Zhao, Q.: Tpfn: applying outer product along time to multimodal sentiment analysis fusion on incomplete data. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XXIV 16, pp. 431\u2013447. Springer (2020)","DOI":"10.1007\/978-3-030-58586-0_26"},{"key":"2_CR7","doi-asserted-by":"crossref","unstructured":"Li, M., et al.: A unified self-distillation framework for multimodal sentiment analysis with uncertain missing modalities. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a038, pp. 10074\u201310082 (2024)","DOI":"10.1609\/aaai.v38i9.28871"},{"key":"2_CR8","doi-asserted-by":"crossref","unstructured":"Li, Y., Wang, Y., Cui, Z.: Decoupled multimodal distilling for emotion recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6631\u20136640 (2023)","DOI":"10.1109\/CVPR52729.2023.00641"},{"key":"2_CR9","doi-asserted-by":"crossref","unstructured":"Liang, P.P., Liu, Z., Tsai, Y.H.H., Zhao, Q., Salakhutdinov, R., Morency, L.P.: Learning representations from imperfect time series data via tensor rank regularization. arXiv preprint arXiv:1907.01011 (2019)","DOI":"10.18653\/v1\/P19-1152"},{"key":"2_CR10","unstructured":"Liang, P.P., Zadeh, A., Morency, L.P.: Foundations and trends in multimodal machine learning: principles, challenges, and open questions. arXiv preprint arXiv:2209.03430 (2022)"},{"key":"2_CR11","doi-asserted-by":"crossref","unstructured":"Lv, F., Chen, X., Huang, Y., Duan, L., Lin, G.: Progressive modality reinforcement for human multimodal emotion recognition from unaligned multimodal sequences. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2554\u20132562 (2021)","DOI":"10.1109\/CVPR46437.2021.00258"},{"key":"2_CR12","doi-asserted-by":"crossref","unstructured":"Ma, M., Ren, J., Zhao, L., Testuggine, D., Peng, X.: Are multimodal transformers robust to missing modality? In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18177\u201318186 (2022)","DOI":"10.1109\/CVPR52688.2022.01764"},{"issue":"3","key":"2_CR13","doi-asserted-by":"publisher","first-page":"2276","DOI":"10.1109\/TAFFC.2022.3172360","volume":"14","author":"S Mai","year":"2022","unstructured":"Mai, S., Zeng, Y., Zheng, S., Hu, H.: Hybrid contrastive learning of tri-modal representation for multimodal sentiment analysis. IEEE Trans. Affect. Comput. 14(3), 2276\u20132289 (2022)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"2_CR14","doi-asserted-by":"crossref","unstructured":"Mao, H., Zhang, B., Xu, H., Yuan, Z., Liu, Y.: Robust-msa: understanding the impact of modality noise on multimodal sentiment analysis. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a037, pp. 16458\u201316460 (2023)","DOI":"10.1609\/aaai.v37i13.27078"},{"key":"2_CR15","doi-asserted-by":"crossref","unstructured":"McFee, B., Raffel, C., Liang, D., Ellis, D.P., McVicar, M., Battenberg, E., Nieto, O.: librosa: Audio and music signal analysis in python. In: SciPy, pp. 
18\u201324 (2015)","DOI":"10.25080\/Majora-7b98e3ed-003"},{"issue":"1","key":"2_CR16","doi-asserted-by":"publisher","first-page":"309","DOI":"10.1109\/TAFFC.2023.3274829","volume":"15","author":"L Sun","year":"2023","unstructured":"Sun, L., Lian, Z., Liu, B., Tao, J.: Efficient multimodal transformer with dual-level feature restoration for robust multimodal sentiment analysis. IEEE Trans. Affect. Comput. 15(1), 309\u2013325 (2023)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"2_CR17","doi-asserted-by":"crossref","unstructured":"Tang, J., Li, K., Jin, X., Cichocki, A., Zhao, Q., Kong, W.: Ctfn: hierarchical learning for multimodal sentiment analysis using coupled-translation fusion network. In: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pp. 5301\u20135311 (2021)","DOI":"10.18653\/v1\/2021.acl-long.412"},{"key":"2_CR18","doi-asserted-by":"crossref","unstructured":"Tsai, Y.H.H., Bai, S., Liang, P.P., Kolter, J.Z., Morency, L.P., Salakhutdinov, R.: Multimodal transformer for unaligned multimodal language sequences. In: Proceedings of the Conference. Association for computational linguistics. Meeting, vol.\u00a02019, p.\u00a06558. NIH Public Access (2019)","DOI":"10.18653\/v1\/P19-1656"},{"key":"2_CR19","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.109259","volume":"136","author":"D Wang","year":"2023","unstructured":"Wang, D., Guo, X., Tian, Y., Liu, J., He, L., Luo, X.: Tetfn: a text enhanced transformer fusion network for multimodal sentiment analysis. Pattern Recogn. 136, 109259 (2023)","journal-title":"Pattern Recogn."},{"key":"2_CR20","doi-asserted-by":"crossref","unstructured":"Wang, Z., Wan, Z., Wan, X.: Transmodality: an end2end fusion method with transformer for multimodal sentiment analysis. In: Proceedings of the Web Conference 2020, pp. 2514\u20132520 (2020)","DOI":"10.1145\/3366423.3380000"},{"key":"2_CR21","doi-asserted-by":"crossref","unstructured":"Xu, W., Jiang, H., Liang, X.: Leveraging knowledge of modality experts for incomplete multimodal learning. In: Proceedings of the 32nd ACM International Conference on Multimedia, pp. 438\u2013446 (2024)","DOI":"10.1145\/3664647.3681683"},{"key":"2_CR22","doi-asserted-by":"crossref","unstructured":"Yang, D., Huang, S., Kuang, H., Du, Y., Zhang, L.: Disentangled representation learning for multimodal emotion recognition. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 1642\u20131651 (2022)","DOI":"10.1145\/3503161.3547754"},{"key":"2_CR23","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1109\/TMM.2023.3267882","volume":"26","author":"Z Yuan","year":"2023","unstructured":"Yuan, Z., Liu, Y., Xu, H., Gao, K.: Noise imitation based adversarial training for robust multimodal sentiment analysis. IEEE Trans. Multimedia 26, 529\u2013539 (2023)","journal-title":"IEEE Trans. Multimedia"},{"issue":"6","key":"2_CR24","doi-asserted-by":"publisher","first-page":"82","DOI":"10.1109\/MIS.2016.94","volume":"31","author":"A Zadeh","year":"2016","unstructured":"Zadeh, A., Zellers, R., Pincus, E., Morency, L.P.: Multimodal sentiment intensity analysis in videos: Facial gestures and verbal messages. IEEE Intell. Syst. 31(6), 82\u201388 (2016)","journal-title":"IEEE Intell. 
Syst."},{"key":"2_CR25","doi-asserted-by":"crossref","unstructured":"Zadeh, A.B., Liang, P.P., Poria, S., Cambria, E., Morency, L.P.: Multimodal language analysis in the wild: Cmu-mosei dataset and interpretable dynamic fusion graph. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2236\u20132246 (2018)","DOI":"10.18653\/v1\/P18-1208"},{"key":"2_CR26","doi-asserted-by":"crossref","unstructured":"Zhang, H., Wang, W., Yu, T.: Towards robust multimodal sentiment analysis with incomplete data. arXiv preprint arXiv:2409.20012 (2024)","DOI":"10.52202\/079017-1779"},{"key":"2_CR27","doi-asserted-by":"crossref","unstructured":"Zhang, H., Wang, Y., Yin, G., Liu, K., Liu, Y., Yu, T.: Learning language-guided adaptive hyper-modality representation for multimodal sentiment analysis. arXiv preprint arXiv:2310.05804 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.49"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-5679-3_2","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T18:36:38Z","timestamp":1768329398000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-5679-3_2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9789819556786","9789819556793"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-5679-3_2","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"14 January 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Shanghai","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 October 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 October 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/2025.prcv.cn\/index.asp","order":11,"name":"conference_url","label":"Conference 
URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}