{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,23]],"date-time":"2026-01-23T15:11:23Z","timestamp":1769181083499,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":38,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819555666","type":"print"},{"value":"9789819555673","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-5567-3_17","type":"book-chapter","created":{"date-parts":[[2026,1,22]],"date-time":"2026-01-22T21:13:50Z","timestamp":1769116430000},"page":"241-255","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Multimodal Sentiment Analysis via\u00a0Spatio-Temporal Decoupling and\u00a0Language-Focused Fusion"],"prefix":"10.1007","author":[{"given":"Jinhong","family":"Li","sequence":"first","affiliation":[]},{"given":"Leheng","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Hui","family":"Cui","sequence":"additional","affiliation":[]},{"given":"Jingxian","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Rui","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,23]]},"reference":[{"key":"17_CR1","doi-asserted-by":"crossref","unstructured":"Bagher\u00a0Zadeh, A., Liang, P.P., Poria, S., Cambria, E., Morency, L.P.: Multimodal language analysis in the wild: CMU-MOSEI dataset and interpretable dynamic fusion graph. In: Gurevych, I., Miyao, Y. (eds.) Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2236\u20132246 (2018)","DOI":"10.18653\/v1\/P18-1208"},{"key":"17_CR2","doi-asserted-by":"crossref","unstructured":"Bagher\u00a0Zadeh, A., Liang, P.P., Poria, S., Cambria, E., Morency, L.P.: Multimodal language analysis in the wild: CMU-MOSEI dataset and interpretable dynamic fusion graph. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, pp. 2236\u20132246 (2018)","DOI":"10.18653\/v1\/P18-1208"},{"key":"17_CR3","doi-asserted-by":"crossref","unstructured":"Baltru\u0161aitis, T., Robinson, P., Morency, L.P.: Openface: an open source facial behavior analysis toolkit. In: IEEE 16th Winter Conference on Applications of Computer Vision, pp. 1\u201310 (2016)","DOI":"10.1109\/WACV.2016.7477553"},{"key":"17_CR4","doi-asserted-by":"publisher","first-page":"104391","DOI":"10.1016\/j.cviu.2025.104391","volume":"258","author":"J Chen","year":"2025","unstructured":"Chen, J., Song, S., Tan, Y., Xia, H.: Temsa: text enhanced modal representation learning for multimodal sentiment analysis. Comput. Vis. Image Underst. 258, 104391 (2025)","journal-title":"Comput. Vis. 
Image Underst."},{"key":"17_CR5","doi-asserted-by":"crossref","unstructured":"Degottex, G., Kane, J., Drugman, T., Raitio, T., Scherer, S.: Covarep \u2014 a collaborative voice analysis repository for speech technologies. In: IEEE 39th International Conference on Acoustics, Speech and Signal Processing, pp. 960\u2013964 (2014)","DOI":"10.1109\/ICASSP.2014.6853739"},{"key":"17_CR6","doi-asserted-by":"crossref","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: Pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 17th Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, vol. 1, pp. 4171\u20134186 (2019)","DOI":"10.18653\/v1\/N19-1423"},{"key":"17_CR7","doi-asserted-by":"crossref","unstructured":"Guan, T., et al.: Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models. In: IEEE\/CVF 37th Conference on Computer Vision and Pattern Recognition, pp. 14375\u201314385 (2024)","DOI":"10.1109\/CVPR52733.2024.01363"},{"key":"17_CR8","doi-asserted-by":"crossref","unstructured":"Hazarika, D., Zimmermann, R., Poria, S.: Misa: modality-invariant and -specific representations for multimodal sentiment analysis. In: Proceedings of the 28th ACM International Conference on Multimedia, p. 1122\u20131131 (2020)","DOI":"10.1145\/3394171.3413678"},{"issue":"4","key":"17_CR9","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/5254.708428","volume":"13","author":"M Hearst","year":"1998","unstructured":"Hearst, M., Dumais, S., Osuna, E., Platt, J., Scholkopf, B.: Support vector machines. IEEE Intell. Syst. Appl. 13(4), 18\u201328 (1998)","journal-title":"IEEE Intell. Syst. Appl."},{"key":"17_CR10","first-page":"1234","volume":"46","author":"J Hessel","year":"2024","unstructured":"Hessel, J., Lee, Y.: Interpretable tensor fusion for multimodal learning. IEEE Trans. Pattern Anal. Mach. Intell. 46, 1234\u20131247 (2024)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"17_CR11","doi-asserted-by":"crossref","unstructured":"Hu, G., Zhu, Z., Hershcovich, D., Hu, L., Seifi, H., Xie, J.: UniMEEC: towards unified multimodal emotion recognition and emotion cause. In: Findings of the Association for Computational Linguistics: Conference on Empirical Methods in Natural Language Processing, pp. 5248\u20135261 (2024)","DOI":"10.18653\/v1\/2024.findings-emnlp.302"},{"key":"17_CR12","doi-asserted-by":"publisher","first-page":"122302","DOI":"10.1109\/ACCESS.2022.3223705","volume":"10","author":"S Kakuba","year":"2022","unstructured":"Kakuba, S., Poulose, A., Han, D.S.: Attention-based multi-learning approach for speech emotion recognition with dilated convolution. IEEE Access 10, 122302\u2013122313 (2022)","journal-title":"IEEE Access"},{"issue":"11","key":"17_CR13","doi-asserted-by":"publisher","first-page":"2139","DOI":"10.3390\/electronics14112139","volume":"14","author":"EH Kim","year":"2025","unstructured":"Kim, E.H., Lim, M.J., Shin, J.H.: Mmer-lmf: multi-modal emotion recognition in lightweight modality fusion. Electronics 14(11), 2139 (2025)","journal-title":"Electronics"},{"issue":"11","key":"17_CR14","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y Lecun","year":"1998","unstructured":"Lecun, Y., Bottou, L., Bengio, Y., Haffner, P.: Gradient-based learning applied to document recognition. Proc. 
IEEE 86(11), 2278\u20132324 (1998)","journal-title":"Proc. IEEE"},{"issue":"1","key":"17_CR15","doi-asserted-by":"publisher","first-page":"250","DOI":"10.1109\/TAFFC.2024.3430045","volume":"16","author":"M Li","year":"2025","unstructured":"Li, M., Zhu, Z., Li, K., Pei, H.: Diversity and balance: multimodal sentiment analysis using multimodal-prefixed and cross-modal attention. IEEE Trans. Affect. Comput. 16(1), 250\u2013263 (2025)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"17_CR16","doi-asserted-by":"crossref","unstructured":"Li, Y., Zhu, Q., He, H., Gu, Z., Zheng, C.: Moc: multi-modal sentiment analysis via optimal transport and contrastive interactions. In: Neural Information Processing: 30th International Conference, pp. 439\u2013451 (2023)","DOI":"10.1007\/978-981-99-8082-6_34"},{"key":"17_CR17","doi-asserted-by":"crossref","unstructured":"Li, Z., Xu, B., Zhu, C., Zhao, T.: CLMLF: a contrastive learning and multi-layer fusion method for multimodal sentiment detection. In: North American Chapter of the Association for Computational Linguistics Conference, pp. 2282\u20132294. ACL (2022)","DOI":"10.18653\/v1\/2022.findings-naacl.175"},{"key":"17_CR18","doi-asserted-by":"crossref","unstructured":"Liang, T., Lin, G., Feng, L., Zhang, Y., Lv, F.: Attention is not enough: Mitigating the distribution discrepancy in asynchronous multimodal sequence fusion. In: IEEE\/CVF 18th International Conference on Computer Vision, pp. 8128\u20138136 (2021)","DOI":"10.1109\/ICCV48922.2021.00804"},{"key":"17_CR19","unstructured":"Liu, X., Wang, Y., Zhang, Z.: Hierarchical interactive multimodal transformer for aspect-based multimodal sentiment analysis. In: Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 8632\u20138636 (2023)"},{"key":"17_CR20","doi-asserted-by":"publisher","first-page":"62","DOI":"10.1016\/j.jprocont.2018.02.005","volume":"64","author":"Y Liu","year":"2018","unstructured":"Liu, Y., Ge, Z.: Weighted random forests for fault classification in industrial processes with hierarchical clustering model selection. J. Process Control 64, 62\u201370 (2018)","journal-title":"J. Process Control"},{"key":"17_CR21","doi-asserted-by":"crossref","unstructured":"Liu, Z., Shen, Y., Lakshminarasimhan, V.B., Liang, P.P., Bagher\u00a0Zadeh, A., Morency, L.P.: Efficient low-rank multimodal fusion with modality-specific factors. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2247\u20132256 (2018)","DOI":"10.18653\/v1\/P18-1209"},{"key":"17_CR22","doi-asserted-by":"crossref","unstructured":"Rahman, W., et al.: Integrating multimodal information in large pretrained transformers. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 2359\u20132369 (2020)","DOI":"10.18653\/v1\/2020.acl-main.214"},{"key":"17_CR23","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., Paluri, M.: Learning spatiotemporal features with 3d convolutional networks. In: IEEE 25th International Conference on Computer Vision, pp. 4489\u20134497 (2015)","DOI":"10.1109\/ICCV.2015.510"},{"key":"17_CR24","doi-asserted-by":"crossref","unstructured":"Tsai, Y.H.H., Bai, S., Liang, P.P., Kolter, J.Z., Morency, L.P., Salakhutdinov, R.: Multimodal transformer for unaligned multimodal language sequences. In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 
6558\u20136569 (2019)","DOI":"10.18653\/v1\/P19-1656"},{"key":"17_CR25","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Proceedings of the 31st International Conference on Neural Information Processing Systems, p. 6000\u20136010 (2017)"},{"key":"17_CR26","doi-asserted-by":"crossref","unstructured":"Wang, P., Zhou, Q., Wu, Y., Chen, T., Hu, J.: Dlf: disentangled-language-focused multimodal sentiment analysis. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a039, pp. 21180\u201321188 (2025)","DOI":"10.1609\/aaai.v39i20.35416"},{"issue":"11","key":"17_CR27","doi-asserted-by":"publisher","first-page":"1716","DOI":"10.1109\/JPROC.2024.3525147","volume":"112","author":"T Wang","year":"2024","unstructured":"Wang, T., Li, F., Zhu, L., Li, J., Zhang, Z., Shen, H.T.: Cross-modal retrieval: a systematic review of methods and future directions. Proc. IEEE 112(11), 1716\u20131754 (2024)","journal-title":"Proc. IEEE"},{"key":"17_CR28","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Image as a foreign language: Beit pretraining for vision and vision-language tasks. In: IEEE\/CVF 36th Conference on Computer Vision and Pattern Recognition, pp. 19175\u201319186 (2023)","DOI":"10.1109\/CVPR52729.2023.01838"},{"issue":"15","key":"17_CR29","doi-asserted-by":"publisher","first-page":"25150","DOI":"10.1109\/JSEN.2024.3416295","volume":"24","author":"X Wang","year":"2024","unstructured":"Wang, X., Li, Y., Fang, A., He, P., Guo, Y.: A dual pipeline with spatio-temporal attention fusion approach for human activity recognition. IEEE Sens. J. 24(15), 25150\u201325162 (2024)","journal-title":"IEEE Sens. J."},{"key":"17_CR30","doi-asserted-by":"crossref","unstructured":"Wang, Y., Sun, F., Lu, M., Yao, A.: Learning deep multimodal feature representation with asymmetric multi-layer fusion. In: Proceedings of the 28th ACM International Conference on Multimedia, p. 3902\u20133910 (2020)","DOI":"10.1145\/3394171.3413621"},{"key":"17_CR31","doi-asserted-by":"crossref","unstructured":"Yang, D., Kuang, H., Huang, S., Zhang, L.: Learning modality-specific and -agnostic representations for asynchronous multimodal language sequences. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 1708\u20131717 (2022)","DOI":"10.1145\/3503161.3547755"},{"key":"17_CR32","doi-asserted-by":"crossref","unstructured":"Yu, W., Xu, H., Yuan, Z., Wu, J.: Learning modality-specific representations with self-supervised multi-task learning for multimodal sentiment analysis. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a035, pp. 10790\u201310797 (2021)","DOI":"10.1609\/aaai.v35i12.17289"},{"key":"17_CR33","doi-asserted-by":"crossref","unstructured":"Zadeh, A., Chen, M., Poria, S., Cambria, E., Morency, L.P.: Tensor fusion network for multimodal sentiment analysis. In: Proceedings of the 39th Conference on Empirical Methods in Natural Language Processing, pp. 1103\u20131114 (2017)","DOI":"10.18653\/v1\/D17-1115"},{"issue":"6","key":"17_CR34","doi-asserted-by":"publisher","first-page":"82","DOI":"10.1109\/MIS.2016.94","volume":"31","author":"A Zadeh","year":"2016","unstructured":"Zadeh, A., Zellers, R., Pincus, E., Morency, L.P.: Multimodal sentiment intensity analysis in videos: facial gestures and verbal messages. IEEE Intell. Syst. 31(6), 82\u201388 (2016)","journal-title":"IEEE Intell. 
Syst."},{"key":"17_CR35","doi-asserted-by":"crossref","unstructured":"Zhang, H., Wang, Y., Yin, G., Liu, K., Liu, Y., Yu, T.: Learning language-guided adaptive hyper-modality representation for multimodal sentiment analysis. In: Proceedings of the 28th Conference on Empirical Methods in Natural Language Processing, pp. 756\u2013767 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.49"},{"key":"17_CR36","doi-asserted-by":"crossref","unstructured":"Zhang, J., Xue, S., Wang, X., Liu, J.: Survey of multimodal sentiment analysis based on deep learning. In: IEEE 9th International Conference on Cloud Computing and Intelligent Systems, pp. 446\u2013450 (2023)","DOI":"10.1109\/CCIS59572.2023.10263012"},{"key":"17_CR37","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Lin, S., Zhou, F., Wang, R.: Hierarchical attention feature fusion and refinement network for point cloud upsampling. In: IEEE 25th International Conference on Multimedia and Expo, pp.\u00a01\u20138 (2024)","DOI":"10.1109\/ICME57554.2024.10688150"},{"key":"17_CR38","doi-asserted-by":"crossref","unstructured":"Zhou, Z., Wang, Y., Li, M.: Feature selection method based on hybrid sa-ga and random forests. In: International Conference on Computing and Data Science, pp. 139\u2013142 (2020)","DOI":"10.1109\/CDS49703.2020.00034"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-5567-3_17","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,22]],"date-time":"2026-01-22T21:13:56Z","timestamp":1769116436000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-5567-3_17"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9789819555666","9789819555673"],"references-count":38,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-5567-3_17","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"23 January 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Shanghai","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"15 October 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18 October 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"8","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/2025.prcv.cn\/index.asp","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}