{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,16]],"date-time":"2026-04-16T21:16:58Z","timestamp":1776374218393,"version":"3.51.2"},"reference-count":43,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2025,1,7]],"date-time":"2025-01-07T00:00:00Z","timestamp":1736208000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,1,7]],"date-time":"2025-01-07T00:00:00Z","timestamp":1736208000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"name":"Chongqing Basic Research and Frontier Exploration Project","award":["CSTB2022NSCQ-MSX0918"],"award-info":[{"award-number":["CSTB2022NSCQ-MSX0918"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. Syst."],"published-print":{"date-parts":[[2025,2]]},"DOI":"10.1007\/s40747-024-01724-5","type":"journal-article","created":{"date-parts":[[2025,1,7]],"date-time":"2025-01-07T08:27:52Z","timestamp":1736238472000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":10,"title":["TMFN: a text-based multimodal fusion network with multi-scale feature extraction and unsupervised contrastive learning for multimodal sentiment analysis"],"prefix":"10.1007","volume":"11","author":[{"given":"Junsong","family":"Fu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4345-0370","authenticated-orcid":false,"given":"Youjia","family":"Fu","sequence":"additional","affiliation":[]},{"given":"Huixia","family":"Xue","sequence":"additional","affiliation":[]},{"given":"Zihao","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,7]]},"reference":[{"key":"1724_CR1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2024.123776","volume":"249","author":"H Nie","year":"2024","unstructured":"Nie H, Lu S (2024) Fedcrmw: federated model ownership verification with compression-resistant model watermarking. Expert Syst Appl 249:123776","journal-title":"Expert Syst Appl"},{"key":"1724_CR2","doi-asserted-by":"crossref","unstructured":"Nie H, Lu S, Wu J et\u00a0al (2024) Deep model intellectual property protection with compression-resistant model watermarking. IEEE Trans Artif Intell 5(7):3362\u20133373","DOI":"10.1109\/TAI.2024.3351116"},{"key":"1724_CR3","doi-asserted-by":"crossref","unstructured":"Lin C, Obaidat MS (2019) Behavioral biometrics based on human-computer interaction devices. In: Obaidat M, Traore I, Woungang I (eds) Biometric-based physical and cybersecurity systems. Springer, Cham, pp 189\u2013209","DOI":"10.1007\/978-3-319-98734-7_7"},{"key":"1724_CR4","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.111675","volume":"293","author":"H Nie","year":"2024","unstructured":"Nie H, Lu S (2024) Persistverify: federated model ownership verification with spatial attention and boundary sampling. Knowl Based Syst 293:111675","journal-title":"Knowl Based Syst"},{"key":"1724_CR5","doi-asserted-by":"crossref","unstructured":"Nie H, Lu S, Wang M et\u00a0al (2024) Verichroma: ownership verification for federated models via rgb filters. In: European conference on parallel processing. 
Springer, pp 332\u2013345","DOI":"10.1007\/978-3-031-69766-1_23"},{"key":"1724_CR6","doi-asserted-by":"crossref","unstructured":"Nie H, Lu S (2024) Securing IP in edge AI: neural network watermarking for multimodal models. Appl Intell 54(21):10455\u201310472","DOI":"10.1007\/s10489-024-05746-x"},{"issue":"14","key":"1724_CR7","doi-asserted-by":"publisher","first-page":"2835","DOI":"10.3390\/electronics13142835","volume":"13","author":"Y Fu","year":"2024","unstructured":"Fu Y, Fu J, Xue H et al (2024) Self-hcl: self-supervised multitask learning with hybrid contrastive learning strategy for multimodal sentiment analysis. Electronics 13(14):2835","journal-title":"Electronics"},{"key":"1724_CR8","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.108837","volume":"130","author":"Q Shi","year":"2022","unstructured":"Shi Q, Fan J, Wang Z et al (2022) Multimodal channel-wise attention transformer inspired by multisensory integration mechanisms of the brain. Pattern Recogn 130:108837","journal-title":"Pattern Recogn"},{"key":"1724_CR9","doi-asserted-by":"publisher","first-page":"424","DOI":"10.1016\/j.inffus.2022.09.025","volume":"91","author":"A Gandhi","year":"2023","unstructured":"Gandhi A, Adhvaryu K, Poria S et al (2023) Multimodal sentiment analysis: a systematic review of history, datasets, multimodal fusion methods, applications, challenges and future directions. Inf Fusion 91:424\u2013444","journal-title":"Inf Fusion"},{"key":"1724_CR10","doi-asserted-by":"crossref","unstructured":"Poria S, Cambria E, Hazarika D et\u00a0al (2017) Multi-level multiple attentions for contextual multimodal sentiment analysis. In: 2017 IEEE international conference on data mining (ICDM). IEEE, pp 1033\u20131038","DOI":"10.1109\/ICDM.2017.134"},{"issue":"8","key":"1724_CR11","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter S, Schmidhuber J (1997) Long short-term memory. Neural Comput 9(8):1735\u20131780","journal-title":"Neural Comput"},{"key":"1724_CR12","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1016\/j.inffus.2022.11.022","volume":"92","author":"K Kim","year":"2023","unstructured":"Kim K, Park S (2023) Aobert: all-modalities-in-one bert for multimodal sentiment analysis. Inf Fusion 92:37\u201345","journal-title":"Inf Fusion"},{"key":"1724_CR13","unstructured":"Vaswani A (2017) Attention is all you need. Advances in Neural Information Processing Systems"},{"key":"1724_CR14","doi-asserted-by":"publisher","first-page":"51","DOI":"10.1016\/j.patcog.2018.07.001","volume":"84","author":"Y Liu","year":"2018","unstructured":"Liu Y, Liu L, Guo Y et al (2018) Learning visual and textual representations for multimodal matching and classification. Pattern Recogn 84:51\u201367","journal-title":"Pattern Recogn"},{"key":"1724_CR15","doi-asserted-by":"crossref","unstructured":"Tsai YHH, Bai S, Liang PP et\u00a0al (2019) Multimodal transformer for unaligned multimodal language sequences. In: Proceedings of the conference. Association for computational linguistics. Meeting, NIH public access, p 6558","DOI":"10.18653\/v1\/P19-1656"},{"key":"1724_CR16","doi-asserted-by":"crossref","unstructured":"Han W, Chen H, Poria S (2021) Improving multimodal fusion with hierarchical mutual information maximization for multimodal sentiment analysis. 
arXiv preprint arXiv:2109.00412","DOI":"10.18653\/v1\/2021.emnlp-main.723"},{"key":"1724_CR17","doi-asserted-by":"crossref","unstructured":"Hazarika D, Zimmermann R, Poria S (2020) Misa: modality-invariant and-specific representations for multimodal sentiment analysis. In: Proceedings of the 28th ACM international conference on multimedia, pp 1122\u20131131","DOI":"10.1145\/3394171.3413678"},{"key":"1724_CR18","doi-asserted-by":"crossref","unstructured":"Yu W, Xu H, Yuan Z, et\u00a0al (2021) Learning modality-specific representations with self-supervised multi-task learning for multimodal sentiment analysis. In: Proceedings of the AAAI conference on artificial intelligence, pp 10790\u201310797","DOI":"10.1609\/aaai.v35i12.17289"},{"key":"1724_CR19","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2022.109259","volume":"136","author":"D Wang","year":"2023","unstructured":"Wang D, Guo X, Tian Y et al (2023) Tetfn: a text enhanced transformer fusion network for multimodal sentiment analysis. Pattern Recogn 136:109259","journal-title":"Pattern Recogn"},{"key":"1724_CR20","unstructured":"Belghazi MI, Baratin A, Rajeshwar S et\u00a0al (2018) Mutual information neural estimation. In: International conference on machine learning, PMLR, pp 531\u2013540"},{"key":"1724_CR21","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2023.111346","volume":"285","author":"J Huang","year":"2024","unstructured":"Huang J, Zhou J, Tang Z et al (2024) Tmbl: transformer-based multimodal binding learning model for multimodal sentiment analysis. Knowl Based Syst 285:111346","journal-title":"Knowl Based Syst"},{"key":"1724_CR22","doi-asserted-by":"crossref","unstructured":"Zadeh A, Chen M, Poria S et\u00a0al (2017) Tensor fusion network for multimodal sentiment analysis. arXiv preprint arXiv:1707.07250","DOI":"10.18653\/v1\/D17-1115"},{"key":"1724_CR23","doi-asserted-by":"crossref","unstructured":"Liu Z, Shen Y, Lakshminarasimhan VB et\u00a0al (2018) Efficient low-rank multimodal fusion with modality-specific factors. arXiv preprint arXiv:1806.00064","DOI":"10.18653\/v1\/P18-1209"},{"key":"1724_CR24","doi-asserted-by":"crossref","unstructured":"Pham H, Liang PP, Manzini T et\u00a0al (2019) Found in translation: Learning robust joint representations by cyclic translations between modalities. In: Proceedings of the AAAI conference on artificial intelligence, pp 6892\u20136899","DOI":"10.1609\/aaai.v33i01.33016892"},{"key":"1724_CR25","doi-asserted-by":"crossref","unstructured":"Sun Z, Sarma P, Sethares W et\u00a0al (2020) Learning relationships between text, audio, and video via deep canonical correlation for multimodal language analysis. In: Proceedings of the AAAI conference on artificial intelligence, pp 8992\u20138999","DOI":"10.1609\/aaai.v34i05.6431"},{"key":"1724_CR26","doi-asserted-by":"crossref","unstructured":"Wang P, Yu R, Gao N et al (2020) Task-driven data offloading for fog-enabled urban iot services. IEEE Internet Things J 8(9):7562\u20137574","DOI":"10.1109\/JIOT.2020.3039467"},{"key":"1724_CR27","doi-asserted-by":"crossref","unstructured":"Lin TY, Doll\u00e1r P, Girshick R et\u00a0al (2017) Feature pyramid networks for object detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 2117\u20132125","DOI":"10.1109\/CVPR.2017.106"},{"key":"1724_CR28","doi-asserted-by":"crossref","unstructured":"Li J, Fang F, Mei K et\u00a0al (2018) Multi-scale residual network for image super-resolution. 
In: Proceedings of the European conference on computer vision (ECCV), pp 517\u2013532","DOI":"10.1007\/978-3-030-01237-3_32"},{"key":"1724_CR29","doi-asserted-by":"publisher","first-page":"853","DOI":"10.1109\/TASLP.2022.3145293","volume":"30","author":"Y Lei","year":"2022","unstructured":"Lei Y, Yang S, Wang X et al (2022) Msemotts: multi-scale emotion transfer, prediction, and control for emotional speech synthesis. IEEE\/ACM Trans Audio Speech Lang Process 30:853\u2013864","journal-title":"IEEE\/ACM Trans Audio Speech Lang Process"},{"key":"1724_CR30","doi-asserted-by":"crossref","unstructured":"Cao X, Liangwen H, Wang H et\u00a0al (2020) Microblog-oriented multi-scale cnn multi-label sentiment classification model. In: 2020 IEEE\/WIC\/ACM international joint conference on web intelligence and intelligent agent technology (WI-IAT). IEEE, pp 626\u2013631","DOI":"10.1109\/WIIAT50758.2020.00094"},{"key":"1724_CR31","doi-asserted-by":"crossref","unstructured":"Lin C, Cheng H, Rao Q et\u00a0al (2024) M3SA: multimodal sentiment analysis based on multi-scale feature extraction and multi-task learning. IEEE\/ACM Trans Audio Speech Lang Process 32:1416\u20131429","DOI":"10.1109\/TASLP.2024.3361374"},{"key":"1724_CR32","doi-asserted-by":"crossref","unstructured":"Franceschini R, Fini E, Beyan C et\u00a0al (2022) Multimodal emotion recognition with modality-pairwise unsupervised contrastive loss. In: 2022 26th international conference on pattern recognition (ICPR). IEEE, pp 2589\u20132596","DOI":"10.1109\/ICPR56361.2022.9956589"},{"key":"1724_CR33","doi-asserted-by":"crossref","unstructured":"Liu Y, Fan Q, Zhang S et\u00a0al (2021) Contrastive multimodal fusion with tupleinfonce. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 754\u2013763","DOI":"10.1109\/ICCV48922.2021.00079"},{"key":"1724_CR34","unstructured":"Devlin J, Chang MW, Lee K et\u00a0al (2018) Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805"},{"key":"1724_CR35","doi-asserted-by":"crossref","unstructured":"Degottex G, Kane J, Drugman T et\u00a0al (2014) Covarep\u2014a collaborative voice analysis repository for speech technologies. In: 2014 IEEE international conference on acoustics, speech and signal processing (icassp). IEEE, pp 960\u2013964","DOI":"10.1109\/ICASSP.2014.6853739"},{"key":"1724_CR36","unstructured":"Lei J, Sala J, Jasra SK (2017) Identifying correlation between facial expression and heart rate and skin conductance with iMotions biometric platform. J Emerg Forensic Sci Res 2(2):53\u201383"},{"key":"1724_CR37","doi-asserted-by":"crossref","unstructured":"Zhao X, Zhang L, Pang Y et\u00a0al (2020) A single stream network for robust and real-time rgb-d salient object detection. In: Computer vision\u2014ECCV 2020: 16th European conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XXII 16. Springer, pp 646\u2013662","DOI":"10.1007\/978-3-030-58542-6_39"},{"key":"1724_CR38","unstructured":"Zadeh A, Zellers R, Pincus E et\u00a0al (2016) Mosi: multimodal corpus of sentiment intensity and subjectivity analysis in online opinion videos. arXiv preprint arXiv:1606.06259"},{"key":"1724_CR39","doi-asserted-by":"crossref","unstructured":"Zadeh AB, Liang PP, Poria S et\u00a0al (2018) Multimodal language analysis in the wild: Cmu-mosei dataset and interpretable dynamic fusion graph. 
In: Proceedings of the 56th annual meeting of the Association for Computational Linguistics (volume 1: long papers), pp 2236\u20132246","DOI":"10.18653\/v1\/P18-1208"},{"key":"1724_CR40","doi-asserted-by":"crossref","unstructured":"Wang Y, Shen Y, Liu Z et\u00a0al (2019) Words can shift: dynamically adjusting word representations using nonverbal behaviors. In: Proceedings of the AAAI conference on artificial intelligence, pp 7216\u20137223","DOI":"10.1609\/aaai.v33i01.33017216"},{"key":"1724_CR41","doi-asserted-by":"crossref","unstructured":"Rahman W, Hasan MK, Lee S et\u00a0al (2020) Integrating multimodal information in large pretrained transformers. In: Proceedings of the conference. Association for Computational Linguistics. Meeting, NIH Public Access, p 2359","DOI":"10.18653\/v1\/2020.acl-main.214"},{"key":"1724_CR42","doi-asserted-by":"crossref","unstructured":"Hwang Y, Kim JH (2023) Self-supervised unimodal label generation strategy using recalibrated modality representations for multimodal sentiment analysis. Find Assoc Comput Linguist EACL 2023:35\u201346","DOI":"10.18653\/v1\/2023.findings-eacl.2"},{"key":"1724_CR43","unstructured":"Hinton GE, Roweis S (2002) Stochastic neighbor embedding. Advances in neural information processing systems, 15"}],"container-title":["Complex &amp; Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01724-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-024-01724-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01724-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T16:28:48Z","timestamp":1738945728000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-024-01724-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1,7]]},"references-count":43,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2025,2]]}},"alternative-id":["1724"],"URL":"https:\/\/doi.org\/10.1007\/s40747-024-01724-5","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"value":"2199-4536","type":"print"},{"value":"2198-6053","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1,7]]},"assertion":[{"value":"29 July 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 December 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 January 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"133"}}
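A record of this shape is what the Crossref REST API returns for a single work. As a minimal sketch of how to retrieve and inspect it, the snippet below fetches this article's metadata from the public endpoint https://api.crossref.org/works/{DOI} and prints a few of the fields present in the record above; the mailto address in the User-Agent is a placeholder following Crossref's "polite pool" convention, and only the Python standard library is used.

```python
import json
import urllib.request

# Fetch the Crossref work record for this article's DOI.
DOI = "10.1007/s40747-024-01724-5"
req = urllib.request.Request(
    f"https://api.crossref.org/works/{DOI}",
    # Placeholder contact address, per Crossref's polite-pool convention.
    headers={"User-Agent": "example-client/1.0 (mailto:you@example.org)"},
)
with urllib.request.urlopen(req) as resp:
    # The payload wraps the work record in a "message" envelope,
    # exactly as in the JSON shown above.
    work = json.load(resp)["message"]

# Fields taken directly from the record: title, authors, citation counts.
print(work["title"][0])
print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))
print(f'{work["reference-count"]} references; '
      f'cited {work["is-referenced-by-count"]} times')
```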