{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T06:13:58Z","timestamp":1773814438280,"version":"3.50.1"},"reference-count":42,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62376266"],"award-info":[{"award-number":["62376266"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62406318"],"award-info":[{"award-number":["62406318"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Pattern Recognition"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1016\/j.patcog.2025.112719","type":"journal-article","created":{"date-parts":[[2025,11,8]],"date-time":"2025-11-08T23:44:11Z","timestamp":1762645451000},"page":"112719","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"PD","title":["Resolving sentiment discrepancy for multimodal sentiment detection via semantics completion and 
decomposition"],"prefix":"10.1016","volume":"172","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-2320-387X","authenticated-orcid":false,"given":"Daiqing","family":"Wu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8628-411X","authenticated-orcid":false,"given":"Dongbao","family":"Yang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1016-299X","authenticated-orcid":false,"given":"Huawen","family":"Shen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0004-2307-5002","authenticated-orcid":false,"given":"Can","family":"Ma","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4188-9953","authenticated-orcid":false,"given":"Yu","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"issue":"10","key":"10.1016\/j.patcog.2025.112719_bib0001","doi-asserted-by":"crossref","first-page":"6729","DOI":"10.1109\/TPAMI.2021.3094362","article-title":"Affective image content analysis: two decades review and new perspectives","volume":"44","author":"Zhao","year":"2022","journal-title":"TPAMI"},{"key":"10.1016\/j.patcog.2025.112719_bib0002","first-page":"4014","article-title":"Image-text multimodal emotion classification via multi-View attentional network","volume":"23","author":"Yang","year":"2021","journal-title":"TMM"},{"key":"10.1016\/j.patcog.2025.112719_bib0003","series-title":"ACL","first-page":"5240","article-title":"Tackling modality heterogeneity with multi-view calibration network for multimodal sentiment detection","author":"Wei","year":"2023"},{"issue":"2","key":"10.1016\/j.patcog.2025.112719_bib0004","doi-asserted-by":"crossref","first-page":"617","DOI":"10.1007\/s10115-018-1236-4","article-title":"A survey of sentiment analysis in social media","volume":"60","author":"Yue","year":"2019","journal-title":"Knowl. Inf. Syst."},{"key":"10.1016\/j.patcog.2025.112719_bib0005","series-title":"EMNLP","first-page":"1115","article-title":"Representing social media users for sarcasm detection","author":"Kolchinski","year":"2018"},{"key":"10.1016\/j.patcog.2025.112719_bib0006","article-title":"TETFN: a text enhanced transformer fusion network for multimodal sentiment analysis","volume":"136","author":"Wang","year":"2023","journal-title":"PR"},{"key":"10.1016\/j.patcog.2025.112719_bib0007","series-title":"MMM","first-page":"15","article-title":"Sentiment analysis on multi-View social data","volume":"Vol. 9517","author":"Niu","year":"2016"},{"key":"10.1016\/j.patcog.2025.112719_bib0008","series-title":"ACL","first-page":"2506","article-title":"Multi-modal sarcasm detection in twitter with hierarchical fusion model","author":"Cai","year":"2019"},{"key":"10.1016\/j.patcog.2025.112719_bib0009","series-title":"ACL","first-page":"1746","article-title":"Convolutional neural networks for sentence classification","author":"Kim","year":"2014"},{"issue":"1","key":"10.1016\/j.patcog.2025.112719_bib0010","doi-asserted-by":"crossref","first-page":"147","DOI":"10.1109\/TAFFC.2019.2949559","article-title":"Micro and macro facial expression recognition using advanced local motion patterns","volume":"13","author":"Allaert","year":"2022","journal-title":"IEEE Trans. Affect. 
Comput."},{"key":"10.1016\/j.patcog.2025.112719_bib0011","series-title":"CBMI","first-page":"1","article-title":"Motion consistency constraint map for facial expression spotting","author":"Jemaa","year":"2024"},{"key":"10.1016\/j.patcog.2025.112719_bib0012","series-title":"MM","first-page":"5780","article-title":"Robust multimodal sentiment analysis of image-text pairs by distribution-based feature recovery and fusion","author":"Wu","year":"2024"},{"key":"10.1016\/j.patcog.2025.112719_bib0013","series-title":"ICML","article-title":"An empirical study on configuring in-context learning demonstrations for unleashing MLLMs\u2019 sentimental perception capability","author":"Wu","year":"2025"},{"key":"10.1016\/j.patcog.2025.112719_bib0014","article-title":"Co-attention fusion network for multimodal skin cancer diagnosis","volume":"133","author":"He","year":"2023","journal-title":"PR"},{"key":"10.1016\/j.patcog.2025.112719_bib0015","series-title":"CIKM","first-page":"2399","article-title":"MultiSentiNet: a deep semantic network for multimodal sentiment analysis","author":"Xu","year":"2017"},{"key":"10.1016\/j.patcog.2025.112719_bib0016","series-title":"ISI","first-page":"152","article-title":"Analyzing multimodal public sentiment based on hierarchical semantic attentional network","author":"Xu","year":"2017"},{"key":"10.1016\/j.patcog.2025.112719_bib0017","series-title":"SIGIR","first-page":"929","article-title":"A co-memory network for multimodal sentiment analysis","author":"Xu","year":"2018"},{"key":"10.1016\/j.patcog.2025.112719_bib0018","series-title":"AAAI","first-page":"371","article-title":"Multi-interactive memory network for aspect based multimodal sentiment analysis","author":"Xu","year":"2019"},{"key":"10.1016\/j.patcog.2025.112719_bib0019","series-title":"ACL","first-page":"328","article-title":"Multimodal sentiment detection based on multi-channel graph neural networks","author":"Yang","year":"2021"},{"key":"10.1016\/j.patcog.2025.112719_bib0020","series-title":"Findings of NAACL","first-page":"2282","article-title":"CLMLF: a contrastive learning and multi-layer fusion method for multimodal sentiment detection","author":"Li","year":"2022"},{"key":"10.1016\/j.patcog.2025.112719_bib0021","series-title":"MM","first-page":"602","article-title":"Bridging visual affective gap: borrowing textual knowledge by learning from noisy image-text pairs","author":"Wu","year":"2024"},{"key":"10.1016\/j.patcog.2025.112719_bib0022","article-title":"MPCCT: multimodal vision-language learning paradigm with context-based compact transformer","volume":"147","author":"Chen","year":"2024","journal-title":"PR"},{"key":"10.1016\/j.patcog.2025.112719_bib0023","article-title":"Crisis event summary generative model based on hierarchical multimodal fusion","volume":"144","author":"Wang","year":"2023","journal-title":"PR"},{"key":"10.1016\/j.patcog.2025.112719_bib0024","series-title":"CVPR","first-page":"2540","article-title":"DIP: dual incongruity perceiving network for sarcasm detection","author":"Wen","year":"2023"},{"key":"10.1016\/j.patcog.2025.112719_bib0025","series-title":"MM","first-page":"1122","article-title":"MISA: modality-invariant and -specific representations for multimodal sentiment analysis","author":"Hazarika","year":"2020"},{"key":"10.1016\/j.patcog.2025.112719_bib0026","series-title":"NeurIPS","first-page":"9694","article-title":"Align before fuse: vision and language representation learning with momentum 
distillation","author":"Li","year":"2021"},{"key":"10.1016\/j.patcog.2025.112719_bib0027","series-title":"CVPR","first-page":"14667","article-title":"Multimodal categorization of crisis events in social media","author":"Abavisani","year":"2020"},{"key":"10.1016\/j.patcog.2025.112719_bib0028","series-title":"AAAI","first-page":"9100","article-title":"Tailor versatile multi-modal learning for multi-label emotion recognition","author":"Zhang","year":"2022"},{"issue":"5","key":"10.1016\/j.patcog.2025.112719_bib0029","first-page":"1063","article-title":"Words matter: scene text for image classification and retrieval","volume":"19","author":"Karaoglu","year":"2017","journal-title":"TMM"},{"key":"10.1016\/j.patcog.2025.112719_bib0030","series-title":"ICCV","first-page":"2861","article-title":"TextPlace: visual place recognition and topological localization through reading scene texts","author":"Hong","year":"2019"},{"key":"10.1016\/j.patcog.2025.112719_bib0031","series-title":"CVPR","first-page":"5174","article-title":"ViSTA: vision and scene text aggregation for cross-modal retrieval","author":"Cheng","year":"2022"},{"key":"10.1016\/j.patcog.2025.112719_bib0032","series-title":"AAAI","first-page":"11474","article-title":"Real-time scene text detection with differentiable binarization","author":"Liao","year":"2020"},{"key":"10.1016\/j.patcog.2025.112719_bib0033","series-title":"AAAI","first-page":"8610","article-title":"Show, attend and read: a simple and strong baseline for irregular text recognition","author":"Li","year":"2019"},{"key":"10.1016\/j.patcog.2025.112719_bib0034","series-title":"ICCV","first-page":"9992","article-title":"Swin transformer: hierarchical vision transformer using shifted windows","author":"Liu","year":"2021"},{"key":"10.1016\/j.patcog.2025.112719_bib0035","series-title":"NAACL","first-page":"4171","article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2019"},{"issue":"3\u20134","key":"10.1016\/j.patcog.2025.112719_bib0036","doi-asserted-by":"crossref","first-page":"169","DOI":"10.1080\/02699939208411068","article-title":"An argument for basic emotions","volume":"6","author":"Ekman","year":"1992","journal-title":"Cognit. 
Emotion"},{"key":"10.1016\/j.patcog.2025.112719_bib0037","series-title":"ACL","article-title":"Attention-based bidirectional long short-term memory networks for relation classification","author":"Zhou","year":"2016"},{"key":"10.1016\/j.patcog.2025.112719_bib0038","series-title":"CVPR","first-page":"770","article-title":"Deep residual learning for image recognition","author":"He","year":"2016"},{"key":"10.1016\/j.patcog.2025.112719_bib0039","series-title":"ICLR","article-title":"An image is worth 16x16 words: transformers for image recognition at scale","author":"Dosovitskiy","year":"2021"},{"key":"10.1016\/j.patcog.2025.112719_bib0040","series-title":"IJCAI","first-page":"5408","article-title":"Adapting BERT for target-oriented multimodal sentiment classification","author":"Yu","year":"2019"},{"key":"10.1016\/j.patcog.2025.112719_bib0041","series-title":"AAAI","first-page":"5674","article-title":"Adaptive co-attention network for named entity recognition in tweets","author":"Zhang","year":"2018"},{"key":"10.1016\/j.patcog.2025.112719_bib0042","series-title":"ACL","first-page":"1990","article-title":"Visual attention model for name tagging in multimodal social media","author":"Lu","year":"2018"}],"container-title":["Pattern Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0031320325013822?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0031320325013822?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T05:19:22Z","timestamp":1773811162000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0031320325013822"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4]]},"references-count":42,"alternative-id":["S0031320325013822"],"URL":"https:\/\/doi.org\/10.1016\/j.patcog.2025.112719","relation":{},"ISSN":["0031-3203"],"issn-type":[{"value":"0031-3203","type":"print"}],"subject":[],"published":{"date-parts":[[2026,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Resolving sentiment discrepancy for multimodal sentiment detection via semantics completion and decomposition","name":"articletitle","label":"Article Title"},{"value":"Pattern Recognition","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.patcog.2025.112719","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2025 Elsevier Ltd. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"112719"}}