{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T03:48:09Z","timestamp":1774928889473,"version":"3.50.1"},"reference-count":34,"publisher":"Springer Science and Business Media LLC","issue":"24","license":[{"start":{"date-parts":[[2024,9,25]],"date-time":"2024-09-25T00:00:00Z","timestamp":1727222400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,25]],"date-time":"2024-09-25T00:00:00Z","timestamp":1727222400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1007\/s10489-024-05841-z","type":"journal-article","created":{"date-parts":[[2024,9,25]],"date-time":"2024-09-25T05:02:14Z","timestamp":1727240534000},"page":"12629-12643","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":8,"title":["FMCF: Few-shot Multimodal aspect-based sentiment analysis framework based on Contrastive Finetuning"],"prefix":"10.1007","volume":"54","author":[{"given":"Yongping","family":"Du","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0002-1751-086X","authenticated-orcid":false,"given":"Runfeng","family":"Xie","sequence":"additional","affiliation":[]},{"given":"Bochao","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Zihao","family":"Yin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,25]]},"reference":[{"issue":"13s","key":"5841_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3586075","volume":"55","author":"R Das","year":"2023","unstructured":"Das R, Singh TD (2023) Multimodal sentiment analysis: a survey of methods, trends, and challenges. ACM Computing Surveys 55(13s):1\u201338","journal-title":"ACM Computing Surveys"},{"key":"5841_CR2","doi-asserted-by":"crossref","unstructured":"Li Z, Xu B, Zhu C, Zhao T (2022) CLMLF: a contrastive learning and multi-layer fusion method for multimodal sentiment detection. Findings of the Association for Computational Linguistics: NAACL 2022, pp 2282\u20132294","DOI":"10.18653\/v1\/2022.findings-naacl.175"},{"key":"5841_CR3","doi-asserted-by":"crossref","unstructured":"Ye J, Zhou J, Tian J, Wang R, Zhang Q, Gui T, Huang XJ (2023) RethinkingTMSC: an empirical study for target-oriented multimodal sentiment classification. In: Findings of the Association for Computational Linguistics: Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp 270\u2013277","DOI":"10.18653\/v1\/2023.findings-emnlp.21"},{"key":"5841_CR4","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.127222","volume":"573","author":"J Yang","year":"2024","unstructured":"Yang J, Xu M, Xiao Y, Du X (2024) AMIFN: aspect-guided multi-view interactions and fusion network for multimodal aspect-based sentiment analysis. Neurocomputing 573:127222","journal-title":"Neurocomputing"},{"key":"5841_CR5","doi-asserted-by":"crossref","unstructured":"Ling Y, Yu J, Xia R (2022) May Vision-Language Pre-Training for Multimodal Aspect-Based Sentiment Analysis. 
In: Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp 2149\u20132159","DOI":"10.18653\/v1\/2022.acl-long.152"},{"issue":"11","key":"5841_CR6","doi-asserted-by":"publisher","first-page":"11019","DOI":"10.1109\/TKDE.2022.3230975","volume":"35","author":"W Zhang","year":"2023","unstructured":"Zhang W, Li X, Deng Y, Bing L, Lam W (2023) A survey on aspect-based sentiment analysis: tasks, methods, and challenges. IEEE Trans Knowl Data Eng 35(11):11019\u201311038","journal-title":"IEEE Trans Knowl Data Eng"},{"key":"5841_CR7","doi-asserted-by":"crossref","unstructured":"Fan F, Feng Y, Zhao D (2018) Multi-grained attention network for aspect-level sentiment classification. Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp 3433\u20133442","DOI":"10.18653\/v1\/D18-1380"},{"key":"5841_CR8","doi-asserted-by":"crossref","unstructured":"Zhang C, Li Q, Song D (2019) Aspect-based sentiment classification with aspect-specific graph convolutional networks. Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp 4568\u20134578","DOI":"10.18653\/v1\/D19-1464"},{"key":"5841_CR9","first-page":"385","volume":"380","author":"C Sun","year":"2019","unstructured":"Sun C, Huang L, Qiu X (2019) Utilizing BERT for aspect-based sentiment analysis via constructing auxiliary sentence. Proc NAACL-HLT 380:385","journal-title":"Proc NAACL-HLT"},{"key":"5841_CR10","unstructured":"Kenton JDMWC, Toutanova LK (2019) Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of naacL-HLT, vol 1. p 2"},{"key":"5841_CR11","doi-asserted-by":"crossref","unstructured":"Xu N, Mao W (2017) Multisentinet: a deep semantic network for multimodal sentiment analysis. Proceedings of the 2017 ACM on Conference on Information and Knowledge Management, pp 2399\u20132402","DOI":"10.1145\/3132847.3133142"},{"key":"5841_CR12","doi-asserted-by":"crossref","unstructured":"Truong Q-T, Lauw HW (2019) VistaNet: visual aspect attention network for multimodal sentiment analysis. Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence and Thirty-First Innovative Applications of Artificial Intelligence Conference and Ninth AAAI Symposium on Educational Advances in Artificial Intelligence, pp 305\u2013312","DOI":"10.1609\/aaai.v33i01.3301305"},{"key":"5841_CR13","doi-asserted-by":"crossref","unstructured":"YU J, JIANG J (2019) Adapting BERT for target-oriented multimodal sentiment classification. Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, pp 5408\u20135414","DOI":"10.24963\/ijcai.2019\/751"},{"key":"5841_CR14","doi-asserted-by":"crossref","unstructured":"Xu N, Mao W, Chen G (2019) Multi-interactive memory network for aspect based multimodal sentiment analysis. Proc AAAI Conf Artif Intell 33(01):371\u2013378","DOI":"10.1609\/aaai.v33i01.3301371"},{"key":"5841_CR15","doi-asserted-by":"crossref","unstructured":"Khan Z, Fu Y (2021) Exploiting BERT for multimodal target sentiment classification through input space translation. Proceedings of the 29th ACM International Conference on Multimedia, pp 3034\u20133042","DOI":"10.1145\/3474085.3475692"},{"key":"5841_CR16","doi-asserted-by":"crossref","unstructured":"Carion N, Massa F, Synnaeve G, Usunier N, Kirillov A, Zagoruyko S (2020) End-to-end object detection with transformers. 
European Conference on Computer Vision, pp 213\u2013229","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"5841_CR17","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser \u0141, Polosukhin I (2017) Attention is all you need. Advances in Neural Information Processing Systems, 30"},{"key":"5841_CR18","doi-asserted-by":"crossref","unstructured":"Li Z, Sun Q, Guo Q, Wu H, Deng L, Zhang Q, \u2026, Chen Y (2021) Visual sentiment analysis based on image caption and adjective\u2013noun\u2013pair description. Soft Computing 1\u201313","DOI":"10.1007\/s00500-021-06530-6"},{"key":"5841_CR19","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.117575","volume":"204","author":"R Das","year":"2022","unstructured":"Das R, Singh TD (2022) A multi-stage multimodal framework for sentiment analysis of assamese in low resource setting. Expert Syst Appl 204:117575","journal-title":"Expert Syst Appl"},{"key":"5841_CR20","doi-asserted-by":"crossref","unstructured":"Lester B, Al-Rfou R, Constant N (2021) The power of scale for parameter-efficient prompt tuning. Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp 3045\u20133059","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"5841_CR21","doi-asserted-by":"crossref","unstructured":"Schick T, Sch\u00fctze H (2021) It\u2019s Not Just Size That Matters: Small language models are also few-shot learners. Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp 2339\u20132352","DOI":"10.18653\/v1\/2021.naacl-main.185"},{"key":"5841_CR22","doi-asserted-by":"crossref","unstructured":"Tam D, Menon RR, Bansal M, Srivastava S, Raffel C (2021) Improving and Simplifying Pattern Exploiting Training. Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp 4980\u20134991","DOI":"10.18653\/v1\/2021.emnlp-main.407"},{"key":"5841_CR23","unstructured":"Tunstall L, Reimers N, Jo UES, Bates L, Korat D, Wasserblat M, Pereg O (2022) Efficient few-shot learning without prompts. ArXiv Preprint ArXiv:2209.11055"},{"issue":"8","key":"5841_CR24","doi-asserted-by":"publisher","first-page":"8761","DOI":"10.1007\/s10489-022-03896-4","volume":"53","author":"R Song","year":"2023","unstructured":"Song R, Liu Z, Chen X, An H, Zhang Z, Wang X, Xu H (2023) Label prompt for multi-label text classification. Appl Intell 53(8):8761\u20138775","journal-title":"Appl Intell"},{"key":"5841_CR25","doi-asserted-by":"crossref","unstructured":"Hu S, Ding N, Wang H, Liu Z, Wang J, Li J, Wu W, Sun M (2022) Knowledgeable prompt-tuning: incorporating knowledge into prompt verbalizer for text classification. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp 2225\u20132240","DOI":"10.18653\/v1\/2022.acl-long.158"},{"key":"5841_CR26","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"5841_CR27","doi-asserted-by":"crossref","unstructured":"Reimers N, Gurevych I (2019) Sentence-BERT: sentence embeddings using siamese BERT-Networks. 
Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp 3982\u20133992","DOI":"10.18653\/v1\/D19-1410"},{"key":"5841_CR28","first-page":"18661","volume":"33","author":"P Khosla","year":"2020","unstructured":"Khosla P, Teterwak P, Wang C, Sarna A, Tian Y, Isola P, Maschinot A, Liu C, Krishnan D (2020) Supervised contrastive learning. Adv Neural Inf Process Syst 33:18661\u201318673","journal-title":"Adv Neural Inf Process Syst"},{"issue":"1","key":"5841_CR29","doi-asserted-by":"publisher","DOI":"10.3390\/technologies9010002","volume":"9","author":"A Jaiswal","year":"2020","unstructured":"Jaiswal A, Babu AR, Zadeh MZ, Banerjee D, Makedon F (2020) A survey on contrastive self-supervised learning. Technologies 9(1):2","journal-title":"Technologies"},{"key":"5841_CR30","doi-asserted-by":"crossref","unstructured":"Zhang Q, Fu J, Liu X, Huang X (2018) Adaptive co-attention network for named entity recognition in tweets. Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence and Thirtieth Innovative Applications of Artificial Intelligence Conference and Eighth AAAI Symposium on Educational Advances in Artificial Intelligence, pp 5674\u20135681","DOI":"10.1609\/aaai.v32i1.11962"},{"key":"5841_CR31","doi-asserted-by":"crossref","unstructured":"Lu D, Neves L, Carvalho V, Zhang N, Ji H (2018) Visual attention model for name tagging in multimodal social media. Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp 1990\u20131999","DOI":"10.18653\/v1\/P18-1185"},{"key":"5841_CR32","doi-asserted-by":"crossref","unstructured":"Seoh R, Birle I, Tak M, Chang H-S, Pinette B, Hough A (2021) Open Aspect Target Sentiment Classification with Natural Language Prompts. Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pp 6311\u20136322","DOI":"10.18653\/v1\/2021.emnlp-main.509"},{"key":"5841_CR33","doi-asserted-by":"publisher","first-page":"47","DOI":"10.1016\/j.neucom.2021.05.040","volume":"455","author":"J Zhou","year":"2021","unstructured":"Zhou J, Zhao J, Huang JX, Hu QV, He L (2021) MASAD: a large-scale dataset for multimodal aspect-based sentiment analysis. Neurocomputing 455:47\u201358","journal-title":"Neurocomputing"},{"issue":"9","key":"5841_CR34","doi-asserted-by":"publisher","first-page":"2337","DOI":"10.1007\/s11263-022-01653-1","volume":"130","author":"K Zhou","year":"2022","unstructured":"Zhou K, Yang J, Loy CC, Liu Z (2022) Learning to prompt for vision-language models. 
Int J Comput Vision 130(9):2337\u20132348","journal-title":"Int J Comput Vision"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-05841-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-024-05841-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-05841-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,12]],"date-time":"2024-11-12T02:04:35Z","timestamp":1731377075000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-024-05841-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,25]]},"references-count":34,"journal-issue":{"issue":"24","published-print":{"date-parts":[[2024,12]]}},"alternative-id":["5841"],"URL":"https:\/\/doi.org\/10.1007\/s10489-024-05841-z","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,25]]},"assertion":[{"value":"1 September 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 September 2024","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Not applicable.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}
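The record above follows the standard Crossref work schema: the payload sits under "message", with the bibliographic fields ("title", "author", "container-title", "reference", and so on) nested inside it. As a minimal sketch of how such a record can be retrieved and read, the Python snippet below queries the public Crossref REST API (api.crossref.org/works/<DOI>) for this article's DOI and prints a few fields whose names are taken directly from the record; it assumes the third-party requests package is installed and that every author entry carries "given" and "family" keys, as in this record.

# Minimal sketch: fetch the Crossref work record shown above and read key fields.
# Assumes the `requests` package is installed and the public Crossref REST API is reachable.
import requests

DOI = "10.1007/s10489-024-05841-z"  # DOI taken from the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the payload sits under "message", as in the record above

title = work["title"][0]                       # "FMCF: Few-shot Multimodal ..."
authors = [f'{a["given"]} {a["family"]}' for a in work["author"]]
journal = work["container-title"][0]           # "Applied Intelligence"
pages = work.get("page")                       # "12629-12643"
n_refs = work.get("references-count")          # 34
cited_by = work.get("is-referenced-by-count")  # citation count at indexing time

print(title)
print(", ".join(authors))
print(f"{journal} {work.get('volume')}({work.get('issue')}): {pages}")
print(f"references: {n_refs}, cited by: {cited_by}")

Note that "is-referenced-by-count" changes over time as Crossref re-indexes the record, so a live query may return a value different from the 8 shown in the snapshot above.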