{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T14:23:07Z","timestamp":1774621387977,"version":"3.50.1"},"reference-count":47,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2026,2,22]],"date-time":"2026-02-22T00:00:00Z","timestamp":1771718400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T00:00:00Z","timestamp":1774569600000},"content-version":"vor","delay-in-days":33,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"name":"Symbiosis International"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Discov Artif Intell"],"DOI":"10.1007\/s44163-026-01023-7","type":"journal-article","created":{"date-parts":[[2026,2,22]],"date-time":"2026-02-22T03:23:58Z","timestamp":1771730638000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Context aware hierarchical alignment for robust multimodal three stream sentiment analysis"],"prefix":"10.1007","volume":"6","author":[{"given":"Mudigonda Krishna Siva","family":"Prasad","sequence":"first","affiliation":[]},{"given":"Manoj","family":"Pennada","sequence":"additional","affiliation":[]},{"given":"Kranthi Kumar","family":"Singamaneni","sequence":"additional","affiliation":[]},{"given":"S. K.","family":"Prakalya","sequence":"additional","affiliation":[]},{"given":"Ch","family":"Rushitha","sequence":"additional","affiliation":[]},{"given":"G. 
Bhavana","family":"Mallesh","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,2,22]]},"reference":[{"key":"1023_CR1","doi-asserted-by":"publisher","first-page":"1331642","DOI":"10.3389\/frsc.2024.1331642","volume":"6","author":"M Acharya","year":"2024","unstructured":"Acharya M, Mohbey KK. Recency-based spatio-temporal similarity exploration for POI recommendation in location-based social networks. Front Sustain Cities. 2024;6:1331642.","journal-title":"Front Sustain Cities"},{"key":"1023_CR2","doi-asserted-by":"publisher","first-page":"11584","DOI":"10.1109\/ACCESS.2024.3354934","volume":"12","author":"M Acharya","year":"2024","unstructured":"Acharya M, Mohbey KK, Rajput DS. Long-term preference mining with temporal and spatial fusion for point-of-interest recommendation. IEEE Access. 2024;12:11584\u201396.","journal-title":"IEEE Access"},{"key":"1023_CR3","doi-asserted-by":"crossref","unstructured":"Aipe A, Mukuntha NS, Ekbal A. Sentiment-aware recommendation system for healthcare using social media. In International conference on computational linguistics and intelligent text processing Cham: Springer Nature Switzerland. pp. 166\u2013181;2019","DOI":"10.1007\/978-3-031-24340-0_13"},{"key":"1023_CR4","doi-asserted-by":"crossref","unstructured":"Aipe A, Mukuntha NS, Ekbal A. Sentiment-aware recommendation system for healthcare using social media. In International conference on computational linguistics and intelligent text processing. (pp. 166\u2013181). Cham: Springer Nature Switzerland. 2019.","DOI":"10.1007\/978-3-031-24340-0_13"},{"key":"1023_CR5","doi-asserted-by":"publisher","first-page":"2126","DOI":"10.1038\/s41598-025-85859-6","volume":"15","author":"Y Cai","year":"2025","unstructured":"Cai Y, Li X, Zhang Y, et al. Multimodal sentiment analysis based on multi-layer feature fusion and multi-task learning. Sci Rep. 
2025;15:2126.","journal-title":"Sci Rep"},{"issue":"7","key":"1023_CR6","doi-asserted-by":"publisher","first-page":"934","DOI":"10.3390\/sym16070934","volume":"16","author":"Q Chen","year":"2024","unstructured":"Chen Q, Dong S, Wang P. Advanced multimodal sentiment analysis with enhanced contextual fusion and robustness (AMSA-ECFR): symmetry in feature integration and data alignment. Symmetry. 2024;16(7):934.","journal-title":"Symmetry"},{"issue":"7","key":"1023_CR7","doi-asserted-by":"publisher","first-page":"934","DOI":"10.3390\/sym16070934","volume":"16","author":"Q Chen","year":"2024","unstructured":"Chen Q, Dong S, Wang P. Advanced multimodal sentiment analysis with enhanced contextual fusion and robustness (AMSA-ECFR): symmetry in feature integration and data alignment. Symmetry. 2024;16(7):934.","journal-title":"Symmetry"},{"issue":"13s","key":"1023_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3586075","volume":"55","author":"R Das","year":"2023","unstructured":"Das R, Singh TD. Multimodal sentiment analysis: a survey of methods, trends, and challenges. ACM Comput Surv. 2023;55(13s):1\u201338.","journal-title":"ACM Comput Surv"},{"key":"1023_CR9","unstructured":"Devamanyu H, Roger Z, Soujanya P. MISA: Modality-invariant and -specific representations for multimodal sentiment analysis. arXiv preprint arXiv:2005.03545. 2020. Available from: https:\/\/arxiv.org\/abs\/2005.03545."},{"key":"1023_CR10","doi-asserted-by":"crossref","unstructured":"Fang L, Liu G, Zhang R. Sense-aware Bert and multi-task fine-tuning for multimodal sentiment analysis. In 2022 International joint conference on neural networks (IJCNN) 2022;1\u20138.","DOI":"10.1109\/IJCNN55064.2022.9892116"},{"key":"1023_CR11","doi-asserted-by":"crossref","unstructured":"Faria FTJ, Baniata LH, Baniata MH, Khair MA, Ata AIB, Bunterngchit C, et al. 
SentimentFormer a transformer-based multi-modal fusion framework for enhanced sentiment analysis of memes in under-resourced Bangla Language. 2025;14(4):799.","DOI":"10.3390\/electronics14040799"},{"issue":"1","key":"1023_CR12","doi-asserted-by":"publisher","first-page":"22270","DOI":"10.1038\/s41598-024-73452-2","volume":"14","author":"R Geethanjali","year":"2024","unstructured":"Geethanjali R, Valarmathi A. A novel hybrid deep learning IChOA-CNN-LSTM model for modality-enriched and multilingual emotion recognition in social media. Sci Rep. 2024;14(1):22270.","journal-title":"Sci Rep"},{"key":"1023_CR13","doi-asserted-by":"publisher","DOI":"10.1016\/j.compeleceng.2025.110627","volume":"127","author":"S Han","year":"2025","unstructured":"Han S, Li F, Han X, Zhang S. A novel feature engineering method for network anomaly detection. Comput Electr Eng. 2025;127:110627.","journal-title":"Comput Electr Eng"},{"key":"1023_CR14","unstructured":"http:\/\/multicomp.cs.cmu.edu\/resources\/cmu-mosei-dataset"},{"key":"1023_CR15","unstructured":"http:\/\/multicomp.cs.cmu.edu\/resources\/cmu-mosi-dataset"},{"issue":"16","key":"1023_CR16","doi-asserted-by":"publisher","first-page":"3504","DOI":"10.3390\/electronics12163504","volume":"12","author":"J Huang","year":"2023","unstructured":"Huang J, Lu P, Sun S, Wang F. Multimodal sentiment analysis in realistic environments based on cross-modal hierarchical fusion network. Electronics. 2023;12(16):3504.","journal-title":"Electronics"},{"issue":"16","key":"1023_CR17","doi-asserted-by":"publisher","first-page":"3504","DOI":"10.3390\/electronics12163504","volume":"12","author":"J Huang","year":"2023","unstructured":"Huang J, Lu P, Sun S, Wang F. Multimodal sentiment analysis in realistic environments based on cross-modal hierarchical fusion network. Electronics. 2023;12(16):3504.","journal-title":"Electronics"},{"key":"1023_CR18","unstructured":"Hubert TY-H, Shaojie B, Liang Paul P, Zico KJ, Louis-Philippe M, Ruslan S. 
Multimodal transformer for unaligned multimodal language sequences. arXiv preprint arXiv:1906.00295. 2019. Available from: https:\/\/arxiv.org\/abs\/1906.00295."},{"key":"1023_CR19","doi-asserted-by":"crossref","unstructured":"Hu A, Flaxman S. Multimodal sentiment analysis to explore the structure of emotions. In proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining 2018;350\u20138.","DOI":"10.1145\/3219819.3219853"},{"issue":"4","key":"1023_CR20","doi-asserted-by":"publisher","first-page":"330","DOI":"10.1016\/j.jksues.2016.04.002","volume":"30","author":"DMEDM Hussein","year":"2018","unstructured":"Hussein DMEDM. A survey on sentiment analysis challenges. J King Saud Univ Eng Sci. 2018;30(4):330\u20138.","journal-title":"J King Saud Univ Eng Sci"},{"key":"1023_CR21","unstructured":"Junyoung C, Caglar G, Kyunghyun C, Yoshua B. Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555. 2014. Available from: https:\/\/arxiv.org\/abs\/1412.3555"},{"key":"1023_CR22","doi-asserted-by":"publisher","first-page":"38","DOI":"10.4018\/IJSSMET.2019040103","volume":"10","author":"R Kaur","year":"2019","unstructured":"Kaur R, Kautish S. Multimodal sentiment analysis: a survey and comparison. Int J Serv Sci Manag Eng Technol. 2019;10:38\u201358. https:\/\/doi.org\/10.4018\/IJSSMET.2019040103.","journal-title":"Int J Serv Sci Manag Eng Technol"},{"key":"1023_CR23","doi-asserted-by":"publisher","first-page":"24103","DOI":"10.1007\/s11042-019-7390-1","volume":"78","author":"A Kumar","year":"2019","unstructured":"Kumar A, Garg G. Sentiment analysis of multimodal twitter data. Multimedia Tools Appl. 2019;78:24103\u201319.","journal-title":"Multimedia Tools Appl"},{"key":"1023_CR24","doi-asserted-by":"crossref","unstructured":"Kyunghyun C, Bart VM, Caglar G, Dzmitry B, Fethi B, Holger S, et al. Learning phrase representations using RNN encoder-decoder for statistical machine translation. 
In: Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP). 2014;1724\u201334.","DOI":"10.3115\/v1\/D14-1179"},{"key":"1023_CR25","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.111457","volume":"287","author":"Y Li","year":"2024","unstructured":"Li Y, Li Y, Zhang S, Liu G, Chen Y, Shang R, et al. An attention-based, context-aware multimodal fusion method for sarcasm detection using inter-modality inconsistency. Knowl-Based Syst. 2024;287:111457.","journal-title":"Knowl-Based Syst"},{"key":"1023_CR26","doi-asserted-by":"crossref","unstructured":"Liu Y, Yu J, Chu Z, Li S, Du X. Unsupervised Dual-discriminative graph neural network for anomaly detection. In 2025 international joint conference on neural networks (IJCNN) (pp. 1\u20139). IEEE. 2025.","DOI":"10.1109\/IJCNN64981.2025.11228687"},{"key":"1023_CR27","doi-asserted-by":"crossref","unstructured":"Li J, Wang S, Chao Y, Liu X, Meng H. Context-aware multimodal fusion for emotion recognition. In INTERSPEECH. 2022;2013\u20137.","DOI":"10.21437\/Interspeech.2022-10592"},{"key":"1023_CR28","doi-asserted-by":"crossref","unstructured":"Lopes V, Gaspar A, Alexandre LA, Cordeiro J. An AutoML-based approach to multimodal image sentiment analysis. In 2021 International joint conference on neural networks (IJCNN). IEEE. 2021;1\u20139.","DOI":"10.1109\/IJCNN52387.2021.9533552"},{"key":"1023_CR29","doi-asserted-by":"publisher","first-page":"124","DOI":"10.1016\/j.knosys.2018.07.041","volume":"161","author":"N Majumder","year":"2018","unstructured":"Majumder N, Hazarika D, Gelbukh A, Cambria E, Poria S. Multimodal sentiment analysis using hierarchical fusion with context modeling. Knowl-Based Syst. 2018;161:124\u201333.","journal-title":"Knowl-Based Syst"},{"issue":"6","key":"1023_CR30","first-page":"1","volume":"16","author":"J Mu","year":"2024","unstructured":"Mu J, Wang W, Liu W, Yan T, Wang G. 
Multimodal large language model with LoRA fine-tuning for multimodal sentiment analysis. ACM Trans Intell Sys Technol. 2024;16(6):1\u201323.","journal-title":"ACM Trans Intell Sys Technol"},{"key":"1023_CR31","doi-asserted-by":"crossref","unstructured":"Nguyen KN, Le-Duc K, Tat BP, Le D, Vo-Dang L, Hy TS. Sentiment reasoning for healthcare. 2024. arXiv preprint arXiv:2407.21054","DOI":"10.18653\/v1\/2025.acl-industry.82"},{"issue":"2","key":"1023_CR32","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s42001-025-00374-y","volume":"8","author":"Z Pakdaman","year":"2025","unstructured":"Pakdaman Z, Koochari A, Sharifi A. Content-aware sentiment understanding: cross-modal analysis with encoder-decoder architectures. J Comput Soc Sci. 2025;8(2):1\u201324.","journal-title":"J Comput Soc Sci"},{"issue":"18","key":"1023_CR33","first-page":"54249","volume":"83","author":"A Paul","year":"2024","unstructured":"Paul A, Nayyar A. A context-sensitive multi-tier deep learning framework for multimodal sentiment analysis. Multimedia Tools Appl. 2024;83(18):54249\u201378.","journal-title":"Multimedia Tools Appl"},{"issue":"1","key":"1023_CR34","doi-asserted-by":"publisher","first-page":"104","DOI":"10.1007\/s13278-022-00910-y","volume":"12","author":"S Sivakumar","year":"2022","unstructured":"Sivakumar S, Rajalakshmi R. Context-aware sentiment analysis with attention-enhanced features from bidirectional transformers. Soc Netw Anal Min. 2022;12(1):104.","journal-title":"Soc Netw Anal Min"},{"issue":"1","key":"1023_CR35","doi-asserted-by":"publisher","first-page":"325","DOI":"10.1146\/annurev-linguistics-011415-040518","volume":"2","author":"M Taboada","year":"2016","unstructured":"Taboada M. Sentiment analysis: an overview from linguistics. Annual Review of Linguist. 
2016;2(1):325\u201347.","journal-title":"Annual Review of Linguist"},{"key":"1023_CR36","first-page":"469","volume-title":"Multimodal sentiment analysis of social media content and its impact on mental wellbeing&association for computing machinery","author":"R Thareja","year":"2024","unstructured":"Thareja R. Multimodal sentiment analysis of social media content and its impact on mental wellbeing&association for computing machinery. New York: NY, USA; 2024. p. 469\u201373."},{"key":"1023_CR37","first-page":"2514","volume":"2020","author":"Z Wang","year":"2020","unstructured":"Wang Z, Wan Z, Wan X. Transmodality: an end2end fusion method with transformer for multimodal sentiment analysis. Proceed Web Conf. 2020;2020:2514\u201320.","journal-title":"Proceed Web Conf"},{"issue":"5","key":"1023_CR38","doi-asserted-by":"publisher","first-page":"2679","DOI":"10.3390\/s23052679","volume":"23","author":"H Wang","year":"2023","unstructured":"Wang H, Li X, Ren Z, Wang M, Ma C. Multimodal sentiment analysis representations learning via contrastive learning with condense attention fusion. Sensors. 2023;23(5):2679. https:\/\/doi.org\/10.3390\/s23052679.","journal-title":"Sensors"},{"issue":"5","key":"1023_CR39","doi-asserted-by":"publisher","first-page":"2679","DOI":"10.3390\/s23052679","volume":"23","author":"H Wang","year":"2023","unstructured":"Wang H, Li X, Ren Z, Wang M, Ma C. Multimodal sentiment analysis representations learning via contrastive learning with condense attention fusion. Sensors. 2023;23(5):2679.","journal-title":"Sensors"},{"key":"1023_CR40","unstructured":"Wasifur R, Hasan Md, Kamrul LS, Amir Z, Chengfeng M, Louis-Philippe M, et al. Integrating multimodal information in large pretrained transformers. arXiv preprint arXiv:1908.05787. 2019. Available from: https:\/\/arxiv.org\/abs\/1908.05787."},{"key":"1023_CR41","unstructured":"Wu Z, Gong Z, Koo J, Hirschberg J. Multimodal multi-loss fusion network for sentiment analysis. 2023. 
arXiv preprint arXiv:2308.00264."},{"key":"1023_CR42","doi-asserted-by":"publisher","DOI":"10.1088\/1742-6596\/1748\/3\/032054","volume":"1748","author":"Y Xing","year":"2021","unstructured":"Xing Y, Changhui L, Xiaodong F. Sentiment analysis based on BiGRU information enhancement. J Phys: Conf Ser. 2021;1748:032054. https:\/\/doi.org\/10.1088\/1742-6596\/1748\/3\/032054.","journal-title":"J Phys: Conf Ser"},{"key":"1023_CR43","doi-asserted-by":"crossref","unstructured":"Xueming Y, Haiwei X, Shengyi J, Ziang L. Multimodal sentiment analysis using multi-tensor fusion network with cross-modal modeling. Appl Artif Intell. 2021 Available from: https:\/\/doi.org\/10.1080\/08839514.2021.2000688.","DOI":"10.1080\/08839514.2021.2000688"},{"key":"1023_CR44","unstructured":"Yangmin L, Ruiqi Z, Wengen L. CorMulT: A semi-supervised modality correlation-aware multimodal transformer for sentiment analysis. arXiv preprint arXiv:2407.07046. 2024. Available from: https:\/\/arxiv.org\/abs\/2407.07046."},{"key":"1023_CR45","doi-asserted-by":"publisher","first-page":"0081","DOI":"10.34133\/icomputing.0081","volume":"3","author":"G Yi","year":"2024","unstructured":"Yi G, Fan C, Tao J, Lv Z, Wen Z, Pei G, et al. A two-stage stacked transformer framework for multimodal sentiment analysis. Intell Comput. 2024;3:0081.","journal-title":"Intell Comput"},{"key":"1023_CR46","unstructured":"Yu Wenmeng X, Hua YZ, Jiele W. Learning Modality-specific representations with self-supervised multi-task learning for multimodal sentiment analysis. arXiv preprint arXiv:2102.04830. 2021. Available from: https:\/\/arxiv.org\/abs\/2102.04830."},{"key":"1023_CR47","doi-asserted-by":"crossref","unstructured":"Zang X, Yang B, Liu X, Li A. Dnea: dynamic network embedding method for anomaly detection. In International conference on knowledge science engineering and management. (pp. 236\u2013248). Cham: Springer International Publishing. 
2021.","DOI":"10.1007\/978-3-030-82153-1_20"}],"container-title":["Discover Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s44163-026-01023-7","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s44163-026-01023-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s44163-026-01023-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T13:24:45Z","timestamp":1774617885000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s44163-026-01023-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,22]]},"references-count":47,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2026,12]]}},"alternative-id":["1023"],"URL":"https:\/\/doi.org\/10.1007\/s44163-026-01023-7","relation":{},"ISSN":["2731-0809"],"issn-type":[{"value":"2731-0809","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2,22]]},"assertion":[{"value":"5 November 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 February 2026","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 February 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Clinical trial number"}},{"value":"Not 
applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to publication"}},{"value":"This study used only publicly available benchmark datasets and therefore did not require separate institutional ethical approval.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval and consent to participate"}},{"value":"The authors declare no competing interests.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}}],"article-number":"261"}}