{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T08:05:42Z","timestamp":1776067542765,"version":"3.50.1"},"reference-count":57,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2025,2,28]],"date-time":"2025-02-28T00:00:00Z","timestamp":1740700800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,2,28]],"date-time":"2025-02-28T00:00:00Z","timestamp":1740700800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62337001"],"award-info":[{"award-number":["62337001"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100022963","name":"Key Research and Development Program of Zhejiang Province","doi-asserted-by":"publisher","award":["2022C03106"],"award-info":[{"award-number":["2022C03106"]}],"id":[{"id":"10.13039\/100022963","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. 
Syst."],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1007\/s40747-025-01806-y","type":"journal-article","created":{"date-parts":[[2025,2,28]],"date-time":"2025-02-28T05:59:05Z","timestamp":1740722345000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":10,"title":["$$\\text {H}^2\\text {CAN}$$: heterogeneous hypergraph attention network with counterfactual learning for multimodal sentiment analysis"],"prefix":"10.1007","volume":"11","author":[{"given":"Changqin","family":"Huang","sequence":"first","affiliation":[]},{"given":"Zhenheng","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Qionghao","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Xiaodi","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Fan","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Jili","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,28]]},"reference":[{"issue":"3","key":"1806_CR1","volume":"36","author":"Z Li","year":"2024","unstructured":"Li Z, Zou Z (2024) Punctuation and lexicon aid representation: a hybrid model for short text sentiment analysis on social media platform. J King Saud Univ Comput Inf Sci 36(3):102010","journal-title":"J King Saud Univ Comput Inf Sci"},{"issue":"5","key":"1806_CR2","volume":"11","author":"G Chandrasekaran","year":"2021","unstructured":"Chandrasekaran G, Nguyen TN, Hemanth DJ (2021) Multimodal sentimental analysis for social media applications: a comprehensive review. 
Wiley Interdiscip Rev: Data Min Knowl Discov 11(5):e1415","journal-title":"Wiley Interdiscip Rev: Data Min Knowl Discov"},{"key":"1806_CR3","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2023.111346","volume":"285","author":"J Huang","year":"2024","unstructured":"Huang J, Zhou J, Tang Z, Lin J, Chen CYC (2024) TMBL: transformer-based multimodal binding learning model for multimodal sentiment analysis. Knowl Based Syst 285:111346","journal-title":"Knowl Based Syst"},{"key":"1806_CR4","unstructured":"Zadeh A, Chen M, Poria S, Cambria E, Morency LP (2017) Tensor fusion network for multimodal sentiment analysis. In: Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing. Pittsburgh: Association for Computational Linguistics, pp 1103\u20131114"},{"key":"1806_CR5","doi-asserted-by":"crossref","unstructured":"Liu Z, Shen Y, Lakshminarasimhan VB, Liang PP, Zadeh A, Morency LP (2018) Efficient low-rank multimodal fusion with modality-specific factors. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Pittsburgh: Association for Computational Linguistics, pp 2247\u20132256","DOI":"10.18653\/v1\/P18-1209"},{"key":"1806_CR6","doi-asserted-by":"crossref","unstructured":"Zadeh A, Liang PP, Poria S, Vij P, Cambria E, Morency LP (2018) Multi-attention recurrent network for human communication comprehension. In: Proceedings of the AAAI Conference on Artificial Intelligence. Palo Alto: AAAI Press, pp 5642\u20135649","DOI":"10.1609\/aaai.v32i1.12024"},{"key":"1806_CR7","doi-asserted-by":"crossref","first-page":"424","DOI":"10.1016\/j.inffus.2022.09.025","volume":"91","author":"A Gandhi","year":"2023","unstructured":"Gandhi A, Adhvaryu K, Poria S, Cambria E, Hussain A (2023) Multimodal sentiment analysis: a systematic review of history, datasets, multimodal fusion methods, applications, challenges and future directions. 
Inf Fusion 91:424\u2013444","journal-title":"Inf Fusion"},{"key":"1806_CR8","doi-asserted-by":"crossref","unstructured":"Tsai YHH, Bai S, Liang PP, Kolter JZ, Morency LP, Salakhutdinov R (2019) Multimodal transformer for unaligned multimodal language sequences. In: Proceedings of the conference. Association for computational linguistics. Meeting, vol 2019. New York: NIH Public Access, p 6558","DOI":"10.18653\/v1\/P19-1656"},{"key":"1806_CR9","volume":"136","author":"D Wang","year":"2023","unstructured":"Wang D, Guo X, Tian Y, Liu J, He L, Luo X (2023) TETFN: a text enhanced transformer fusion network for multimodal sentiment analysis. Pattern Recognit 136:109259","journal-title":"Pattern Recognit"},{"key":"1806_CR10","doi-asserted-by":"crossref","unstructured":"Xiao L, Wu X, Wu W, Yang J, He L (2022) Multi-channel attentive graph convolutional network with sentiment fusion for multimodal sentiment analysis. In: ICASSP 2022-2022 IEEE international conference on acoustics, speech and signal processing (ICASSP). New York: IEEE, pp 4578\u20134582","DOI":"10.1109\/ICASSP43922.2022.9747542"},{"key":"1806_CR11","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2023.126992","volume":"565","author":"J Huang","year":"2024","unstructured":"Huang J, Pu Y, Zhou D, Cao J, Gu J, Zhao Z et al (2024) Dynamic hypergraph convolutional network for multimodal sentiment analysis. Neurocomputing 565:126992","journal-title":"Neurocomputing"},{"key":"1806_CR12","doi-asserted-by":"crossref","first-page":"133","DOI":"10.1007\/s10919-019-00293-3","volume":"43","author":"D Keltner","year":"2019","unstructured":"Keltner D, Sauter D, Tracy J, Cowen A (2019) Emotional expression: advances in basic emotion theory. 
J Nonverbal Behav 43:133\u2013160","journal-title":"J Nonverbal Behav"},{"key":"1806_CR13","doi-asserted-by":"crossref","first-page":"296","DOI":"10.1016\/j.inffus.2022.07.006","volume":"88","author":"F Zhang","year":"2022","unstructured":"Zhang F, Li XC, Lim CP, Hua Q, Dong CR, Zhai JH (2022) Deep emotional arousal network for multimodal sentiment analysis and emotion recognition. Inf Fusion 88:296\u2013304","journal-title":"Inf Fusion"},{"key":"1806_CR14","doi-asserted-by":"crossref","unstructured":"Rao Y, Chen G, Lu J, Zhou J (2021) Counterfactual attention learning for fine-grained visual categorization and re-identification. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision. New York: IEEE, pp 1025\u20131034","DOI":"10.1109\/ICCV48922.2021.00106"},{"key":"1806_CR15","doi-asserted-by":"crossref","unstructured":"Zhu Y, Chen Z, Wu F (2019) Multimodal deep denoise framework for affective video content analysis. In: Proceedings of the 27th ACM International Conference on Multimedia. New York: Association for Computing Machinery, pp 130\u2013138","DOI":"10.1145\/3343031.3350997"},{"key":"1806_CR16","doi-asserted-by":"crossref","unstructured":"Kotsia I, Pitas I (2005) Real time facial expression recognition from image sequences using support vector machines. In: IEEE international conference on image processing 2005, vol 2. New York: IEEE, pp II\u2013966","DOI":"10.1109\/ICIP.2005.1530218"},{"key":"1806_CR17","doi-asserted-by":"crossref","unstructured":"Wang Y, Shen Y, Liu Z, Liang PP, Zadeh A, Morency LP (2019) Words can shift: dynamically adjusting word representations using nonverbal behaviors. 
In: Proceedings of the AAAI conference on artificial intelligence, vol 33, Palo Alto: AAAI Press, pp 7216\u20137223","DOI":"10.1609\/aaai.v33i01.33017216"},{"key":"1806_CR18","doi-asserted-by":"crossref","unstructured":"Yu W, Xu H, Yuan Z, Wu J (2021) Learning modality-specific representations with self-supervised multi-task learning for multimodal sentiment analysis. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol 35, Palo Alto: AAAI Press, pp 10790\u201310797","DOI":"10.1609\/aaai.v35i12.17289"},{"issue":"5","key":"1806_CR19","doi-asserted-by":"crossref","first-page":"82","DOI":"10.1109\/MIS.2024.3441408","volume":"39","author":"X Zhang","year":"2024","unstructured":"Zhang X, Wang Z, Cao G, Ho SB (2024) Joint weakly supervised image emotion analysis based on interclass discrimination and interclass correlation. IEEE Intell Syst 39(5):82\u201389","journal-title":"IEEE Intell Syst"},{"issue":"2","key":"1806_CR20","doi-asserted-by":"crossref","first-page":"2869","DOI":"10.1007\/s40747-023-01296-w","volume":"10","author":"H Zhang","year":"2024","unstructured":"Zhang H, Liu Y, Xiong Z, Wu Z, Xu D (2024) Visual sentiment analysis with semantic correlation enhancement. Complex Intell Syst 10(2):2869\u20132881","journal-title":"Complex Intell Syst"},{"key":"1806_CR21","doi-asserted-by":"crossref","unstructured":"Yang J, Yu Y, Niu D, Guo W, Xu Y (2023) ConFEDE: contrastive feature decomposition for multimodal sentiment analysis. In: Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics, vol 1 (long papers), Pittsburgh: Association for Computational Linguistics, pp 7617\u20137630","DOI":"10.18653\/v1\/2023.acl-long.421"},{"key":"1806_CR22","doi-asserted-by":"crossref","unstructured":"Hazarika D, Zimmermann R, Poria S (2020) Misa: modality-invariant and -specific representations for multimodal sentiment analysis. 
In: Proceedings of the 28th ACM International Conference on Multimedia, New York: Association for Computing Machinery, pp 1122\u20131131","DOI":"10.1145\/3394171.3413678"},{"key":"1806_CR23","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2023.110502","volume":"269","author":"C Huang","year":"2023","unstructured":"Huang C, Zhang J, Wu X, Wang Y, Li M, Huang X (2023) TeFNA: text-centered fusion network with crossmodal attention for multimodal sentiment analysis. Knowl Based Syst 269:110502","journal-title":"Knowl Based Syst"},{"key":"1806_CR24","doi-asserted-by":"crossref","unstructured":"Gan C, Tang Y, Fu X, Zhu Q, Jain DK, Garc\u00eda S (2024) Video multimodal sentiment analysis using cross-modal feature translation and dynamical propagation. Knowledge-Based Systems, 299: 111982","DOI":"10.1016\/j.knosys.2024.111982"},{"issue":"3","key":"1806_CR25","volume":"61","author":"L Wang","year":"2024","unstructured":"Wang L, Peng J, Zheng C, Zhao T et al (2024) A cross modal hierarchical fusion multimodal sentiment analysis method based on multi-task learning. Inf Process Manag 61(3):103675","journal-title":"Inf Process Manag"},{"key":"1806_CR26","doi-asserted-by":"crossref","unstructured":"Yang J, Wang Y, Yi R, Zhu Y, Rehman A, Zadeh A et al (2020) MTAG: modal-temporal attention graph for unaligned human multimodal language sequences. In: Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Pittsburgh: Association for Computational Linguistics, pp 1009\u20131021","DOI":"10.18653\/v1\/2021.naacl-main.79"},{"key":"1806_CR27","doi-asserted-by":"crossref","unstructured":"Feng Y, You H, Zhang Z, Ji R, Gao Y (2019) Hypergraph neural networks. 
In: Proceedings of the AAAI Conference on Artificial Intelligence, vol 33, Palo Alto: AAAI Press, pp 3558\u20133565","DOI":"10.1609\/aaai.v33i01.33013558"},{"key":"1806_CR28","doi-asserted-by":"crossref","unstructured":"Kim ES, Kang WY, On KW, Heo YJ, Zhang BT (2020) Hypergraph attention networks for multimodal learning. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, New York: IEEE, pp 14581\u201314590","DOI":"10.1109\/CVPR42600.2020.01459"},{"key":"1806_CR29","unstructured":"Li Q, Ji C, Guo S, Zhao Y, Mao Q, Wang S et al (2024) Variational multi-modal hypergraph attention network for multi-modal relation extraction. In: Proceedings of the ACM International Conference on Multimedia 2024. New York: Association for Computing Machinery, pp 1\u201311"},{"key":"1806_CR30","doi-asserted-by":"crossref","unstructured":"Khan B, Wu J, Yang J, Ma X (2023) Heterogeneous hypergraph neural network for social recommendation using attention network. In: ACM Transactions on Recommender Systems. New York: Association for Computing Machinery, pp 1\u201323","DOI":"10.1145\/3613964"},{"issue":"13s","key":"1806_CR31","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3586075","volume":"55","author":"R Das","year":"2023","unstructured":"Das R, Singh TD (2023) Multimodal sentiment analysis: a survey of methods, trends, and challenges. ACM Comput Surv 55(13s):1\u201338","journal-title":"ACM Comput Surv"},{"key":"1806_CR32","doi-asserted-by":"crossref","unstructured":"Li S, Guo D, Liu K, Hong R, Xue F (2023) Multimodal counterfactual learning network for multimedia-based recommendation. In: Proceedings of the 46th international ACM SIGIR Conference on Research and Development in Information Retrieval. 
New York: Association for Computing Machinery, pp 1539\u20131548","DOI":"10.1145\/3539618.3591739"},{"key":"1806_CR33","doi-asserted-by":"crossref","unstructured":"Zhang X, Wang Z, Wang H, Xiang J, Wu C, Cao G (2024) CausVSR: causality inspired visual sentiment recognition. In: Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence. San Mateo, CA: Morgan Kaufmann Publishers Inc., pp 3196\u20133204","DOI":"10.24963\/ijcai.2024\/354"},{"key":"1806_CR34","doi-asserted-by":"crossref","unstructured":"Sun T, Wang W, Jing L, Cui Y, Song X, Nie L (2022) Counterfactual reasoning for out-of-distribution multimodal sentiment analysis. In: Proceedings of the 30th ACM International Conference on Multimedia. New York: Association for Computing Machinery, pp 15\u201323","DOI":"10.1145\/3503161.3548211"},{"key":"1806_CR35","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2024.102725","volume":"114","author":"C Huang","year":"2025","unstructured":"Huang C, Chen J, Huang Q, Wang S, Tu Y, Huang X (2025) AtCAF: attention-based causality-aware fusion network for multimodal sentiment analysis. Inf Fusion 114:102725","journal-title":"Inf Fusion"},{"key":"1806_CR36","unstructured":"Devlin J, Chang MW, Lee K, Toutanova K (2018) Bert: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. Pittsburgh: Association for Computational Linguistics, pp 4171\u20134186"},{"key":"1806_CR37","doi-asserted-by":"crossref","unstructured":"Zhu Q, Yeh MC, Cheng KT, Avidan S (2006) Fast human detection using a cascade of histograms of oriented gradients. In: 2006 IEEE computer society conference on computer vision and pattern recognition (CVPR\u201906), vol 2. 
New York: IEEE, pp 1491\u20131498","DOI":"10.1109\/CVPR.2006.119"},{"key":"1806_CR38","doi-asserted-by":"crossref","unstructured":"Degottex G, Kane J, Drugman T, Raitio T, Scherer S (2014) COVAREP\u2014a collaborative voice analysis repository for speech technologies. In: 2014 IEEE international conference on acoustics, speech and signal processing (ICASSP). New York: IEEE, pp 960\u2013964","DOI":"10.1109\/ICASSP.2014.6853739"},{"key":"1806_CR39","unstructured":"Huang Z, Xu W, Yu K (2015) Bidirectional LSTM-CRF models for sequence tagging. arXiv preprint. arXiv:1508.01991"},{"key":"1806_CR40","unstructured":"Vaswani A (2017) Attention is all you need. In: Advances in Neural Information Processing Systems. San Mateo: Morgan Kaufmann Pubs, pp 1\u201312"},{"key":"1806_CR41","volume":"571","author":"Y Fu","year":"2024","unstructured":"Fu Y, Zhang Z, Yang R, Yao C (2024) Hybrid cross-modal interaction learning for multimodal sentiment analysis. Neurocomputing 571:127201","journal-title":"Neurocomputing"},{"key":"1806_CR42","first-page":"5812","volume":"33","author":"Y You","year":"2020","unstructured":"You Y, Chen T, Sui Y, Chen T, Wang Z, Shen Y (2020) Graph contrastive learning with augmentations. Adv Neural Inf Process Syst 33:5812\u20135823","journal-title":"Adv Neural Inf Process Syst"},{"key":"1806_CR43","doi-asserted-by":"crossref","DOI":"10.1017\/CBO9781139979573","volume-title":"Cause and correlation in biology: a user\u2019s guide to path analysis, structural equations and causal inference with R","author":"B Shipley","year":"2016","unstructured":"Shipley B (2016) Cause and correlation in biology: a user\u2019s guide to path analysis, structural equations and causal inference with R. Cambridge University Press, Cambridge"},{"key":"1806_CR44","doi-asserted-by":"crossref","unstructured":"Pearl J (2022) Direct and indirect effects. In: Probabilistic and causal inference: the works of Judea Pearl. 
New York: Association for Computing Machinery, pp 373\u2013392","DOI":"10.1145\/3501714.3501736"},{"key":"1806_CR45","doi-asserted-by":"crossref","unstructured":"Chang CH, Adam GA, Goldenberg A (2021) Towards robust classification model by counterfactual and invariant data generation. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. New York: IEEE, pp 15212\u201315221","DOI":"10.1109\/CVPR46437.2021.01496"},{"key":"1806_CR46","volume-title":"Explanation in causal inference: methods for mediation and interaction","author":"T VanderWeele","year":"2015","unstructured":"VanderWeele T (2015) Explanation in causal inference: methods for mediation and interaction. Oxford University Press, Oxford"},{"issue":"6","key":"1806_CR47","doi-asserted-by":"crossref","first-page":"82","DOI":"10.1109\/MIS.2016.94","volume":"31","author":"A Zadeh","year":"2016","unstructured":"Zadeh A, Zellers R, Pincus E, Morency LP (2016) Multimodal sentiment intensity analysis in videos: facial gestures and verbal messages. IEEE Intell Syst 31(6):82\u201388","journal-title":"IEEE Intell Syst"},{"key":"1806_CR48","doi-asserted-by":"crossref","unstructured":"Zadeh AB, Liang PP, Poria S, Cambria E, Morency LP (2018) Multimodal language analysis in the wild: CMU-MOSEI dataset and interpretable dynamic fusion graph. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, vol 1 (long papers). Pittsburgh: Association for Computational Linguistics, pp 2236\u20132246","DOI":"10.18653\/v1\/P18-1208"},{"key":"1806_CR49","doi-asserted-by":"crossref","unstructured":"Yu W, Xu H, Meng F, Zhu Y, Ma Y, Wu J et al (2020) CH-SIMS: a Chinese multimodal sentiment analysis dataset with fine-grained annotation of modality. In: Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics. 
Pittsburgh: Association for Computational Linguistics, pp 3718\u20133727","DOI":"10.18653\/v1\/2020.acl-main.343"},{"key":"1806_CR50","volume":"283","author":"G Yi","year":"2024","unstructured":"Yi G, Fan C, Zhu K, Lv Z, Liang S, Wen Z et al (2024) VLP2MSA: expanding vision-language pre-training to multimodal sentiment analysis. Knowl Based Syst 283:111136","journal-title":"Knowl Based Syst"},{"key":"1806_CR51","unstructured":"Kingma DP, Ba J (2014) Adam: a method for stochastic optimization. arXiv preprint. arXiv:1412.6980"},{"key":"1806_CR52","unstructured":"Tsai YHH, Liang PP, Zadeh A, Morency LP, Salakhutdinov R (2018) Learning factorized multimodal representations. arXiv preprint. arXiv:1806.06176"},{"key":"1806_CR53","doi-asserted-by":"crossref","unstructured":"Rahman W, Hasan MK, Lee S, Zadeh A, Mao C, Morency LP et al (2020) Integrating multimodal information in large pretrained transformers. In: Proceedings of the conference. Association for computational linguistics. Meeting, vol 2020. New York: NIH Public Access, p 2359","DOI":"10.18653\/v1\/2020.acl-main.214"},{"key":"1806_CR54","doi-asserted-by":"crossref","unstructured":"Han W, Chen H, Gelbukh A, Zadeh A, Morency L, Poria S (2021) Bi-bimodal modality fusion for correlation-controlled multimodal sentiment analysis. In: Proceedings of the 2021 International Conference on Multimodal Interaction. New York: Association for Computing Machinery, pp 6\u201315","DOI":"10.1145\/3462244.3479919"},{"key":"1806_CR55","doi-asserted-by":"crossref","first-page":"4121","DOI":"10.1109\/TMM.2022.3171679","volume":"25","author":"S Mai","year":"2022","unstructured":"Mai S, Zeng Y, Hu H (2022) Multimodal information bottleneck: learning minimal sufficient unimodal and multimodal representations. 
IEEE Trans Multimed 25:4121\u20134134","journal-title":"IEEE Trans Multimed"},{"key":"1806_CR56","doi-asserted-by":"crossref","DOI":"10.1016\/j.ins.2023.119125","volume":"641","author":"Z Tang","year":"2023","unstructured":"Tang Z, Xiao Q, Zhou X, Li Y, Chen C, Li K (2023) Learning discriminative multi-relation representations for multimodal sentiment analysis. Inf Sci 641:119125","journal-title":"Inf Sci"},{"key":"1806_CR57","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. New York: IEEE, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"}],"container-title":["Complex &amp; Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-025-01806-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-025-01806-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-025-01806-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,30]],"date-time":"2025-03-30T21:25:07Z","timestamp":1743369907000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-025-01806-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2,28]]},"references-count":57,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2025,4]]}},"alternative-id":["1806"],"URL":"https:\/\/doi.org\/10.1007\/s40747-025-01806-y","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"value":"2199-4536","type":"print"},{"value":"2198-6053","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,2,28]]},"asse
rtion":[{"value":"8 July 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 January 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 February 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"All authors certify that they have no affiliations with or involvement in any organization or entity with any financial interest or non-financial interest in the subject matter or materials discussed in this manuscript. On behalf of all authors, the corresponding author states that there is no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"196"}}