{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,15]],"date-time":"2026-05-15T17:03:42Z","timestamp":1778864622432,"version":"3.51.4"},"reference-count":52,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T00:00:00Z","timestamp":1740096000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T00:00:00Z","timestamp":1740096000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Comput Soc Sc"],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1007\/s42001-025-00374-y","type":"journal-article","created":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T15:07:15Z","timestamp":1740150435000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Content-aware sentiment understanding: cross-modal analysis with encoder-decoder architectures"],"prefix":"10.1007","volume":"8","author":[{"given":"Zahra","family":"Pakdaman","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0584-6470","authenticated-orcid":false,"given":"Abbas","family":"Koochari","sequence":"additional","affiliation":[]},{"given":"Arash","family":"Sharifi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,21]]},"reference":[{"key":"374_CR1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2024.123247","author":"P P\u0159ib\u00e1\u0148","year":"2024","unstructured":"P\u0159ib\u00e1\u0148, P., \u0160m\u00edd, J., Steinberger, J., & Mi\u0161tera, A. (2024). 
A comparative study of cross-lingual sentiment analysis. Expert Systems with Applications. https:\/\/doi.org\/10.1016\/j.eswa.2024.123247","journal-title":"Expert Systems with Applications"},{"key":"374_CR2","doi-asserted-by":"publisher","first-page":"17","DOI":"10.1007\/s10462-023-10633-x","volume":"57","author":"H Zhang","year":"2024","unstructured":"Zhang, H., Cheah, Y., Alyasiri, O., & An, J. (2024). Exploring aspect-based sentiment quadruple extraction with implicit aspects, opinions, and ChatGPT: A comprehensive survey. Artificial Intelligence Review, 57, 17. https:\/\/doi.org\/10.1007\/s10462-023-10633-x","journal-title":"Artificial Intelligence Review"},{"key":"374_CR3","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-023-18081-z","author":"T Singh","year":"2024","unstructured":"Singh, T., Rajput, V., Sharma, N., & Kumar, M. (2024). Sentiment analysis based distributed recommendation system. Multimedia Tools and Applications. https:\/\/doi.org\/10.1007\/s11042-023-18081-z","journal-title":"Multimedia Tools and Applications"},{"key":"374_CR4","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-023-17953-8","author":"R Dey","year":"2024","unstructured":"Dey, R., & Das, A. (2024). Neighbour adjusted dispersive flies optimization based deep hybrid sentiment analysis framework. Multimedia Tools and Applications. https:\/\/doi.org\/10.1007\/s11042-023-17953-8","journal-title":"Multimedia Tools and Applications"},{"key":"374_CR5","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-024-18156-5","author":"M Danyal","year":"2024","unstructured":"Danyal, M., Khan, S., Khan, M., Ullah, S., Mehmood, F., & Ali, I. (2024). Proposing sentiment analysis model based on BERT and XLNet for movie reviews. Multimedia Tools and Applications. 
https:\/\/doi.org\/10.1007\/s11042-024-18156-5","journal-title":"Multimedia Tools and Applications"},{"key":"374_CR6","doi-asserted-by":"publisher","DOI":"10.1016\/j.chbr.2023.100328","volume":"12","author":"R Strubytskyi","year":"2023","unstructured":"Strubytskyi, R., & Shakhovska, N. (2023). Method and models for sentiment analysis and hidden propaganda finding. Computers in Human Behavior Reports, 12, 100328.","journal-title":"Computers in Human Behavior Reports"},{"issue":"9","key":"374_CR7","doi-asserted-by":"publisher","first-page":"10096","DOI":"10.1007\/s10489-022-04046-6","volume":"53","author":"P Kumar","year":"2023","unstructured":"Kumar, P., Pathania, K., & Raman, B. (2023). Zero-shot learning based cross-lingual sentiment analysis for sanskrit text with insufficient labeled data. Applied Intelligence, 53(9), 10096\u201310113.","journal-title":"Applied Intelligence"},{"key":"374_CR8","doi-asserted-by":"crossref","unstructured":"Liu , Z., Liao , H., Li , M., Yang , Q., & Meng, F. (2023). A deep learning-based sentiment analysis approach for online product ranking with probabilistic linguistic term sets. IEEE Transactions on Engineering Management.","DOI":"10.1109\/TEM.2023.3271597"},{"key":"374_CR9","unstructured":"\u00c7ano, E. (2023). Albmore: A corpus of movie reviews for sentiment analysis in albanian. arXiv preprint arXiv:2306.08526."},{"issue":"1","key":"374_CR10","doi-asserted-by":"publisher","first-page":"339","DOI":"10.32604\/iasc.2023.031987","volume":"36","author":"S Saranya","year":"2023","unstructured":"Saranya, S., & Usha, G. (2023). A machine learning-based technique with IntelligentWordNet lemmatize for twitter sentiment analysis. Intelligent Automation & Soft Computing, 36(1), 339.","journal-title":"Intelligent Automation & Soft Computing"},{"key":"374_CR11","doi-asserted-by":"crossref","unstructured":"Sun, T., Wang, W., Jing, L., Cui, Y., Song, X., & Nie, L. (2022, October). 
Counterfactual reasoning for out-of-distribution multimodal sentiment analysis. In\u00a0Proceedings of the 30th ACM International Conference on Multimedia\u00a0(pp. 15\u201323).","DOI":"10.1145\/3503161.3548211"},{"issue":"8","key":"374_CR12","doi-asserted-by":"publisher","first-page":"9507","DOI":"10.1609\/aaai.v37i8.26138","volume":"37","author":"Y Qiao","year":"2023","unstructured":"Qiao, Y., Jing, L., Song, X., Chen, X., Zhu, L., & Nie, L. (2023). Mutual-enhanced incongruity learning network for multi-modal sarcasm detection. Proceedings of the AAAI Conference on Artificial Intelligence, 37(8), 9507\u20139515. https:\/\/doi.org\/10.1609\/aaai.v37i8.26138","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"374_CR13","doi-asserted-by":"crossref","unstructured":"Jing, L., Song, X., Ouyang, K., Jia, M., & Nie, L. (2023). Multi-source semantic graph-based multimodal sarcasm explanation generation.\u00a0arXiv preprint arXiv:2306.16650.","DOI":"10.18653\/v1\/2023.acl-long.635"},{"issue":"12","key":"374_CR14","doi-asserted-by":"publisher","first-page":"12605","DOI":"10.1109\/TKDE.2023.3270940","volume":"35","author":"T Sun","year":"2023","unstructured":"Sun, T., Jing, L., Wei, Y., Song, X., Cheng, Z., & Nie, L. (2023). Dual consistency-enhanced semi-supervised sentiment analysis towards COVID-19 Tweets. IEEE Transactions on Knowledge and Data Engineering, 35(12), 12605\u201312617.","journal-title":"IEEE Transactions on Knowledge and Data Engineering"},{"issue":"16","key":"374_CR15","doi-asserted-by":"publisher","first-page":"18354","DOI":"10.1609\/aaai.v38i16.29795","volume":"38","author":"M Jia","year":"2024","unstructured":"Jia, M., Xie, C., & Jing, L. (2024). Debiasing multimodal sarcasm detection with contrastive learning. 
In Proceedings of the AAAI Conference on Artificial Intelligence, 38(16), 18354\u201318362.","journal-title":"In Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"374_CR16","unstructured":"Ouyang, K., Jing, L., Song, X., Liu, M., Hu, Y., & Nie, L. (2024). Sentiment-enhanced Graph-based Sarcasm Explanation in Dialogue.\u00a0arXiv preprint arXiv:2402.03658."},{"key":"374_CR17","doi-asserted-by":"publisher","first-page":"122731","DOI":"10.1016\/j.eswa.2023.122731","volume":"242","author":"C Gan","year":"2024","unstructured":"Gan, C., Fu, X., Feng, Q., Zhu, Q., Cao, Y., & Zhu, Y. (2024). A multimodal fusion network with attention mechanisms for visual\u2013textual sentiment analysis. Expert Systems with Applications, 242, 122731.","journal-title":"Expert Systems with Applications"},{"issue":"21","key":"374_CR18","doi-asserted-by":"publisher","first-page":"60171","DOI":"10.1007\/s11042-023-17685-9","volume":"83","author":"Z Yin","year":"2024","unstructured":"Yin, Z., Du, Y., Liu, Y., & Wang, Y. (2024). Multi-layer cross-modality attention fusion network for multimodal sentiment analysis. Multimedia Tools and Applications, 83(21), 60171\u201360187. https:\/\/doi.org\/10.1007\/s11042-023-17685-9","journal-title":"Multimedia Tools and Applications"},{"key":"374_CR19","doi-asserted-by":"publisher","first-page":"110467","DOI":"10.1016\/j.knosys.2023.110467","volume":"268","author":"Y Liu","year":"2023","unstructured":"Liu, Y., Li, Z., Zhou, K., Zhang, L., Li, L., Tian, P., & Shen, S. (2023). Scanning, attention, and reasoning multimodal content for sentiment analysis. Knowledge-Based Systems, 268, 110467.","journal-title":"Knowledge-Based Systems"},{"issue":"4","key":"374_CR20","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s10462-023-10685-z","volume":"57","author":"Y Li","year":"2024","unstructured":"Li, Y., Ding, H., Lin, Y., Feng, X., & Chang, L. (2024). 
Multi-level textual-visual alignment and fusion network for multimodal aspect-based sentiment analysis. Artificial Intelligence Review, 57(4), 1\u201326.","journal-title":"Artificial Intelligence Review"},{"key":"374_CR21","doi-asserted-by":"crossref","unstructured":"Wang, D., Tian , C., Liang , X., Zhao , L., He , L., & Wang, Q. (2023). Dual-Perspective Fusion Network for Aspect-based Multimodal Sentiment Analysis. IEEE Transactions on Multimedia.","DOI":"10.1109\/TMM.2023.3321435"},{"issue":"2","key":"374_CR22","doi-asserted-by":"publisher","first-page":"661","DOI":"10.3390\/s23020661","volume":"23","author":"S Hou","year":"2023","unstructured":"Hou, S., Tuerhong, G., & Wushouer, M. (2023). Visdanet: Visual distillation and attention network for multimodal sentiment classification. Sensors, 23(2), 661.","journal-title":"Sensors"},{"key":"374_CR23","doi-asserted-by":"crossref","unstructured":"Tsai, Y., Bai , S., Liang , P., Kolter , J., Morency, L., & Salakhutdinov, R. (2019). Multi- modal transformer for unaligned multimodal language sequences. Proceedings of the Annual Meeting of the Association for Computational Linguistics, (pp. 6558\u20136569).","DOI":"10.18653\/v1\/P19-1656"},{"key":"374_CR24","doi-asserted-by":"publisher","first-page":"61","DOI":"10.1016\/j.knosys.2019.04.018","volume":"178","author":"J Xu","year":"2019","unstructured":"Xu, J., Huang, F., Zhang, X., Wang, S., Li, C., Li, Z., & He, Y. (2019). Visual-textual sentiment classification with bi-directional multi-level attention networks. Knowledge-Based System, 178, 61\u201373. https:\/\/doi.org\/10.1016\/j.knosys.2019.04.018","journal-title":"Knowledge-Based System"},{"key":"374_CR25","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1016\/j.knosys.2019.01.019","volume":"167","author":"F Huang","year":"2019","unstructured":"Huang, F., Zhang, X., Zhao, Z., Xu, J., & Li, Z. (2019). Image\u2013text sentiment analysis via deep multimodal attentive fusion. 
Knowledge-Based System, 167, 26\u201337. https:\/\/doi.org\/10.1016\/j.knosys.2019.01.019","journal-title":"Knowledge-Based System"},{"issue":"1","key":"374_CR26","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3517139","volume":"19","author":"A Yadav","year":"2023","unstructured":"Yadav, A., & Vishwakarma, D. (2023). A deep multi-level attentive network for multimodal sentiment analysis. ACM Transactions on Multimedia Computing, Communications and Applications, 19(1), 1\u201319.","journal-title":"ACM Transactions on Multimedia Computing, Communications and Applications"},{"key":"374_CR27","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2021.106803","author":"K Zhang","year":"2021","unstructured":"Zhang, K., Zhu, Y., Zhang, W., & Zhu, Y. (2021). Cross-modal image sentiment analysis via deep correlation of textual semantic. Knowledge-Based System. https:\/\/doi.org\/10.1016\/j.knosys.2021.106803","journal-title":"Knowledge-Based System"},{"key":"374_CR28","first-page":"1","volume":"32","author":"J Lu","year":"2019","unstructured":"Lu, J., Batra, D., Parikh, D., & Lee, S. (2019). ViLBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. Advances in Neural Information Processing Systems, 32, 1\u201311.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"374_CR29","doi-asserted-by":"publisher","unstructured":"Khan, Z., & Fu, Y. (2021). Exploiting BERT for multimodal target sentiment classification through input space translation. https:\/\/doi.org\/10.1145\/3474085.3475692","DOI":"10.1145\/3474085.3475692"},{"key":"374_CR30","doi-asserted-by":"publisher","first-page":"3375","DOI":"10.1109\/TMM.2022.3160060","volume":"25","author":"T Zhu","year":"2022","unstructured":"Zhu, T., Li, L., Yang, J., Zhao, S., Liu, H., & Qian, J. (2022). Multimodal sentiment analysis with image-text interaction network. 
IEEE Transactions on Multimedia, 25, 3375\u20133385.","journal-title":"IEEE Transactions on Multimedia"},{"key":"374_CR31","unstructured":"Kiela , D., Firooz , H., Mohan , A., Goswami , V., Singh , A., Ringshia , P., & Testuggine, D. (2020). The hateful memes challenge: Detecting hate speech in multimodal memes. Annual Conference on Neural Information Processing Systems, (pp. 2611\u20132624)."},{"key":"374_CR32","doi-asserted-by":"crossref","unstructured":"Liu , Z., Lin , Y., Cao , Y., Hu , H., Wei , Y., Zhang , Z., Guo, B. (2021). Swin Transformer: Hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030.","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"374_CR33","unstructured":"Vaswani, A., Shazeer , N., Parmar , N., Uszkoreit , J., Jones, L., Gomez , A., & Polosukhin, I. (2017). Attention is all you need. Advances in neural information processing systems, 30."},{"issue":"34","key":"374_CR34","doi-asserted-by":"publisher","first-page":"80351","DOI":"10.1007\/s11042-024-18748-1","volume":"83","author":"Z Pakdaman","year":"2024","unstructured":"Pakdaman, Z., Koochari, A., & Sharifi, A. (2024). Bimodal sentiment analysis in social media: a one-shot learning approach. Multimedia Tools and Applications, 83(34), 80351\u201380372. https:\/\/doi.org\/10.1007\/s11042-024-18748-1","journal-title":"Multimedia Tools and Applications"},{"key":"374_CR35","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J., Dhariwal, P., & Agarwal, S. (2020). Language models are few-shot learners. Advances in neural information processing systems, 33, 1877\u20131901.","journal-title":"Advances in neural information processing systems"},{"key":"374_CR36","unstructured":"Devlin, J., Chang, M. W., Lee, K., & Toutanova, K. (2018). Bert: Pre-training of deep bidirectional transformers for language understanding. 
arXiv preprint arXiv:1810.04805."},{"issue":"5\u20136","key":"374_CR37","doi-asserted-by":"publisher","first-page":"602","DOI":"10.1016\/j.neunet.2005.06.042","volume":"18","author":"A Graves","year":"2005","unstructured":"Graves, A., & Schmidhuber, J. (2005). Framewise phoneme classification with bidirectional LSTM and other neural network architectures. Neural Networks, 18(5\u20136), 602\u2013610.","journal-title":"Neural Networks"},{"issue":"8","key":"374_CR38","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., & Schmidhuber, J. (1997). Long short-term memory. Neural Computation, 9(8), 1735\u20131780.","journal-title":"Neural Computation"},{"key":"374_CR39","doi-asserted-by":"crossref","unstructured":"Sundermeyer, M., Schl\u00fcter, R., & Ney, H. (2012). LSTM neural networks for language modeling. Thirteenth annual conference of the international speech communication association.","DOI":"10.21437\/Interspeech.2012-65"},{"key":"374_CR40","doi-asserted-by":"crossref","unstructured":"Britz, D., Goldie, A., Luong, M., & Le, Q. (2017). Massive Exploration of Neural Machine Translation Architectures. Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, (pp. 1442\u20131451).","DOI":"10.18653\/v1\/D17-1151"},{"key":"374_CR41","unstructured":"Karpathy, A., Johnson, J., & Fei-Fei, L. (2015). Visualizing and Understanding Recurrent Networks. arXiv preprint arXiv:1506.02078."},{"key":"374_CR42","doi-asserted-by":"crossref","unstructured":"Clark, K., Khandelwal, U., Levy, O., & Manning, C. (2019). What does bert look at? an analysis of bert's attention. arXiv preprint arXiv:1906.04341.","DOI":"10.18653\/v1\/W19-4828"},{"key":"374_CR43","doi-asserted-by":"crossref","unstructured":"Artetxe , M., & Schwenk, H. (2019). Massively multilingual sentence embeddings for zero-shot cross-lingual transfer and beyond. 
arXiv:1812.10464.","DOI":"10.1162\/tacl_a_00288"},{"key":"374_CR44","doi-asserted-by":"publisher","first-page":"15","DOI":"10.1007\/978-3-319-27674-8_2","volume-title":"Multimedia Modeling","author":"T Niu","year":"2016","unstructured":"Niu, T., Zhu, S., Pang, L., & El Saddik, A. (2016). Sentiment analysis on multi-view social data. In Q. Tian, N. Sebe, G. J. Qi, B. Huet, R. Hong, & X. Liu (Eds.), Multimedia Modeling (pp. 15\u201327). Cham: Springer International Publishing. https:\/\/doi.org\/10.1007\/978-3-319-27674-8_2"},{"key":"374_CR45","doi-asserted-by":"crossref","unstructured":"Xu , N., & Mao, W. (2017). MultiSentiNet: A Deep Semantic Network for Multimodal Sentiment Analysis. In Proceedings of the 2017 ACM on Conference on Information and Knowledge Management (CIKM\u201917), (pp. 2399\u20132402).","DOI":"10.1145\/3132847.3133142"},{"key":"374_CR46","doi-asserted-by":"publisher","unstructured":"Le , Q., & Mikolov, T. (2014). Distributed Representations of Sentences and Documents. https:\/\/doi.org\/10.48550\/arXiv.1405.4053.","DOI":"10.48550\/arXiv.1405.4053"},{"key":"374_CR47","first-page":"5776","volume":"33","author":"W Wang","year":"2020","unstructured":"Wang, W., Wei, F., Dong, L., Bao, H., Yang, N., & Zhou, M. (2020). MINILM: Deep self-attention distillation for task-agnostic compression of pre-trained transformers. Advances in Neural Information Processing Systems, 33, 5776\u20135788.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"374_CR48","doi-asserted-by":"crossref","unstructured":"Reimers, N., & Gurevych, I. (2019). Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084.","DOI":"10.18653\/v1\/D19-1410"},{"key":"374_CR49","doi-asserted-by":"publisher","unstructured":"Li, Z., Xu, B., Zhu, C., & Zhao, T. (2022). CLMLF: A contrastive learning and multi-layer fusion method for multimodal sentiment detection. 
https:\/\/doi.org\/10.48550\/arXiv.2204.05515.","DOI":"10.48550\/arXiv.2204.05515"},{"issue":"4","key":"374_CR50","doi-asserted-by":"publisher","first-page":"664","DOI":"10.26599\/TST.2021.9010055","volume":"27","author":"C Peng","year":"2022","unstructured":"Peng, C., Zhang, C., Xue, X., Gao, J., Liang, H., & Niu, Z. (2022). Cross-modal complementary network with hierarchical fusion for multimodal sentiment classification. Tsinghua Science and Technology, 27(4), 664\u2013679.","journal-title":"Tsinghua Science and Technology"},{"key":"374_CR51","doi-asserted-by":"publisher","first-page":"106874","DOI":"10.1016\/j.engappai.2023.106874","volume":"126","author":"J An","year":"2023","unstructured":"An, J., & Zainon, W. (2023). Integrating color cues to improve multimodal sentiment analysis in social media. Engineering Applications of Artificial Intelligence, 126, 106874.","journal-title":"Engineering Applications of Artificial Intelligence"},{"key":"374_CR52","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-023-18105-8","author":"B Hung","year":"2024","unstructured":"Hung, B., & Thu, N. (2024). Novelty fused image and text models based on deep neural network and transformer for multimodal sentiment analysis. Multimedia Tools and Applications. 
https:\/\/doi.org\/10.1007\/s11042-023-18105-8","journal-title":"Multimedia Tools and Applications"}],"container-title":["Journal of Computational Social Science"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s42001-025-00374-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s42001-025-00374-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s42001-025-00374-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,15]],"date-time":"2025-05-15T13:27:02Z","timestamp":1747315622000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s42001-025-00374-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2,21]]},"references-count":52,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2025,5]]}},"alternative-id":["374"],"URL":"https:\/\/doi.org\/10.1007\/s42001-025-00374-y","relation":{},"ISSN":["2432-2717","2432-2725"],"issn-type":[{"value":"2432-2717","type":"print"},{"value":"2432-2725","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,2,21]]},"assertion":[{"value":"13 July 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 February 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 February 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"On behalf of all authors, the corresponding author states that there is no conflict 
of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"37"}}