{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,24]],"date-time":"2026-04-24T17:23:46Z","timestamp":1777051426201,"version":"3.51.4"},"reference-count":45,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,11,18]],"date-time":"2025-11-18T00:00:00Z","timestamp":1763424000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,11,18]],"date-time":"2025-11-18T00:00:00Z","timestamp":1763424000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100004242","name":"Princess Nourah Bint Abdulrahman University","doi-asserted-by":"publisher","award":["PNURSP2025R817"],"award-info":[{"award-number":["PNURSP2025R817"]}],"id":[{"id":"10.13039\/501100004242","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Big Data"],"DOI":"10.1186\/s40537-025-01300-9","type":"journal-article","created":{"date-parts":[[2025,11,18]],"date-time":"2025-11-18T10:59:43Z","timestamp":1763463583000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["A multimodal fusion model for real-time environment emotion recognition using audio-visual-textual features"],"prefix":"10.1186","volume":"12","author":[{"given":"Chhaya","family":"Gupta","sequence":"first","affiliation":[]},{"given":"Nasib Singh","family":"Gill","sequence":"additional","affiliation":[]},{"given":"Preeti","family":"Gulia","sequence":"additional","affiliation":[]},{"given":"Abhinav","family":"Kumar","sequence":"additional","affiliation":[]},{"given":"Hanen","family":"Karamti","sequence":"additional","affiliation":[]},{"given":"Demmelash Mollalign","family":"Moges","sequence":"additional","affiliation":[]},{"given":"Imen","family":"Safra","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,18]]},"reference":[{"key":"1300_CR1","doi-asserted-by":"publisher","first-page":"424","DOI":"10.1016\/J.INFFUS.2022.09.025","volume":"91","author":"A Gandhi","year":"2023","unstructured":"Gandhi A, Adhvaryu K, Poria S, Cambria E, Hussain A. Multimodal sentiment analysis: a systematic review of history, datasets, multimodal fusion methods, applications, challenges and future directions. Inf Fusion. 2023;91:424\u201344. https:\/\/doi.org\/10.1016\/J.INFFUS.2022.09.025.","journal-title":"Inf Fusion"},{"key":"1300_CR2","doi-asserted-by":"publisher","first-page":"16359","DOI":"10.1007\/S11042-022-14185-0\/METRICS","volume":"82","author":"G Tang","year":"2023","unstructured":"Tang G, Xie Y, Li K, Liang R, Zhao L. Multimodal emotion recognition from facial expression and speech based on feature fusion, multimed. Tools Appl. 2023;82:16359\u201373. https:\/\/doi.org\/10.1007\/S11042-022-14185-0\/METRICS.","journal-title":"Tools Appl"},{"key":"1300_CR3","doi-asserted-by":"publisher","unstructured":"Pan J, Fang W, Zhang Z, Chen B, Zhang Z, Wang S. Multimodal emotion recognition based on facial Expressions, Speech, and EEG. IEEE Open J Eng Med Biol PP. 2023;1\u20138. 
https:\/\/doi.org\/10.1109\/OJEMB.2023.3240280.","DOI":"10.1109\/OJEMB.2023.3240280"},{"key":"1300_CR4","doi-asserted-by":"publisher","first-page":"14742","DOI":"10.1109\/ACCESS.2023.3244390","volume":"11","author":"HD Le","year":"2023","unstructured":"Le HD, Lee GS, Kim SH, Kim S, Yang HJ. Multi-label multimodal emotion recognition with transformer-based fusion and emotion-level representation learning. IEEE Access. 2023;11:14742\u201351. https:\/\/doi.org\/10.1109\/ACCESS.2023.3244390.","journal-title":"IEEE Access"},{"key":"1300_CR5","doi-asserted-by":"publisher","unstructured":"Li M, Qiu X, Peng S, Tang L, Li Q, Yang W, Ma Y. Multimodal emotion recognition model based on a deep neural network with multiobjective optimization. Wirel Commun Mob Comput. 2021;2021. https:\/\/doi.org\/10.1155\/2021\/6971100.","DOI":"10.1155\/2021\/6971100"},{"key":"1300_CR6","doi-asserted-by":"publisher","unstructured":"Ranganathan H, Chakraborty S, Panchanathan S. Multimodal emotion recognition using deep learning architectures, 2016 IEEE Winter Conf. Appl. Comput. Vision, WACV 2016 (2016) 1\u20139. https:\/\/doi.org\/10.1109\/WACV.2016.7477679","DOI":"10.1109\/WACV.2016.7477679"},{"key":"1300_CR7","doi-asserted-by":"publisher","unstructured":"Liu X, Xu Z, Huang K. Multimodal Emotion Recognition Based on Cascaded Multichannel and Hierarchical Fusion, Comput. Intell. Neurosci. 2023 (2023) 1\u201318. https:\/\/doi.org\/10.1155\/2023\/9645611","DOI":"10.1155\/2023\/9645611"},{"key":"1300_CR8","doi-asserted-by":"publisher","unstructured":"Hu Y, Wang F, Signal. Https:\/\/Doi.Org\/10.1142\/S0218126623501256 32 (2022). https:\/\/doi.org\/10.1142\/S0218126623501256.","DOI":"10.1142\/S0218126623501256"},{"key":"1300_CR9","doi-asserted-by":"publisher","DOI":"10.3389\/fnbot.2023.1181598","author":"Y Wang","year":"2023","unstructured":"Wang Y, Gu Y, Yin Y, Han Y, Zhang H, Wang S, et al. Multimodal transformer augmented fusion for speech emotion recognition. Front Neurorobot. 2023. https:\/\/doi.org\/10.3389\/fnbot.2023.1181598.","journal-title":"Front Neurorobot"},{"key":"1300_CR10","doi-asserted-by":"publisher","DOI":"10.3390\/electronics12020288","author":"A Chaudhari","year":"2023","unstructured":"Chaudhari A, Bhatt C, Krishna A, Travieso-Gonz\u00e1lez CM. Facial emotion recognition with inter-modality-attention-transformer-based self-supervised learning. Electronics. 2023. https:\/\/doi.org\/10.3390\/electronics12020288.","journal-title":"Electronics"},{"key":"1300_CR11","doi-asserted-by":"publisher","unstructured":"Wu Y, Daoudi M, Amad A. Transformer-Based Self-Supervised multimodal representation learning for wearable emotion recognition. IEEE Trans Affect Comput PP. 2023;1\u201316. https:\/\/doi.org\/10.1109\/TAFFC.2023.3263907.","DOI":"10.1109\/TAFFC.2023.3263907"},{"key":"1300_CR12","doi-asserted-by":"publisher","first-page":"123","DOI":"10.1016\/j.inffus.2022.10.009","volume":"91","author":"J Wen","year":"2023","unstructured":"Wen J, Jiang D, Tu G, Liu C, Cambria E. Dynamic interactive multiview memory network for emotion recognition in conversation. Inf Fusion. 2023;91:123\u201333. https:\/\/doi.org\/10.1016\/j.inffus.2022.10.009.","journal-title":"Inf Fusion"},{"key":"1300_CR13","doi-asserted-by":"publisher","first-page":"22935","DOI":"10.1007\/S00521-022-06913-2\/METRICS","volume":"35","author":"A Sharma","year":"2023","unstructured":"Sharma A, Sharma K, Kumar A. Real-time emotional health detection using fine-tuned transfer networks with multimodal fusion. Neural Comput Appl. 2023;35:22935\u201348. 
https:\/\/doi.org\/10.1007\/S00521-022-06913-2\/METRICS.","journal-title":"Neural Comput Appl"},{"issue":"1","key":"1300_CR14","doi-asserted-by":"publisher","first-page":"951","DOI":"10.1007\/s40747-022-00841-3","volume":"9","author":"Y Xu","year":"2023","unstructured":"Xu Y, Su H, Ma G, Liu X. A novel dual-modal emotion recognition algorithm with fusing hybrid features of audio signal and speech context. Complex Intell Syst. 2023;9(1):951\u201363. https:\/\/doi.org\/10.1007\/s40747-022-00841-3.","journal-title":"Complex Intell Syst"},{"key":"1300_CR15","doi-asserted-by":"publisher","first-page":"11239","DOI":"10.1007\/s11042-022-13557-w","volume":"82","author":"E Ghaleb","year":"2023","unstructured":"Ghaleb E, Niehues J, Asteriadis S. Joint modelling of audio-visual cues using attention mechanisms for emotion recognition. Multimed Tools Appl. 2023;82:11239\u201364. https:\/\/doi.org\/10.1007\/s11042-022-13557-w.","journal-title":"Multimed Tools Appl"},{"key":"1300_CR16","doi-asserted-by":"publisher","unstructured":"Wang Y, Shen Y, Liu Z, Liang PP, Zadeh A, Morency LP. Words can shift: Dynamically adjusting word representations using nonverbal behaviors, 33rd AAAI Conf. Artif. Intell. AAAI 2019, 31st Innov. Appl. Artif. Intell. Conf. IAAI 2019 9th AAAI Symp. Educ. Adv. Artif. Intell. EAAI 2019 (2019) 7216\u20137223. https:\/\/doi.org\/10.1609\/aaai.v33i01.33017216","DOI":"10.1609\/aaai.v33i01.33017216"},{"key":"1300_CR17","doi-asserted-by":"crossref","unstructured":"Audebert N, Herold C, Slimani K, Vidal C. Multimodal deep networks for text and image-based document classification, (2020).","DOI":"10.1007\/978-3-030-43823-4_35"},{"key":"1300_CR18","doi-asserted-by":"publisher","first-page":"46","DOI":"10.1016\/j.inffus.2020.10.011","volume":"68","author":"P Tzirakis","year":"2021","unstructured":"Tzirakis P, Chen J, Zafeiriou S, Schuller B. End-to-end multimodal affect recognition in real-world environments. Inf Fusion. 2021;68:46\u201353. https:\/\/doi.org\/10.1016\/j.inffus.2020.10.011.","journal-title":"Inf Fusion"},{"key":"1300_CR19","unstructured":"Chauhan DS, Ghosal D, Poria S, Ekbal A, Bhattacharyya P. Multi-task Learning for Multi-modal Emotion Recognition and Sentiment Analysis, (2019)."},{"key":"1300_CR20","doi-asserted-by":"publisher","first-page":"20727","DOI":"10.1109\/ACCESS.2022.3149214","volume":"10","author":"J Heredia","year":"2022","unstructured":"Heredia J, Lopes-silva E, Cardinale Y, Diaz-amado J, Dongo I, Graterol W, et al. Adaptive multimodal emotion detection architecture for social robots. IEEE Access. 2022;10:20727\u201344. https:\/\/doi.org\/10.1109\/ACCESS.2022.3149214.","journal-title":"IEEE Access"},{"key":"1300_CR21","doi-asserted-by":"publisher","first-page":"7037","DOI":"10.1109\/ICASSP43922.2022.9747397","volume":"2022\u2013May","author":"D Hu","year":"2022","unstructured":"Hu D, Hou X, Wei L, Jiang L, Mo Y. Mm-Dfn: multimodal dynamic fusion network for emotion recognition in conversations. ICASSP IEEE Int Conf Acoust Speech Signal Process - Proc. 2022;2022\u2013May:7037\u201341. https:\/\/doi.org\/10.1109\/ICASSP43922.2022.9747397.","journal-title":"ICASSP IEEE Int Conf Acoust Speech Signal Process - Proc"},{"key":"1300_CR22","doi-asserted-by":"publisher","unstructured":"Chen F, Shao J, Zhu S, Shen HT. Multivariate, Multi-Frequency and Multimodal: Rethinking Graph Neural Networks for Emotion Recognition in Conversation, Proc. IEEE Comput. Soc. Conf. Comput. Vis. Pattern Recognit. 2023-June (2023) 10761\u201310770. 
https:\/\/doi.org\/10.1109\/CVPR52729.2023.01036","DOI":"10.1109\/CVPR52729.2023.01036"},{"key":"1300_CR23","doi-asserted-by":"publisher","first-page":"103268","DOI":"10.1016\/J.INFFUS.2025.103268","volume":"123","author":"X Zhu","year":"2025","unstructured":"Zhu X, Wang Y, Cambria E, Rida I, L\u00f3pez JS, Cui L, Wang R. Robust multimodal emotion recognition in conversational contexts based on diffusion and Transformers. Inf Fusion. 2025;123:103268. https:\/\/doi.org\/10.1016\/J.INFFUS.2025.103268.","journal-title":"Inf Fusion"},{"key":"1300_CR24","unstructured":"Zhang T, Patil SG, Jain N, Shen S, Zaharia M, Stoica I, Gonzalez JE. RAFT: Adapting Language Model to Domain Specific RAG, (2024). https:\/\/arxiv.org\/pdf\/2403.10131 (accessed September 6, 2025)."},{"key":"1300_CR25","doi-asserted-by":"publisher","DOI":"10.1109\/TCSS.2025.3572495","author":"R Wang","year":"2025","unstructured":"Wang R, Guo C, Shabaz M, Rida I, Cambria E, Zhu X. Contextual Interaction-Based multimodal emotion analysis with enhanced semantic information. IEEE Trans Comput Soc Syst. 2025. https:\/\/doi.org\/10.1109\/TCSS.2025.3572495.","journal-title":"IEEE Trans Comput Soc Syst"},{"key":"1300_CR26","doi-asserted-by":"publisher","unstructured":"Huang Y, Zhu X, Wang R, Xie Y, Fong S. A Dynamic Global\u2013Local Spatiotemporal Graph Framework for Multi-City PM2.5 Long-Term Forecasting, Remote Sens. 2025, Vol. 17, Page 2750 17 (2025) 2750. https:\/\/doi.org\/10.3390\/RS17162750","DOI":"10.3390\/RS17162750"},{"key":"1300_CR27","doi-asserted-by":"publisher","DOI":"10.1016\/J.PATCOG.2025.111993","volume":"169","author":"Y Ye","year":"2026","unstructured":"Ye Y, Liu N, Zhao Y, Zhu X, Wang J, Liu Y. Advancing federated domain generalization in ophthalmology: vision enhancement and consistency assurance for multicenter fundus image segmentation. Pattern Recognit. 2026;169:111993. https:\/\/doi.org\/10.1016\/J.PATCOG.2025.111993.","journal-title":"Pattern Recognit"},{"key":"1300_CR28","doi-asserted-by":"publisher","unstructured":"Gupta C, Gill NS, Gulia P, Yadav S, Pau G, Ieee M. A Real-time 3-Dimensional object detection based human action recognition model. IEEE Open J Comput Soc PP. 2023;1\u201312. https:\/\/doi.org\/10.1109\/OJCS.2023.3334528.","DOI":"10.1109\/OJCS.2023.3334528"},{"key":"1300_CR29","first-page":"1","volume":"2017 \u2013 Work Tra","author":"B Krause","year":"2019","unstructured":"Krause B, Murray I, Renals S, Lu L. Multiplicative LSTM for sequence modelling. 5th Int Conf Learn Represent ICLR. 2019;2017 \u2013 Work Track Proc:1\u201311.","journal-title":"5th Int Conf Learn Represent ICLR"},{"key":"1300_CR30","doi-asserted-by":"publisher","first-page":"111","DOI":"10.1016\/j.aiopen.2022.10.001","volume":"3","author":"T Lin","year":"2022","unstructured":"Lin T, Wang Y, Liu X, Qiu X. A survey of transformers. AI Open. 2022;3:111\u201332. https:\/\/doi.org\/10.1016\/j.aiopen.2022.10.001.","journal-title":"AI Open"},{"key":"1300_CR31","doi-asserted-by":"publisher","DOI":"10.1007\/s11554-023-01299-3","author":"C Gupta","year":"2023","unstructured":"Gupta C, Gill NS, Gulia P, Chatterjee JM. A novel finetuned YOLOv6 transfer learning model for real-time object detection. J Real-Time Image Process. 2023. https:\/\/doi.org\/10.1007\/s11554-023-01299-3.","journal-title":"J Real-Time Image Process"},{"key":"1300_CR32","unstructured":"Home IEMOCAP-, editor. (n.d.). 
https:\/\/sail.usc.edu\/iemocap\/ (accessed December 27, 2023)."},{"key":"1300_CR33","doi-asserted-by":"publisher","first-page":"335","DOI":"10.1007\/S10579-008-9076-6\/METRICS","volume":"42","author":"C Busso","year":"2008","unstructured":"Busso C, Bulut M, Lee CC, Kazemzadeh A, Mower E, Kim S, Chang JN, Lee S, Narayanan SS. Interactive emotional dyadic motion capture database. Lang Resour Eval. 2008;42:335\u201359. https:\/\/doi.org\/10.1007\/S10579-008-9076-6\/METRICS.","journal-title":"Lang Resour Eval"},{"key":"1300_CR34","first-page":"343","volume":"7","author":"FG Maciej Serda","year":"2013","unstructured":"Maciej Serda FG, Becker M, Cleary RM, Team H, Holtermann D, The N, Agenda P, Science SK, Sk R, Hinnebusch R, Hinnebusch A, Rabinovich I, Olmert Y, D.Q.G.L.Q. Uld WKHU, Ri V, Lq WKH, Frxqwu E, Zklfk LV, Edvhg RQ, Wkh FG, Becker N, Aboueldahab R, Khalaf LR, De Elvira T, Zintl R, Hinnebusch M, Karimi SM, Mousavi Shafaee D, O \u2019driscoll S, Watts J, Kavanagh B, Frederick T, Norlen A, O\u2019Mahony P, Voorhies T, Szayna N, Spalding MO, Jackson M, Morelli B, Satpathy B, Muniapan M, Dass P, Katsamunska Y, Pamuk A, Stahn E, Commission TED, Piccone MK, Annan S, Djankov M, Reynal-Querol M, Couttenier R, Soubeyran P, Vym E, Prague W, Bank C, Bodea N, Sambanis A, Florea A, Florea M, Karimi SM. Mousavi Shafaee, N. Spalding, N. Sambanis, \u062d. \u0641\u0627\u0637\u0645\u06cc, synteza i aktywno\u015b\u0107 Biologiczna Nowych analog\u00f3w Tiosemikarbazonowych chelator\u00f3w \u017celaza. Uniw \u015al\u0105ski. 2013;7:343\u201354.","journal-title":"Uniw \u015al\u0105ski"},{"key":"1300_CR35","doi-asserted-by":"publisher","unstructured":"Zadeh A, Liang PP, Vanbriesen J, Poria S, Tong E, Cambria E, Chen M, Morency LP. Multimodal Language analysis in the wild: CMU-MOSEI dataset and interpretable dynamic fusion graph. ACL 2018\u201356th Annu Meet Assoc Comput Linguist Proc Conf (Long Pap 1. 2018;2236\u20132246. https:\/\/doi.org\/10.18653\/v1\/p18-1208.","DOI":"10.18653\/v1\/p18-1208"},{"key":"1300_CR36","unstructured":"Chen SY, Hsu CC, Kuo CC, Huang THK, Ku LW. Emotionlines: An emotion corpus of multi-party conversations, Lr. 2018\u201311th Int. Conf. Lang. Resour. Eval. (2019) 1597\u20131601. https:\/\/www.kaggle.com\/datasets\/zaber666\/meld-dataset (accessed March 3, 2025)."},{"key":"1300_CR37","doi-asserted-by":"publisher","DOI":"10.17694\/BAJECE.1372107","author":"H Farooq","year":"2024","unstructured":"Farooq H, Al-Saadawi T, Das\u00b8abstractdas\u00b8 R. Balk J Electr Comput Eng. 2024;12:36\u201346. https:\/\/doi.org\/10.17694\/BAJECE.1372107. Multimodal Emotion Recognition Using Bi-LG-GCN for MELD Dataset."},{"key":"1300_CR38","doi-asserted-by":"publisher","first-page":"2736","DOI":"10.1093\/cid\/ciz437","volume":"70","author":"Y Liu","year":"2020","unstructured":"Liu Y, Tsalik EL, Jiang Y, Ko ER, Woods CW, Henao R, et al. Average weighted accuracy: pragmatic analysis for a rapid diagnostics in categorizing acute lung infections (RADICAL) study. Clin Infect Dis. 2020;70:2736\u201342. https:\/\/doi.org\/10.1093\/cid\/ciz437.","journal-title":"Clin Infect Dis"},{"key":"1300_CR39","doi-asserted-by":"publisher","first-page":"102951","DOI":"10.1016\/J.DSP.2020.102951","volume":"110","author":"M Shah Fahad","year":"2021","unstructured":"Shah Fahad M, Ranjan A, Yadav J, Deepak A. A survey of speech emotion recognition in natural environment. Digit Signal Process. 2021;110:102951. 
https:\/\/doi.org\/10.1016\/J.DSP.2020.102951.","journal-title":"Digit Signal Process"},{"key":"1300_CR40","unstructured":"Paszke A, Gross S, Massa F, Lerer A, Bradbury J, Chanan G, Killeen T, Lin Z, Gimelshein N, Antiga L, Desmaison A, K\u00f6pf A, Yang E, DeVito Z, Raison M, Tejani A, Chilamkurthy S, Steiner B, Fang L, Bai J, Chintala S. PyTorch: An imperative style, high-performance deep learning library, Adv. Neural Inf. Process. Syst. 32 (2019)."},{"key":"1300_CR41","unstructured":"Lan Z, Chen M, Goodman S, Gimpel K, Sharma P, Soricut R. Albert: a Lite Bert for Self-Supervised Learning of Language Representations, 8th Int. Conf. Learn. Represent. ICLR 2020 (2020) 1\u201317."},{"key":"1300_CR42","first-page":"1","volume":"2015 - Conf. Tr","author":"DP Kingma","year":"2015","unstructured":"Kingma DP. J.L. Ba 2015 Adam: A method for stochastic optimization. 3rd Int Conf Learn Represent ICLR 2015 - Conf. Track Proc. 1\u201315.","journal-title":"3rd Int Conf Learn Represent ICLR"},{"key":"1300_CR43","doi-asserted-by":"publisher","unstructured":"Hina I, Shaukat A, Akram MU. Multimodal Emotion Recognition using Deep Learning Architectures, 2022 2nd Int. Conf. Digit. Futur. Transform. Technol. ICoDT2 2022 (2022). https:\/\/doi.org\/10.1109\/ICODT255437.2022.9787437","DOI":"10.1109\/ICODT255437.2022.9787437"},{"key":"1300_CR44","doi-asserted-by":"publisher","first-page":"756","DOI":"10.1109\/TAFFC.2019.2961089","volume":"13","author":"I Kansizoglou","year":"2022","unstructured":"Kansizoglou I, Bampis L, Gasteratos A. An active learning paradigm for online audio-visual emotion recognition. IEEE Trans Affect Comput. 2022;13:756\u201368. https:\/\/doi.org\/10.1109\/TAFFC.2019.2961089.","journal-title":"IEEE Trans Affect Comput"},{"key":"1300_CR45","doi-asserted-by":"publisher","DOI":"10.3390\/s21227665","author":"C Luna-Jim\u00e9nez","year":"2021","unstructured":"Luna-Jim\u00e9nez C, Griol D, Callejas Z, Kleinlein R, Montero JM, Fern\u00e1ndez-Mart\u00ednez F. Sens (Switzerland). 2021;21:1\u201329. https:\/\/doi.org\/10.3390\/s21227665. 
Multimodal Emotion Recognition on RAVDESS Dataset Using Transfer Learning."}],"container-title":["Journal of Big Data"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1186\/s40537-025-01300-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1186\/s40537-025-01300-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1186\/s40537-025-01300-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,18]],"date-time":"2025-11-18T10:59:47Z","timestamp":1763463587000},"score":1,"resource":{"primary":{"URL":"https:\/\/journalofbigdata.springeropen.com\/articles\/10.1186\/s40537-025-01300-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,18]]},"references-count":45,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2025,12]]}},"alternative-id":["1300"],"URL":"https:\/\/doi.org\/10.1186\/s40537-025-01300-9","relation":{"references":[{"id-type":"doi","id":"10.17694\/BAJECE.1372107","asserted-by":"subject"},{"id-type":"doi","id":"10.3390\/s21227665","asserted-by":"subject"}]},"ISSN":["2196-1115"],"issn-type":[{"value":"2196-1115","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,18]]},"assertion":[{"value":"26 April 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 October 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 November 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"256"}}
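
The record above is a Crossref REST API "work" message for DOI 10.1186/s40537-025-01300-9. A minimal sketch of how such a record can be retrieved and a few fields read follows, assuming network access and the third-party requests package; the field names are taken from the record above, everything else is illustrative.

import requests

# Fetch the work record for this DOI from the public Crossref REST API.
DOI = "10.1186/s40537-025-01300-9"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()

# The payload mirrors the record above: {"status": "ok", ..., "message": {...}}.
work = resp.json()["message"]

print(work["title"][0])            # article title
print(work["container-title"][0])  # journal name: "Journal of Big Data"
print(work["DOI"], work["ISSN"][0])
print("references:", work["reference-count"])

# Each entry in "reference" may carry a DOI, an unstructured citation string, or both.
for ref in work.get("reference", [])[:3]:
    print("-", ref.get("DOI") or ref.get("unstructured", "")[:80])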