{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T09:05:02Z","timestamp":1775552702032,"version":"3.50.1"},"reference-count":54,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,12,13]],"date-time":"2025-12-13T00:00:00Z","timestamp":1765584000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2026,1,19]],"date-time":"2026-01-19T00:00:00Z","timestamp":1768780800000},"content-version":"vor","delay-in-days":37,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J AUDIO SPEECH MUSIC PROC."],"DOI":"10.1186\/s13636-025-00439-w","type":"journal-article","created":{"date-parts":[[2025,12,13]],"date-time":"2025-12-13T08:45:55Z","timestamp":1765615555000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Estimating depression and anxiety scores from conversational speech in females with and without comorbidity"],"prefix":"10.1186","volume":"2026","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6550-7353","authenticated-orcid":false,"given":"Asl\u0131","family":"Be\u015firli","sequence":"first","affiliation":[]},{"given":"Tu\u011frahan","family":"Karakad\u0131o\u011flu","sequence":"additional","affiliation":[]},{"given":"Cenk","family":"Demiro\u011flu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,12,13]]},"reference":[{"key":"439_CR1","unstructured":"World Health Organization: Depression. (2024). https:\/\/www.who.int\/health-topics\/depression. 
Accessed 14 Apr 2025"},{"issue":"4","key":"439_CR2","doi-asserted-by":"publisher","first-page":"357","DOI":"10.1109\/TASSP.1980.1163420","volume":"28","author":"S Davis","year":"1980","unstructured":"S. Davis, P. Mermelstein, Comparison of parametric representations for monosyllabic word recognition in continuously spoken sentences. IEEE Trans. Acoust. Speech Signal Process. 28(4), 357\u2013366 (1980)","journal-title":"IEEE Trans. Acoust. Speech Signal Process."},{"issue":"2","key":"439_CR3","doi-asserted-by":"publisher","first-page":"190","DOI":"10.1109\/TAFFC.2015.2457417","volume":"7","author":"F Eyben","year":"2015","unstructured":"F. Eyben, K.R. Scherer, B.W. Schuller, J. Sundberg, E. Andr\u00e9, C. Busso, L.Y. Devillers, J. Epps, P. Laukka, S.S. Narayanan et al., The geneva minimalistic acoustic parameter set (gemaps) for voice research and affective computing. IEEE Trans. Affect. Comput. 7(2), 190\u2013202 (2015)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"439_CR4","doi-asserted-by":"crossref","unstructured":"B. Schuller, S. Steidl, A. Batliner, J. Hirschberg, J.K. Burgoon, A. Baird et al., in Proceedings of Interspeech 2016. The interspeech 2016 computational paralinguistics challenge: Deception, sincerity & native language (ISCA, San Francisco, 2016), pp. 2001\u20132005","DOI":"10.21437\/Interspeech.2016-129"},{"key":"439_CR5","doi-asserted-by":"publisher","first-page":"10","DOI":"10.1016\/j.specom.2015.03.004","volume":"71","author":"N Cummins","year":"2015","unstructured":"N. Cummins, S. Scherer, J. Krajewski, S. Schnieder, J. Epps, T.F. Quatieri, A review of depression and suicide risk assessment using speech analysis. Speech Commun. 71, 10\u201349 (2015)","journal-title":"Speech Commun."},{"key":"439_CR6","doi-asserted-by":"crossref","unstructured":"Seneviratne. N, Espy-Wilson. C, Multimodal depression severity score prediction using articulatory coordination features and hierarchical attention based text embeddings. In Proc. 
INTERSPEECH, (ISCA, Baixas, France, 2022) pp. 3353\u20133357.","DOI":"10.21437\/Interspeech.2022-11099"},{"key":"439_CR7","doi-asserted-by":"crossref","unstructured":"Fan, C. Lv, Z. Pei, S. Niu, M. CSENet: Complex squeeze-and-excitation network for speech depression level prediction. In Proc. ICASSP, (IEEE, Piscataway, NJ, USA, 2022) pp. 546\u2013550.","DOI":"10.1109\/ICASSP43922.2022.9746011"},{"key":"439_CR8","doi-asserted-by":"crossref","unstructured":"Ringeval. F, Valstar. M, Schuller. B, Cowie. D, Cowie. R, Pantic. M, et al. AVEC 2019 workshop and challenge: State-of-mind, detecting depression with AI, and cross-cultural affect recognition. In Proceedings of the 9th International Audio\/Visual Emotion Challenge and Workshop (ACM, New York, NY, USA, AVEC 2019) pp. 3\u201312.","DOI":"10.1145\/3347320.3357688"},{"key":"439_CR9","doi-asserted-by":"crossref","unstructured":"Egas-L\u00f3pez. J.V, Kiss. G, Sztah\u00f3. D, Gosztolya. G, Automatic assessment of the degree of clinical depression from speech using x-vectors. In Proc. ICASSP 2022 \u2013 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (IEEE, Piscataway, NJ, USA 2022) pp. 8502\u20138506.","DOI":"10.1109\/ICASSP43922.2022.9746068"},{"key":"439_CR10","doi-asserted-by":"crossref","unstructured":"Zhang. P, Wu. M, Yu. K, ReCLR: Reference-enhanced contrastive learning of audio representation for depression detection. In Proc. INTERSPEECH (ISCA, Baixas, France 2023) pp. 2998\u20133002.","DOI":"10.21437\/Interspeech.2023-2474"},{"key":"439_CR11","doi-asserted-by":"crossref","unstructured":"Tasnim. M, Ramos. R.D, Stroulia. E, Trejo. L.A. A machine-learning model for detecting depression, anxiety, and stress from speech. In Proc. ICASSP 2024 \u2013 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (IEEE, Piscataway, NJ, USA 2024) pp. 
7085\u20137089.","DOI":"10.1109\/ICASSP48485.2024.10446567"},{"key":"439_CR12","doi-asserted-by":"crossref","unstructured":"Seneviratne. N, Espy-Wilson. C. Generalized dilated CNN models for depression detection using inverted vocal tract variables. In Proc. INTERSPEECH (ISCA, Baixas, France 2021) pp. 4513\u20134517.","DOI":"10.21437\/Interspeech.2021-1960"},{"key":"439_CR13","doi-asserted-by":"crossref","unstructured":"Dumpala. S.H, Rempel. S, Dikaios. K, Sajjadian. M, Uher. R, Oore. S. Estimating severity of depression from acoustic features and embeddings of natural speech. In Proc. ICASSP 2021 \u2013 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (IEEE, Piscataway, NJ, USA 2021) pp. 7278\u20137282.","DOI":"10.1109\/ICASSP39728.2021.9414129"},{"key":"439_CR14","doi-asserted-by":"crossref","unstructured":"Shen. Y, Yang. H, Lin, L. Automatic depression detection: an emotional audio-textual corpus and a GRU\/BiLSTM-based model. In Proc. ICASSP 2022 \u2013 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (IEEE, Piscataway, NJ, USA 2022) pp. 6247\u20136251.","DOI":"10.1109\/ICASSP43922.2022.9746569"},{"key":"439_CR15","doi-asserted-by":"crossref","unstructured":"Seneviratne. N, Espy-Wilson, C. Speech based depression severity level classification using a multi-stage dilated CNN-LSTM model. In Proc. INTERSPEECH (ISCA, Baixas, France 2021) pp. 2526\u20132530.","DOI":"10.21437\/Interspeech.2021-1967"},{"key":"439_CR16","doi-asserted-by":"crossref","unstructured":"Zhao. Z, Bao. Z, Zhang. Z, Cummins. N, Wang. H, Schuller, B. Hierarchical attention transfer networks for depression assessment from speech. In Proc. ICASSP 2020 \u2013 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (IEEE, Piscataway, NJ, USA 2020) pp. 7159\u20137163.","DOI":"10.1109\/ICASSP40776.2020.9053207"},{"key":"439_CR17","doi-asserted-by":"crossref","unstructured":"Tao. F, Ge. X, Ma. W, Esposito. 
A, Vinciarelli, A. Multi-local attention for speech-based depression detection. In Proc. ICASSP 2023 \u2013 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (IEEE, Piscataway, NJ, USA 2023) pp. 1\u20135.","DOI":"10.1109\/ICASSP49357.2023.10095757"},{"key":"439_CR18","doi-asserted-by":"crossref","unstructured":"Campbell. E.L, Dineley. J, Conde. P, Matcham. F, White. K.M, Oetzmann, C. et al. Classifying depression symptom severity: assessment of speech representations in personalized and generalized machine learning models. In Proc. INTERSPEECH (ISCA, Baixas, France 2023) pp. 1738\u20131742.","DOI":"10.21437\/Interspeech.2023-1721"},{"key":"439_CR19","doi-asserted-by":"crossref","unstructured":"Ravi. V, Wang. J, Flint. J, Alwan. A. Fraug: A frame rate based data augmentation method for depression detection from speech signals. In Proc. ICASSP 2022 \u2013 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (IEEE, Piscataway, NJ, USA, 2022) pp. 6267\u20136271.","DOI":"10.1109\/ICASSP43922.2022.9746307"},{"key":"439_CR20","doi-asserted-by":"crossref","unstructured":"Zhao. Z, Li. Q, Cummins. N, Liu. B, Wang. H, Tao. J. Schuller, B.W. Hybrid network feature extraction for depression assessment from speech. In Proc. INTERSPEECH (ISCA, Baixas, France 2020) pp. 4956\u20134960.","DOI":"10.21437\/Interspeech.2020-2396"},{"key":"439_CR21","doi-asserted-by":"crossref","unstructured":"Li. Y, Niu. M, Zhao. Z, Tao, J. Automatic depression level assessment from speech by long-term global information embedding. In Proc. ICASSP 2022 \u2013 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (IEEE, Piscataway, NJ, USA 2022) pp. 8507\u20138511.","DOI":"10.1109\/ICASSP43922.2022.9747292"},{"key":"439_CR22","doi-asserted-by":"crossref","unstructured":"Li. Q, Wang. D, Ren. Y, Gao. Y, Li. Y. FTA-net: A frequency and time attention network for speech depression detection. In Proc. 
INTERSPEECH (ISCA, Baixas, France 2023) pp. 1723\u20131727.","DOI":"10.21437\/Interspeech.2023-296"},{"key":"439_CR23","doi-asserted-by":"crossref","unstructured":"Wu. W, Zhang. C, Woodland. P.C. Self-supervised representations in speech-based depression detection. In Proc. ICASSP 2023 \u2013 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (IEEE, Piscataway, NJ, USA 2023) pp. 1\u20135.","DOI":"10.1109\/ICASSP49357.2023.10094910"},{"key":"439_CR24","doi-asserted-by":"crossref","unstructured":"Schneider. S, Baevski. A, Collobert. R, Auli. M. wav2vec: Unsupervised pre-training for speech recognition. Proc. Interspeech (Graz, Austria 2019) pp. 3465\u20133469.","DOI":"10.21437\/Interspeech.2019-1873"},{"key":"439_CR25","doi-asserted-by":"crossref","unstructured":"Hsu. W. N, Bolte. B, Tsai. Y. H. H, Lakhotia. K, Salakhutdinov. R, Mohamed, A. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE\/ACM transactions on audio, speech, and language processing.\u00a029, 3451-3460 (2021)","DOI":"10.1109\/TASLP.2021.3122291"},{"issue":"6","key":"439_CR26","doi-asserted-by":"publisher","first-page":"1505","DOI":"10.1109\/JSTSP.2022.3188113","volume":"16","author":"S Chen","year":"2022","unstructured":"S. Chen, C. Wang, Y. Wu, S. Liu, Z. Chen, Z. Chen et al., Wavlm: Large-scale self-supervised pre-training for full stack speech processing. IEEE J. Sel. Top. Signal Process. 16(6), 1505\u20131518 (2022)","journal-title":"IEEE J. Sel. Top. Signal Process."},{"key":"439_CR27","doi-asserted-by":"crossref","unstructured":"Wang. J, Ravi. V, Flint. J, Alwan. A. Unsupervised instance discriminative learning for depression detection from speech signals. In Proc. INTERSPEECH (ISCA, Baixas, France 2022) pp. 2018\u20132022.","DOI":"10.21437\/Interspeech.2022-10814"},{"key":"439_CR28","doi-asserted-by":"crossref","unstructured":"Valstar. M, Schuller. B, Smith. K, Eyben. F, Jiang. B, Bilakhia. S, et al. 
AVEC 2013: the continuous audio\/visual emotion and depression recognition challenge. In Proceedings of the 3rd ACM International Workshop on Audio\/Visual Emotion Challenge (AVEC \u201913), Barcelona, Spain, (ACM, New York, NY, USA 2013) pp. 3\u201310.","DOI":"10.1145\/2512530.2512533"},{"key":"439_CR29","doi-asserted-by":"crossref","unstructured":"Valstar. M, Schuller. B, Smith. K, Almaev. T, Eyben. F, Krajewski. J, Cowie. R, Pantic, M. AVEC 2014: 3D dimensional affect and depression recognition challenge. In Proceedings of the 4th International Workshop on Audio\/Visual Emotion Challenge (AVEC \u201914), Orlando, FL, USA, (ACM, New York, NY, USA 2014) pp. 3\u201310.","DOI":"10.1145\/2661806.2661807"},{"key":"439_CR30","unstructured":"Gratch. J, Artstein. R, Lucas. G.M, Stratou. G, Scherer. S, Nazarian. A, Wood. R, Boberg. J, DeVault. D, Marsella. S, et al. The distress analysis interview corpus of human and computer interviews. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC\u201914), (ELRA,\u00a0Reykjavik, Iceland, 2014) pp. 3123\u20133128."},{"key":"439_CR31","doi-asserted-by":"publisher","first-page":"561","DOI":"10.1001\/archpsyc.1961.01710120031004","volume":"4","author":"AT Beck","year":"1961","unstructured":"A.T. Beck, C.H. Ward, M. Mendelson, J. Mock, J. Erbaugh, An inventory for measuring depression. Arch. Gen. Psychiatr. 4, 561\u2013571 (1961)","journal-title":"Arch. Gen. Psychiatr."},{"key":"439_CR32","doi-asserted-by":"crossref","unstructured":"Liu, S. Multi-head self-attention network for depression level estimation from speech. In Proc. ICSP 2024 \u2013 2024 9th International Conference on Intelligent Computing and Signal Processing (IEEE, Piscataway, NJ, USA, 2024) pp. 1088\u20131091.","DOI":"10.1109\/ICSP62122.2024.10743391"},{"key":"439_CR33","doi-asserted-by":"crossref","unstructured":"Li. S, Xie. Z, Naqvi. S.M. Efficient long speech sequence modelling for time-domain depression level estimation. 
In Proc. ICASSP 2025 \u2013 2025 IEEE International Conference on Acoustics, Speech and Signal Processing; Hyderabad, India, (IEEE, Piscataway, NJ, USA 2025) pp. 1\u20135.","DOI":"10.1109\/ICASSP49660.2025.10889290"},{"key":"439_CR34","doi-asserted-by":"crossref","unstructured":"Yu. J, Kaya. H. Using emotionally rich speech segments for depression prediction. In Proc. ICASSP 2025 \u2013 2025 IEEE International Conference on Acoustics, Speech and Signal Processing; Hyderabad, India, (IEEE, Piscataway, NJ, USA 2025) pp. 1\u20135.","DOI":"10.1109\/ICASSP49660.2025.10889722"},{"key":"439_CR35","doi-asserted-by":"crossref","unstructured":"Baird. A, Cummins. N, Schnieder. S, Krajewski. J. Schuller, B.W. An evaluation of the effect of anxiety on speech\u2014computational prediction of anxiety from sustained vowels. In Proc. INTERSPEECH (ISCA, Baixas, France 2020) p. 4951\u20134955.","DOI":"10.21437\/Interspeech.2020-1801"},{"key":"439_CR36","unstructured":"Agarwal, P., Jindal, A., & Singh, S. Detecting anxiety from short clips of free-form speech. (2023). arXiv preprint arXiv:2312.15272."},{"key":"439_CR37","doi-asserted-by":"crossref","unstructured":"Eyben. F, W\u00f6llmer. M, Schuller. B. openSMILE: The Munich versatile and fast open-source audio feature extractor. In Proceedings of the 18th ACM International Conference on Multimedia (MM \u201910) (ACM, New York, NY, USA 2010) pp. 1459\u20131462.","DOI":"10.1145\/1873951.1874246"},{"key":"439_CR38","doi-asserted-by":"crossref","unstructured":"S. Amiriparian, M. Gerczuk, S. Ottl, N. Cummins, M. Freitag, S. Pugachevskiy et al., in Interspeech 2017. Snore sound classification using image-based deep spectrum features (ISCA, 2017), pp. 3512\u20133516","DOI":"10.21437\/Interspeech.2017-434"},{"key":"439_CR39","doi-asserted-by":"crossref","unstructured":"Choi, K.W. Kim, Y.K. Jeon, H.J. Comorbid anxiety and depression: Clinical and conceptual consideration and transdiagnostic treatment. 
In Anxiety Disorders: Rethinking Understanding of Recent Discoveries. Adv Exp Med Biol\u00a01191, 219\u2013235 (2020)","DOI":"10.1007\/978-981-32-9705-0_14"},{"key":"439_CR40","doi-asserted-by":"crossref","unstructured":"Bailey, A. Plumbley, M.D. Gender bias in depression detection using audio features. In Proc. EUSIPCO 2021 \u2013 29th European Signal Processing Conference (IEEE, Piscataway, NJ, USA 2021) pp. 596\u2013600.","DOI":"10.23919\/EUSIPCO54536.2021.9615933"},{"issue":"6","key":"439_CR41","doi-asserted-by":"publisher","first-page":"486","DOI":"10.1192\/bjp.177.6.486","volume":"177","author":"M Piccinelli","year":"2000","unstructured":"M. Piccinelli, G. Wilkinson, Gender differences in depression: Critical review. Br. J. Psychiatry 177(6), 486\u2013492 (2000)","journal-title":"Br. J. Psychiatry"},{"key":"439_CR42","unstructured":"V. Uloza, V. Saferis, I. Uloziene, Smoking and voice: Acoustic analysis in men and women. Medicina (Kaunas) 41(9), 757\u2013765 (2005). Demonstrates acoustic changes in smokers\u2019 voices compared to non-smokers"},{"key":"439_CR43","doi-asserted-by":"publisher","unstructured":"K. Nemr, A. Amar, M. Abrah\u00e3o, G. Leite, H. Kohler, A. Santos, Smoking and voice: Correlation between acoustic analysis and self-assessment. Braz. J. Otorhinolaryngol. 79(4), 461\u2013466 (2013). https:\/\/doi.org\/10.5935\/1808-8694.20130083. Explores both objective acoustic parameters and subjective self-assessment in smokers","DOI":"10.5935\/1808-8694.20130083"},{"key":"439_CR44","doi-asserted-by":"publisher","unstructured":"P.H.R. Amaral, L.W. Lopes, A.S. Ferreira, The impact of smoking on vocal quality: A systematic review. J. Voice 33(5), 765\u20132376534 (2019). https:\/\/doi.org\/10.1016\/j.jvoice.2018.02.006. 
Summarizes acoustic and perceptual changes in smokers\u2019 voices","DOI":"10.1016\/j.jvoice.2018.02.006"},{"key":"439_CR45","doi-asserted-by":"publisher","first-page":"893","DOI":"10.1037\/0022-006X.56.6.893","volume":"56","author":"AT Beck","year":"1988","unstructured":"A.T. Beck, N. Epstein, G. Brown, R.A. Steer, An inventory for measuring clinical anxiety: Psychometric properties. J. Consult. Clin. Psychol. 56, 893\u2013897 (1988)","journal-title":"J. Consult. Clin. Psychol."},{"key":"439_CR46","first-page":"12449","volume":"33","author":"A Baevski","year":"2020","unstructured":"A. Baevski, Y. Zhou, A. Mohamed, M. Auli, wav2vec 2.0: A framework for self-supervised learning of speech representations. Adv. Neural Inf. Process. Syst. 33, 12449\u201312460 (2020)","journal-title":"Adv. Neural Inf. Process. Syst."},{"issue":"2","key":"439_CR47","first-page":"3","volume":"1","author":"EJ Hu","year":"2022","unstructured":"E.J. Hu, Y. Shen, P. Wallis, Z. Allen-Zhu, Y. Li, S. Wang, L. Wang, W. Chen et al., Lora: Low-rank adaptation of large language models. ICLR 1(2), 3 (2022)","journal-title":"ICLR"},{"key":"439_CR48","unstructured":"Vaswani. A, Shazeer. N, Parmar. N, Uszkoreit. J, Jones. L, Gomez. A.N, Kaiser. \u0141, Polosukhin, I. Attention is all you need. Adv Neural Inf Process Syst. 30, 5998\u20136008\u00a0(2017)"},{"issue":"5\u20136","key":"439_CR49","doi-asserted-by":"publisher","first-page":"602","DOI":"10.1016\/j.neunet.2005.06.042","volume":"18","author":"A Graves","year":"2005","unstructured":"A. Graves, J. Schmidhuber, Framewise phoneme classification with bidirectional lstm and other neural network architectures. Neural Netw. 18(5\u20136), 602\u2013610 (2005)","journal-title":"Neural Netw."},{"key":"439_CR50","doi-asserted-by":"crossref","unstructured":"Bredin, H. pyannote.audio 2.1 speaker diarization pipeline: principle, benchmark, and recipe. In Proc. 
INTERSPEECH 2023, (ISCA, Baixas, France 2023)","DOI":"10.21437\/Interspeech.2023-105"},{"key":"439_CR51","doi-asserted-by":"crossref","unstructured":"Plaquet, A. Bredin, H. Powerset multi-class cross entropy loss for neural speaker diarization. In Proc. INTERSPEECH 2023, (ISCA, Baixas, France 2023)","DOI":"10.21437\/Interspeech.2023-205"},{"key":"439_CR52","doi-asserted-by":"crossref","unstructured":"Chen, T. Guestrin, C. XGBoost: A scalable tree boosting system. In: Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. KDD \u201916, (ACM, New York, NY, USA,\u00a02016) pp. 785\u2013794.","DOI":"10.1145\/2939672.2939785"},{"issue":"3","key":"439_CR53","doi-asserted-by":"publisher","first-page":"199","DOI":"10.1023\/B:STCO.0000035301.49549.88","volume":"14","author":"AJ Smola","year":"2004","unstructured":"A.J. Smola, B. Sch\u00f6lkopf, A tutorial on support vector regression. Stat. Comput. 14(3), 199\u2013222 (2004)","journal-title":"Stat. Comput."},{"key":"439_CR54","doi-asserted-by":"crossref","unstructured":"Breiman, L.: Random forests. Mach. Learn. 
45(1), 5\u201332 (2001)","DOI":"10.1023\/A:1010933404324"}],"container-title":["EURASIP Journal on Audio, Speech, and Music Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1186\/s13636-025-00439-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1186\/s13636-025-00439-w","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1186\/s13636-025-00439-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,19]],"date-time":"2026-01-19T19:30:41Z","timestamp":1768851041000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1186\/s13636-025-00439-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,13]]},"references-count":54,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2026,12]]}},"alternative-id":["439"],"URL":"https:\/\/doi.org\/10.1186\/s13636-025-00439-w","relation":{},"ISSN":["1687-4722"],"issn-type":[{"value":"1687-4722","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12,13]]},"assertion":[{"value":"21 May 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 November 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 December 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing 
interests"}}],"article-number":"6"}}