{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T15:44:37Z","timestamp":1778082277930,"version":"3.51.4"},"reference-count":53,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2023,8,25]],"date-time":"2023-08-25T00:00:00Z","timestamp":1692921600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,8,25]],"date-time":"2023-08-25T00:00:00Z","timestamp":1692921600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Speech Technol"],"published-print":{"date-parts":[[2023,9]]},"DOI":"10.1007\/s10772-023-10038-9","type":"journal-article","created":{"date-parts":[[2023,8,25]],"date-time":"2023-08-25T18:02:25Z","timestamp":1692986545000},"page":"609-625","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":15,"title":["Speaker and gender dependencies in within\/cross linguistic Speech Emotion Recognition"],"prefix":"10.1007","volume":"26","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5280-6020","authenticated-orcid":false,"given":"Adil","family":"Chakhtouna","sequence":"first","affiliation":[]},{"given":"Sara","family":"Sekkate","sequence":"additional","affiliation":[]},{"given":"Abdellah","family":"Adib","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,25]]},"reference":[{"key":"10038_CR1","doi-asserted-by":"publisher","DOI":"10.3389\/fpubh.2021.781827","volume":"9","author":"ST Ahmed","year":"2021","unstructured":"Ahmed, S. T., Singh, D. K., Basha, S. M., Abouel Nasr, E., Kamrani, A. K., & Aboudaif, M. K. (2021). 
Neural network based mental depression identification and sentiments classification technique from speech signals: A covid-19 focused pandemic study. Frontiers in Public Health, 9, 781827.","journal-title":"Frontiers in Public Health"},{"key":"10038_CR2","unstructured":"Akil, S., Sekkate, S., & Adib, A. (2021). Feature selection based on machine learning for credit scoring: An evaluation of filter and embedded methods. In 2021 International conference on innovations in intelligent systems and applications (INISTA) (pp.\u00a01\u20136). IEEE."},{"key":"10038_CR3","doi-asserted-by":"publisher","DOI":"10.1016\/j.apacoust.2021.108046","volume":"179","author":"J Ancilin","year":"2021","unstructured":"Ancilin, J., & Milton, A. (2021). Improved speech emotion recognition with Mel frequency magnitude coefficient. Applied Acoustics, 179, 108046.","journal-title":"Applied Acoustics"},{"issue":"4","key":"10038_CR4","first-page":"15","volume":"16","author":"G Assun\u00e7\u00e3o","year":"2020","unstructured":"Assun\u00e7\u00e3o, G., Menezes, P., & Perdig\u00e3o, F. (2020). Speaker awareness for speech emotion recognition. International Journal of Online and Biomedical Engineering, 16(4), 15\u201322.","journal-title":"International Journal of Online and Biomedical Engineering"},{"key":"10038_CR5","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2019.104886","volume":"184","author":"A Bhavan","year":"2019","unstructured":"Bhavan, A., Chauhan, P., Shah, R. R., et al. (2019). Bagged support vector machines for emotion recognition from speech. Knowledge-Based Systems, 184, 104886.","journal-title":"Knowledge-Based Systems"},{"key":"10038_CR6","first-page":"1517","volume":"5","author":"F Burkhardt","year":"2005","unstructured":"Burkhardt, F., Paeschke, A., Rolfes, M., Sendlmeier, W. F., Weiss, B., et al. (2005). A database of German emotional speech. 
Interspeech, 5, 1517\u20131520.","journal-title":"Interspeech"},{"key":"10038_CR7","volume-title":"Introduction to wavelets and wavelet transforms: A primer","author":"CS Burrus","year":"1997","unstructured":"Burrus, C. S., Gopinath, R. A., Guo, H., Odegard, J. E., & Selesnick, I. W. (1997). Introduction to wavelets and wavelet transforms: A primer. Pentice Hall."},{"key":"10038_CR8","doi-asserted-by":"crossref","unstructured":"Chakhtouna, A., Sekkate, S., & Adib, A. (2021). Improving speech emotion recognition system using spectral and prosodic features. In 2021 International conference on intelligent systems design and applications (ISDA) (pp.\u00a01\u201310). Springer.","DOI":"10.1007\/978-3-030-96308-8_37"},{"key":"10038_CR9","doi-asserted-by":"crossref","unstructured":"Chakhtouna, A., Sekkate, S., & Adib, A. (2022). Improving speaker-dependency\/independency of wavelet-based speech emotion recognition. In Emerging trends in intelligent systems & network security (pp.\u00a0281\u2013291). Springer.","DOI":"10.1007\/978-3-031-15191-0_27"},{"key":"10038_CR10","doi-asserted-by":"crossref","unstructured":"Chakhtouna, A., Sekkate, S., & Adib, A. (2023). Speech emotion recognition using pre-trained and fine-tuned transfer learning approaches. In Innovations in smart cities applications volume 6: The proceedings of the 7th international conference on smart city applications (pp.\u00a0365\u2013374). Springer.","DOI":"10.1007\/978-3-031-26852-6_35"},{"issue":"3","key":"10038_CR11","doi-asserted-by":"publisher","first-page":"273","DOI":"10.1007\/BF00994018","volume":"20","author":"C Cortes","year":"1995","unstructured":"Cortes, C., & Vapnik, V. (1995). Support-vector networks. Machine Learning, 20(3), 273\u2013297.","journal-title":"Machine Learning"},{"key":"10038_CR12","unstructured":"Costantini, G., Iaderola, I., Paoloni, A., & Todisco, M. (2014). Emovo corpus: An Italian emotional speech database. 
In International conference on language resources and evaluation (LREC 2014) (pp.\u00a03501\u20133504). European Language Resources Association (ELRA)."},{"issue":"4","key":"10038_CR13","doi-asserted-by":"publisher","first-page":"357","DOI":"10.1109\/TASSP.1980.1163420","volume":"28","author":"S Davis","year":"1980","unstructured":"Davis, S., & Mermelstein, P. (1980). Comparison of parametric representations for monosyllabic word recognition in continuously spoken sentences. IEEE Transactions on Acoustics, Speech, and Signal Processing, 28(4), 357\u2013366.","journal-title":"IEEE Transactions on Acoustics, Speech, and Signal Processing"},{"issue":"20","key":"10038_CR14","doi-asserted-by":"publisher","first-page":"4495","DOI":"10.3390\/s19204495","volume":"19","author":"T Dissanayake","year":"2019","unstructured":"Dissanayake, T., Rajapaksha, Y., Ragel, R., & Nawinne, I. (2019). An ensemble learning approach for electrocardiogram sensor based human emotion recognition. Sensors, 19(20), 4495.","journal-title":"Sensors"},{"key":"10038_CR15","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2021.102468","volume":"67","author":"S Evain","year":"2021","unstructured":"Evain, S., Lecouteux, B., Schwab, D., Contesse, A., Pinchaud, A., & Bernardoni, N. H. (2021). Human beatbox sound recognition using an automatic speech recognition toolkit. Biomedical Signal Processing and Control, 67, 102468.","journal-title":"Biomedical Signal Processing and Control"},{"issue":"2","key":"10038_CR16","doi-asserted-by":"publisher","first-page":"190","DOI":"10.1109\/TAFFC.2015.2457417","volume":"7","author":"F Eyben","year":"2015","unstructured":"Eyben, F., Scherer, K. R., Schuller, B. W., Sundberg, J., Andr\u00e9, E., Busso, C., Devillers, L. Y., Epps, J., Laukka, P., Narayanan, S. S., et al. (2015). The Geneva minimalistic acoustic parameter set (GeMAPS) for voice research and affective computing. 
IEEE Transactions on Affective Computing, 7(2), 190\u2013202.","journal-title":"IEEE Transactions on Affective Computing"},{"issue":"4","key":"10038_CR17","doi-asserted-by":"publisher","first-page":"723","DOI":"10.1137\/0515056","volume":"15","author":"A Grossmann","year":"1984","unstructured":"Grossmann, A., & Morlet, J. (1984). Decomposition of hardy functions into square integrable wavelets of constant shape. SIAM Journal on Mathematical Analysis, 15(4), 723\u2013736.","journal-title":"SIAM Journal on Mathematical Analysis"},{"key":"10038_CR18","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2020.101894","volume":"59","author":"D Issa","year":"2020","unstructured":"Issa, D., Demirci, M. F., & Yazici, A. (2020). Speech emotion recognition with deep convolutional neural networks. Biomedical Signal Processing and Control, 59, 101894.","journal-title":"Biomedical Signal Processing and Control"},{"issue":"1","key":"10038_CR19","first-page":"3124","volume":"3","author":"PV Janse","year":"2014","unstructured":"Janse, P. V., Magre, S. B., Kurzekar, P. K., & Deshmukh, R. (2014). A comparative study between MFCC and DWT feature extraction technique. International Journal of Engineering Research and Technology, 3(1), 3124\u20133127.","journal-title":"International Journal of Engineering Research and Technology"},{"key":"10038_CR20","doi-asserted-by":"publisher","first-page":"125830","DOI":"10.1109\/ACCESS.2021.3111659","volume":"9","author":"S Kanwal","year":"2021","unstructured":"Kanwal, S., & Asghar, S. (2021). Speech emotion recognition using clustering based GA-optimized feature set. IEEE Access, 9, 125830\u2013125842.","journal-title":"IEEE Access"},{"issue":"2","key":"10038_CR21","doi-asserted-by":"publisher","first-page":"215","DOI":"10.1007\/s10772-012-9176-y","volume":"16","author":"S Karimi","year":"2013","unstructured":"Karimi, S., & Sedaaghi, M. H. (2013). Robust emotional speech classification in the presence of babble noise. 
International Journal of Speech Technology, 16(2), 215\u2013227.","journal-title":"International Journal of Speech Technology"},{"key":"10038_CR22","doi-asserted-by":"publisher","first-page":"187","DOI":"10.1016\/j.neucom.2020.07.056","volume":"417","author":"M Khalil","year":"2020","unstructured":"Khalil, M., Adib, A., et al. (2020). An end-to-end multi-level wavelet convolutional neural networks for heart diseases diagnosis. Neurocomputing, 417, 187\u2013201.","journal-title":"Neurocomputing"},{"key":"10038_CR23","doi-asserted-by":"crossref","unstructured":"Kishore, K.\u00a0K., & Satish, P.\u00a0K. (2013). Emotion recognition in speech using MFCC and wavelet features. In 2013 3rd IEEE international advance computing conference (IACC) (pp.\u00a0842\u2013847). IEEE.","DOI":"10.1109\/IAdCC.2013.6514336"},{"issue":"9\u201310","key":"10038_CR24","doi-asserted-by":"publisher","first-page":"1172","DOI":"10.1016\/j.specom.2011.01.007","volume":"53","author":"M Kockmann","year":"2011","unstructured":"Kockmann, M., Burget, L., et al. (2011). Application of speaker-and language identification state-of-the-art techniques for emotion recognition. Speech Communication, 53(9\u201310), 1172\u20131185.","journal-title":"Speech Communication"},{"key":"10038_CR25","doi-asserted-by":"crossref","unstructured":"Kurpukdee, N., Kasuriya, S., Chunwijitra, V., Wutiwiwatchai, C., & Lamsrichan, P. (2017). A study of support vector machines for emotional speech recognition. In 2017 8th international conference of information and communication technology for embedded systems (IC-ICTES) (pp.\u00a01\u20136). IEEE.","DOI":"10.1109\/ICTEmSys.2017.7958773"},{"issue":"11","key":"10038_CR26","doi-asserted-by":"publisher","first-page":"1","DOI":"10.18637\/jss.v036.i11","volume":"36","author":"MB Kursa","year":"2010","unstructured":"Kursa, M. B., Rudnicki, W. R., et al. (2010). Feature selection with the Boruta package. 
Journal of Statistical Software, 36(11), 1\u201313.","journal-title":"Journal of Statistical Software"},{"issue":"3","key":"10038_CR27","doi-asserted-by":"publisher","first-page":"497","DOI":"10.1007\/s10772-018-09572-8","volume":"22","author":"S Lalitha","year":"2019","unstructured":"Lalitha, S., Tripathi, S., & Gupta, D. (2019). Enhanced speech emotion detection using deep neural networks. International Journal of Speech Technology, 22(3), 497\u2013510.","journal-title":"International Journal of Speech Technology"},{"key":"10038_CR28","doi-asserted-by":"crossref","unstructured":"Latif, S., Qayyum, A., Usman, M., & Qadir, J. (2018). Cross lingual speech emotion recognition: Urdu vs. Western languages. In 2018 International conference on frontiers of information technology (FIT) (pp. 88\u201393). IEEE.","DOI":"10.1109\/FIT.2018.00023"},{"issue":"5","key":"10038_CR29","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0196391","volume":"13","author":"SR Livingstone","year":"2018","unstructured":"Livingstone, S. R., & Russo, F. A. (2018). The Ryerson audio-visual database of emotional speech and song (RAVDESS): A dynamic, multimodal set of facial and vocal expressions in North American English. PLoS ONE, 13(5), e0196391.","journal-title":"PLoS ONE"},{"key":"10038_CR30","doi-asserted-by":"crossref","unstructured":"McFee, B., Raffel, C., Liang, D., Ellis, D. P., McVicar, M., Battenberg, E., & Nieto, O. (2015). Librosa: Audio and music signal analysis in Python. In Proceedings of the 14th Python in science conference (Vol. 8, pp. 18\u201325). Citeseer.","DOI":"10.25080\/Majora-7b98e3ed-003"},{"issue":"2","key":"10038_CR31","doi-asserted-by":"publisher","first-page":"74","DOI":"10.22266\/ijies2020.0430.08","volume":"13","author":"HMS Naing","year":"2020","unstructured":"Naing, H. M. S., Hidayat, R., Hartanto, R., & Miyanaga, Y. (2020). Discrete wavelet denoising into MFCC for noise suppressive in automatic speech recognition system. 
International Journal of Intelligent Engineering and Systems, 13(2), 74\u201382.","journal-title":"International Journal of Intelligent Engineering and Systems"},{"issue":"4","key":"10038_CR32","doi-asserted-by":"publisher","first-page":"603","DOI":"10.1016\/S0167-6393(03)00099-2","volume":"41","author":"TL Nwe","year":"2003","unstructured":"Nwe, T. L., Foo, S. W., & De Silva, L. C. (2003). Speech emotion recognition using hidden Markov models. Speech Communication, 41(4), 603\u2013623.","journal-title":"Speech Communication"},{"issue":"2","key":"10038_CR33","first-page":"55","volume":"10","author":"C Praksah","year":"2015","unstructured":"Praksah, C., & Gaikwad, V. (2015). Analysis of emotion recognition system through speech signal using KNN, GMM & SVM classifier. IOSR Journal of Electronics and Communication Engineering (IOSR-JECE), 10(2), 55\u201367.","journal-title":"IOSR Journal of Electronics and Communication Engineering (IOSR-JECE)"},{"issue":"3","key":"10038_CR34","doi-asserted-by":"publisher","first-page":"1467","DOI":"10.1007\/s11235-011-9624-z","volume":"52","author":"S Ramakrishnan","year":"2013","unstructured":"Ramakrishnan, S., & El Emary, I. M. (2013). Speech emotion recognition approaches in human computer interaction. Telecommunication Systems, 52(3), 1467\u20131478.","journal-title":"Telecommunication Systems"},{"key":"10038_CR35","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2021.102839","volume":"69","author":"J Ramya","year":"2021","unstructured":"Ramya, J., Vijaylakshmi, H., & Saifuddin, H. M. (2021). Segmentation of skin lesion images using discrete wavelet transform. Biomedical Signal Processing and Control, 69, 102839.","journal-title":"Biomedical Signal Processing and Control"},{"issue":"2","key":"10038_CR36","doi-asserted-by":"publisher","first-page":"143","DOI":"10.1007\/s10772-012-9172-2","volume":"16","author":"KS Rao","year":"2013","unstructured":"Rao, K. S., Koolagudi, S. G., & Vempada, R. R. (2013). 
Emotion recognition from speech using global and local prosodic features. International Journal of Speech Technology, 16(2), 143\u2013160.","journal-title":"International Journal of Speech Technology"},{"key":"10038_CR37","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2021.102747","volume":"68","author":"M Riyad","year":"2021","unstructured":"Riyad, M., Khalil, M., & Adib, A. (2021). A novel multi-scale convolutional neural network for motor imagery classification. Biomedical Signal Processing and Control, 68, 102747.","journal-title":"Biomedical Signal Processing and Control"},{"key":"10038_CR38","doi-asserted-by":"crossref","unstructured":"Rybka, J., & Janicki, A. (2013). Comparison of speaker dependent and speaker independent emotion recognition. International Journal of Applied Mathematics and Computer Science,23(4).","DOI":"10.2478\/amcs-2013-0060"},{"key":"10038_CR39","doi-asserted-by":"crossref","unstructured":"Schuller, B., Steidl, S., Batliner, A., Burkhardt, F., Devillers, L., M\u00fcller, C., & Narayanan, S. (2010). The interspeech 2010 paralinguistic challenge. In Proceedings of INTERSPEECH 2010 (pp.\u00a02794\u20132797).","DOI":"10.21437\/Interspeech.2010-739"},{"key":"10038_CR40","doi-asserted-by":"crossref","unstructured":"Sekkate, S., Khalil, M., & Adib, A. (2017). Speaker identification: A way to reduce call-sign confusion events. In 2017 International conference on advanced technologies for signal and image processing (ATSIP) (pp.\u00a01\u20136). IEEE.","DOI":"10.1109\/ATSIP.2017.8075593"},{"key":"10038_CR41","doi-asserted-by":"crossref","unstructured":"Sekkate, S., Khalil, M., Adib, A., & Ben Jebara, S. (2019a). A multiresolution-based fusion strategy for improving speech emotion recognition efficiency. In International conference on mobile, secure, and programmable networking (pp.\u00a096\u2013109). 
Springer.","DOI":"10.1007\/978-3-030-22885-9_10"},{"issue":"4","key":"10038_CR42","doi-asserted-by":"publisher","first-page":"91","DOI":"10.3390\/computers8040091","volume":"8","author":"S Sekkate","year":"2019","unstructured":"Sekkate, S., Khalil, M., Adib, A., & Ben Jebara, S. (2019b). An investigation of a feature-level fusion for noisy speech emotion recognition. Computers, 8(4), 91.","journal-title":"Computers"},{"key":"10038_CR43","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2020.101867","volume":"58","author":"R Sharma","year":"2020","unstructured":"Sharma, R., Pachori, R. B., & Sircar, P. (2020). Automated emotion recognition based on higher order statistics and deep learning algorithm. Biomedical Signal Processing and Control, 58, 101867.","journal-title":"Biomedical Signal Processing and Control"},{"issue":"10","key":"10038_CR44","doi-asserted-by":"publisher","first-page":"2464","DOI":"10.1109\/78.157290","volume":"40","author":"MJ Shensa","year":"1992","unstructured":"Shensa, M. J., et al. (1992). The discrete wavelet transform: Wedding the a Trous and Mallat algorithms. IEEE Transactions on Signal Processing, 40(10), 2464\u20132482.","journal-title":"IEEE Transactions on Signal Processing"},{"key":"10038_CR45","doi-asserted-by":"publisher","first-page":"190784","DOI":"10.1109\/ACCESS.2020.3031763","volume":"8","author":"Y\u00dc S\u00f6nmez","year":"2020","unstructured":"S\u00f6nmez, Y. \u00dc., & Varol, A. (2020). A speech emotion recognition model based on multi-level local binary and local ternary patterns. IEEE Access, 8, 190784\u2013190796.","journal-title":"IEEE Access"},{"key":"10038_CR46","doi-asserted-by":"publisher","first-page":"80","DOI":"10.1016\/j.bspc.2014.10.008","volume":"18","author":"Y Sun","year":"2015","unstructured":"Sun, Y., Wen, G., & Wang, J. (2015). Weighted spectral features based on local Hu moments for speech emotion recognition. 
Biomedical Signal Processing and Control, 18, 80\u201390.","journal-title":"Biomedical Signal Processing and Control"},{"key":"10038_CR47","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2021.103029","volume":"70","author":"Y Tan","year":"2021","unstructured":"Tan, Y., Sun, Z., Duan, F., Sol\u00e9-Casals, J., & Caiafa, C. F. (2021). A multimodal emotion recognition method based on facial expressions and electroencephalography. Biomedical Signal Processing and Control, 70, 103029.","journal-title":"Biomedical Signal Processing and Control"},{"key":"10038_CR48","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2020.106547","volume":"211","author":"T Tuncer","year":"2021","unstructured":"Tuncer, T., Dogan, S., & Acharya, U. R. (2021). Automated accurate speech emotion recognition system using twine shuffle pattern and iterative neighborhood component analysis techniques. Knowledge-Based Systems, 211, 106547.","journal-title":"Knowledge-Based Systems"},{"key":"10038_CR49","doi-asserted-by":"publisher","first-page":"293","DOI":"10.1016\/j.bspc.2018.07.019","volume":"46","author":"SS Upadhya","year":"2018","unstructured":"Upadhya, S. S., Cheeran, A., & Nirmal, J. H. (2018). Thomson multitaper MFCC and PLP voice features for early detection of Parkinson disease. Biomedical Signal Processing and Control, 46, 293\u2013301.","journal-title":"Biomedical Signal Processing and Control"},{"key":"10038_CR50","doi-asserted-by":"publisher","first-page":"257","DOI":"10.1016\/j.neucom.2020.02.085","volume":"398","author":"K Wang","year":"2020","unstructured":"Wang, K., Su, G., Liu, L., & Wang, S. (2020). Wavelet packet analysis for speaker-independent emotion recognition. Neurocomputing, 398, 257\u2013264.","journal-title":"Neurocomputing"},{"key":"10038_CR51","doi-asserted-by":"crossref","unstructured":"Zehra, W., Javed, A.\u00a0R., Jalil, Z., Khan, H.\u00a0U., & Gadekallu, T.\u00a0R. (2021). Cross corpus multi-lingual speech emotion recognition using ensemble learning. 
Complex & Intelligent Systems, 1\u201310.","DOI":"10.1007\/s40747-020-00250-4"},{"key":"10038_CR52","doi-asserted-by":"publisher","first-page":"312","DOI":"10.1016\/j.bspc.2018.08.035","volume":"47","author":"J Zhao","year":"2019","unstructured":"Zhao, J., Mao, X., & Chen, L. (2019). Speech emotion recognition using deep 1d & 2d CNN LSTM networks. Biomedical Signal Processing and Control, 47, 312\u2013323.","journal-title":"Biomedical Signal Processing and Control"},{"issue":"7","key":"10038_CR53","doi-asserted-by":"publisher","first-page":"1694","DOI":"10.3390\/s17071694","volume":"17","author":"L Zhu","year":"2017","unstructured":"Zhu, L., Chen, L., Zhao, D., Zhou, J., & Zhang, W. (2017). Emotion recognition from Chinese speech for smart affective services using a combination of SVM and DBN. Sensors, 17(7), 1694.","journal-title":"Sensors"}],"container-title":["International Journal of Speech Technology"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10772-023-10038-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10772-023-10038-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10772-023-10038-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,11,10]],"date-time":"2023-11-10T14:09:40Z","timestamp":1699625380000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10772-023-10038-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,25]]},"references-count":53,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2023,9]]}},"alternative-id":["10038"],"URL":"https:\/\/doi.org\/10.1007\/s10772-023-10038-9","relation":{},"ISSN":["1381-2416","1572-8110"],"issn-type":[{"value":"1381-2416","type":"print"},{"value":"1572-8110","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,8,25]]},"assertion":[{"value":"10 July 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 August 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 August 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no financial or proprietary interests in any material discussed in this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}