{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T06:01:26Z","timestamp":1742968886375,"version":"3.40.3"},"publisher-location":"Cham","reference-count":36,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031547256"},{"type":"electronic","value":"9783031547263"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-54726-3_7","type":"book-chapter","created":{"date-parts":[[2024,2,21]],"date-time":"2024-02-21T06:03:20Z","timestamp":1708495400000},"page":"111-123","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Gender-Aware Speech Emotion Recognition in\u00a0Multiple Languages"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-5137-2361","authenticated-orcid":false,"given":"Marco","family":"Nicolini","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3482-9215","authenticated-orcid":false,"given":"Stavros","family":"Ntalampiras","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,2,22]]},"reference":[{"key":"7_CR1","doi-asserted-by":"crossref","unstructured":"Burkhardt, F., Paeschke, A., Rolfes, M., Sendlmeier, W.F., Weiss, B., et al.: A database of German emotional speech. In: Interspeech, vol. 5, pp. 1517\u20131520 (2005)","DOI":"10.21437\/Interspeech.2005-446"},{"issue":"4","key":"7_CR2","doi-asserted-by":"publisher","first-page":"377","DOI":"10.1109\/TAFFC.2014.2336244","volume":"5","author":"H Cao","year":"2014","unstructured":"Cao, H., Cooper, D.G., Keutmann, M.K., Gur, R.C., Nenkova, A., Verma, R.: Crema-d: crowd-sourced emotional multimodal actors dataset. IEEE Trans. Affect. Comput. 5(4), 377\u2013390 (2014)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"7_CR3","series-title":"LNNS","doi-asserted-by":"publisher","first-page":"741","DOI":"10.1007\/978-981-16-0739-4_70","volume-title":"Information and Communication Technology for Competitive Strategies (ICTCS 2020)","author":"K Chachadi","year":"2021","unstructured":"Chachadi, K., Nirmala, S.R.: Voice-based gender recognition using neural network. In: Joshi, A., Mahmud, M., Ragel, R.G., Thakur, N.V. (eds.) Information and Communication Technology for Competitive Strategies (ICTCS 2020). LNNS, vol. 191, pp. 741\u2013749. Springer, Singapore (2021). https:\/\/doi.org\/10.1007\/978-981-16-0739-4_70"},{"issue":"1","key":"7_CR4","doi-asserted-by":"publisher","first-page":"1016","DOI":"10.1109\/TIE.2022.3150097","volume":"70","author":"L Chen","year":"2023","unstructured":"Chen, L., Wang, K., Li, M., Wu, M., Pedrycz, W., Hirota, K.: K-means clustering-based kernel canonical correlation analysis for multimodal emotion recognition in human-robot interaction. IEEE Trans. Industr. Electron. 70(1), 1016\u20131024 (2023). 
https:\/\/doi.org\/10.1109\/TIE.2022.3150097","journal-title":"IEEE Trans. Industr. Electron."},{"key":"7_CR5","unstructured":"Costantini, G., Iaderola, I., Paoloni, A., Todisco, M.: EMOVO corpus: an Italian emotional speech database. In: International Conference on Language Resources and Evaluation (LREC 2014), pp. 3501\u20133504. European Language Resources Association (ELRA) (2014)"},{"key":"7_CR6","first-page":"250","volume":"29","author":"Z Dair","year":"2022","unstructured":"Dair, Z., Donovan, R., O\u2019Reilly, R.: Linguistic and gender variation in speech emotion recognition using spectral features. IEEE Signal Process. Lett. 29, 250\u2013254 (2022)","journal-title":"IEEE Signal Process. Lett."},{"key":"7_CR7","volume-title":"Introduction to Audio Analysis: A MATLAB Approach","author":"T Giannakopoulos","year":"2014","unstructured":"Giannakopoulos, T., Pikrakis, A.: Introduction to Audio Analysis: A MATLAB Approach, 1st edn. Academic Press Inc, USA (2014)","edition":"1"},{"key":"7_CR8","doi-asserted-by":"crossref","unstructured":"Han, K., Yu, D., Tashev, I.: Speech emotion recognition using deep neural network and extreme learning machine. In: Interspeech 2014 (2014)","DOI":"10.21437\/Interspeech.2014-57"},{"issue":"3","key":"7_CR9","doi-asserted-by":"publisher","first-page":"1372","DOI":"10.14419\/ijet.v7i3.12656","volume":"7","author":"S Hota","year":"2018","unstructured":"Hota, S., Pathak, S.: KNN classifier based approach for multi-class sentiment analysis of twitter data. Int. J. Eng. Technol. 7(3), 1372 (2018). https:\/\/doi.org\/10.14419\/ijet.v7i3.12656","journal-title":"Int. J. Eng. Technol."},{"key":"7_CR10","doi-asserted-by":"crossref","unstructured":"James, J., Tian, L., Watson, C.I.: An open source emotional speech corpus for human robot interaction applications. In: INTERSPEECH, pp. 2768\u20132772 (2018)","DOI":"10.21437\/Interspeech.2018-1349"},{"key":"7_CR11","doi-asserted-by":"crossref","unstructured":"Latif, S., Qayyum, A., Usman, M., Qadir, J.: Cross lingual speech emotion recognition: Urdu vs. western languages. In: 2018 International Conference on Frontiers of Information Technology (FIT), pp. 88\u201393. IEEE (2018)","DOI":"10.1109\/FIT.2018.00023"},{"key":"7_CR12","doi-asserted-by":"publisher","unstructured":"Latif, S., Rana, R., Khalifa, S., Jurdak, R., Schuller, B.W.: Self supervised adversarial domain adaptation for cross-corpus and cross-language speech emotion recognition. IEEE Trans. Affect. Comput. 1\u20131 (2022). https:\/\/doi.org\/10.1109\/TAFFC.2022.3167013","DOI":"10.1109\/TAFFC.2022.3167013"},{"issue":"5","key":"7_CR13","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0196391","volume":"13","author":"SR Livingstone","year":"2018","unstructured":"Livingstone, S.R., Russo, F.A.: The ryerson audio-visual database of emotional speech and song (ravdess): a dynamic, multimodal set of facial and vocal expressions in north American English. PLoS ONE 13(5), e0196391 (2018)","journal-title":"PLoS ONE"},{"key":"7_CR14","doi-asserted-by":"publisher","DOI":"10.4135\/9781483346274","volume-title":"The Sage Encyclopedia of Theory in Psychology","author":"HL Miller Jr","year":"2016","unstructured":"Miller, H.L., Jr.: The Sage Encyclopedia of Theory in Psychology. SAGE Publications, Thousand Oaks (2016)"},{"key":"7_CR15","doi-asserted-by":"crossref","unstructured":"Mirsamadi, S., Barsoum, E., Zhang, C.: Automatic speech emotion recognition using recurrent neural networks with local attention. 
In: 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2227\u20132231. IEEE (2017)","DOI":"10.1109\/ICASSP.2017.7952552"},{"issue":"1","key":"7_CR16","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s10579-018-9427-x","volume":"53","author":"OM Nezami","year":"2019","unstructured":"Nezami, O.M., Lou, P.J., Karami, M.: ShEMO: a large-scale validated database for Persian speech emotion detection. Lang. Resour. Eval. 53(1), 1\u201316 (2019)","journal-title":"Lang. Resour. Eval."},{"key":"7_CR17","doi-asserted-by":"publisher","unstructured":"Nicolini, M., Ntalampiras, S.: A hierarchical approach for multilingual speech emotion recognition. In: Proceedings of the 12th International Conference on Pattern Recognition Applications and Methods. SCITEPRESS - Science and Technology Publications (2023). https:\/\/doi.org\/10.5220\/0011714800003411","DOI":"10.5220\/0011714800003411"},{"key":"7_CR18","doi-asserted-by":"publisher","first-page":"76","DOI":"10.1016\/j.ecoinf.2018.01.006","volume":"44","author":"S Ntalampiras","year":"2018","unstructured":"Ntalampiras, S.: Bird species identification via transfer learning from music genres. Eco. Inform. 44, 76\u201381 (2018). https:\/\/doi.org\/10.1016\/j.ecoinf.2018.01.006","journal-title":"Eco. Inform."},{"key":"7_CR19","doi-asserted-by":"publisher","unstructured":"Ntalampiras, S.: Toward language-agnostic speech emotion recognition. J. Audio Eng. Soc. 68(1\/2), 7\u201313 (2020). https:\/\/doi.org\/10.17743\/jaes.2019.0045","DOI":"10.17743\/jaes.2019.0045"},{"key":"7_CR20","doi-asserted-by":"publisher","first-page":"21","DOI":"10.1016\/j.patrec.2021.01.018","volume":"144","author":"S Ntalampiras","year":"2021","unstructured":"Ntalampiras, S.: Speech emotion recognition via learning analogies. Pattern Recogn. Lett. 144, 21\u201326 (2021)","journal-title":"Pattern Recogn. Lett."},{"key":"7_CR21","doi-asserted-by":"publisher","unstructured":"Ntalampiras, S.: Model ensemble for predicting heart and respiration rate from speech. IEEE Internet Comput. 1\u20137 (2023). https:\/\/doi.org\/10.1109\/MIC.2023.3257862","DOI":"10.1109\/MIC.2023.3257862"},{"issue":"16","key":"7_CR22","doi-asserted-by":"publisher","first-page":"5554","DOI":"10.3390\/s21165554","volume":"21","author":"S Pal","year":"2021","unstructured":"Pal, S., Mukhopadhyay, S., Suryadevara, N.: Development and progress in sensors and technologies for human emotion recognition. Sensors 21(16), 5554 (2021). https:\/\/doi.org\/10.3390\/s21165554","journal-title":"Sensors"},{"issue":"3","key":"7_CR23","doi-asserted-by":"publisher","first-page":"1590","DOI":"10.1109\/TCE.2009.5278031","volume":"55","author":"JS Park","year":"2009","unstructured":"Park, J.S., Kim, J.H., Oh, Y.H.: Feature vector classification based speech emotion recognition for service robots. IEEE Trans. Consum. Electron. 55(3), 1590\u20131596 (2009)","journal-title":"IEEE Trans. Consum. Electron."},{"issue":"7","key":"7_CR24","doi-asserted-by":"publisher","first-page":"677","DOI":"10.1109\/34.598226","volume":"19","author":"V Pavlovic","year":"1997","unstructured":"Pavlovic, V., Sharma, R., Huang, T.: Visual interpretation of hand gestures for human-computer interaction: a review. IEEE Trans. Pattern Anal. Mach. Intell. 19(7), 677\u2013695 (1997). https:\/\/doi.org\/10.1109\/34.598226","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"7_CR25","first-page":"2020","volume":"1","author":"MK Pichora-Fuller","year":"2020","unstructured":"Pichora-Fuller, M.K., Dupuis, K.: Toronto emotional speech set (TESS). Scholars Portal Dataverse 1, 2020 (2020)","journal-title":"Scholars Portal Dataverse"},{"key":"7_CR26","doi-asserted-by":"crossref","unstructured":"Saitta, A., Ntalampiras, S.: Language-agnostic speech anger identification. In: 2021 44th International Conference on Telecommunications and Signal Processing (TSP), pp. 249\u2013253. IEEE (2021)","DOI":"10.1109\/TSP52935.2021.9522606"},{"key":"7_CR27","doi-asserted-by":"publisher","first-page":"79861","DOI":"10.1109\/ACCESS.2020.2990538","volume":"8","author":"M Sajjad","year":"2020","unstructured":"Sajjad, M., Kwon, S.: Clustering-based speech emotion recognition by incorporating learned features and deep BiLSTM. IEEE Access 8, 79861\u201379875 (2020). https:\/\/doi.org\/10.1109\/ACCESS.2020.2990538","journal-title":"IEEE Access"},{"key":"7_CR28","doi-asserted-by":"publisher","unstructured":"Sang, D.V., Cuong, L.T.B., Ha, P.T.: Discriminative deep feature learning for facial emotion recognition. In: 2018 1st International Conference on Multimedia Analysis and Pattern Recognition (MAPR), pp. 1\u20136 (2018). https:\/\/doi.org\/10.1109\/MAPR.2018.8337514","DOI":"10.1109\/MAPR.2018.8337514"},{"key":"7_CR29","doi-asserted-by":"crossref","unstructured":"Scheidwasser-Clow, N., Kegler, M., Beckmann, P., Cernak, M.: SERAB: a multi-lingual benchmark for speech emotion recognition. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7697\u20137701. IEEE (2022)","DOI":"10.1109\/ICASSP43922.2022.9747348"},{"key":"7_CR30","doi-asserted-by":"crossref","unstructured":"Sharma, M.: Multi-lingual multi-task speech emotion recognition using wav2vec 2.0. In: Proceedings of the 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6907\u20136911. IEEE (2022)","DOI":"10.1109\/ICASSP43922.2022.9747417"},{"issue":"1","key":"7_CR31","doi-asserted-by":"publisher","first-page":"16","DOI":"10.1109\/TASLP.2015.2487051","volume":"24","author":"M Tahon","year":"2015","unstructured":"Tahon, M., Devillers, L.: Towards a small set of robust acoustic features for emotion recognition: challenges. IEEE\/ACM Trans. Audio, Speech, Lang. Process. 24(1), 16\u201328 (2015)","journal-title":"IEEE\/ACM Trans. Audio, Speech, Lang. Process."},{"key":"7_CR32","doi-asserted-by":"publisher","unstructured":"Venkata Subbarao, M., Terlapu, S.K., Geethika, N., Harika, K.D.: Speech emotion recognition using k-nearest neighbor classifiers. In: Shetty D., P., Shetty, S. (eds.) Recent Advances in Artificial Intelligence and Data Engineering. AISC, vol. 1386, pp. 123\u2013131. Springer, Singapore (2022). https:\/\/doi.org\/10.1007\/978-981-16-3342-3_10","DOI":"10.1007\/978-981-16-3342-3_10"},{"key":"7_CR33","doi-asserted-by":"crossref","unstructured":"Vlasenko, B., Schuller, B., Wendemuth, A., Rigoll, G.: Combining frame and turn-level information for robust recognition of emotions within speech. In: Proceedings of Interspeech, pp. 2249\u20132252 (2007)","DOI":"10.21437\/Interspeech.2007-611"},{"key":"7_CR34","unstructured":"Vogt, T., Andr\u00e9, E.: Improving automatic emotion recognition from speech via gender differentiation. In: Proceedings of the 5th Language Resources and Evaluation Conference (LREC), pp. 
1123\u20131126 (2006)"},{"issue":"6","key":"7_CR35","doi-asserted-by":"publisher","first-page":"457","DOI":"10.17743\/jaes.2018.0036","volume":"66","author":"N Vryzas","year":"2018","unstructured":"Vryzas, N., Kotsakis, R., Liatsou, A., Dimoulas, C.A., Kalliris, G.: Speech emotion recognition for performance interaction. J. Audio Eng. Soc. 66(6), 457\u2013467 (2018)","journal-title":"J. Audio Eng. Soc."},{"key":"7_CR36","doi-asserted-by":"crossref","unstructured":"Vryzas, N., Matsiola, M., Kotsakis, R., Dimoulas, C., Kalliris, G.: Subjective evaluation of a speech emotion recognition interaction framework. In: Proceedings of the Audio Mostly 2018 on Sound in Immersion and Emotion, pp. 1\u20137. Association for Computing Machinery (2018)","DOI":"10.1145\/3243274.3243294"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition Applications and Methods"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-54726-3_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,21]],"date-time":"2024-02-21T06:04:22Z","timestamp":1708495462000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-54726-3_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031547256","9783031547263"],"references-count":36,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-54726-3_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"22 February 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPRAM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition Applications and Methods","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Lisbon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Portugal","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 February 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 February 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icpram2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icpram.scitevents.org\/Home.aspx?y=2023","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"PRIMORIS","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"157","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"42","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"18% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}