{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,7]],"date-time":"2026-05-07T10:23:27Z","timestamp":1778149407351,"version":"3.51.4"},"reference-count":170,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2018,1,19]],"date-time":"2018-01-19T00:00:00Z","timestamp":1516320000000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Speech Technol"],"published-print":{"date-parts":[[2018,3]]},"DOI":"10.1007\/s10772-018-9491-z","type":"journal-article","created":{"date-parts":[[2018,1,19]],"date-time":"2018-01-19T11:22:23Z","timestamp":1516360943000},"page":"93-120","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":293,"title":["Databases, features and classifiers for speech emotion recognition: a review"],"prefix":"10.1007","volume":"21","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3768-857X","authenticated-orcid":false,"given":"Monorama","family":"Swain","sequence":"first","affiliation":[]},{"given":"Aurobinda","family":"Routray","sequence":"additional","affiliation":[]},{"given":"P.","family":"Kabisatpathy","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2018,1,19]]},"reference":[{"key":"9491_CR1","unstructured":"Abrilian, S., Devillers, L., & Martin, J. C. (2006). Annotation of emotions in real-life video interviews: Variability between coders. In 5th international conference on language resources and evaluation (LREC 06), Genoa, pp.\u00a02004\u20132009."},{"key":"9491_CR2","doi-asserted-by":"crossref","unstructured":"Agrawal, S. S. (2011). Emotions in Hindi speech-analysis, perception and recognition. 
In International conference on speech database and assessments (Oriental COCOSDA).","DOI":"10.1109\/ICSDA.2011.6085972"},{"key":"9491_CR3","unstructured":"Agrawal, S. S., Jain, A., & Arora, S. (2009). Acoustic and perceptual features of intonation patterns in Hindi speech. In International workshop on spoken language prosody (IWSLPR-09), Kolkata, pp.\u00a025\u201327."},{"key":"9491_CR4","doi-asserted-by":"publisher","first-page":"9554","DOI":"10.1016\/j.eswa.2015.07.062","volume":"42","author":"JB Alonso","year":"2015","unstructured":"Alonso, J. B., Cabrera, J., Medina, M., & Travieso, C. M. (2015). New approach in quantification of emotional intensity from the speech signal: Emotional temperature. Experts Systems with Applications, 42, 9554\u20139564.","journal-title":"Experts Systems with Applications"},{"key":"9491_CR5","doi-asserted-by":"crossref","unstructured":"Amer, M. R., Siddiquie, B., Richey, C., & Divakaran, A. (2014). Emotion detection in speech using deep networks. In IEEE international conference on acoustics, speech and signal processing (ICASSP), pp.\u00a03724\u20133728.","DOI":"10.1109\/ICASSP.2014.6854297"},{"key":"9491_CR6","unstructured":"Amir, N., Ron, S., & Laor, N. (2000). Analysis of an emotional speech corpus in Hebrew based on objective criteria. In Proceedings of ISCA workshop speech and emotion, Belfast, Vol.\u00a01, pp.\u00a029\u201333."},{"issue":"6","key":"9491_CR7","doi-asserted-by":"publisher","first-page":"1687","DOI":"10.1121\/1.1913303","volume":"52","author":"BS Atal","year":"1972","unstructured":"Atal, B. S. (1972). Automatic speaker recognition based on pitch contours. The Journal of the Acoustical Society of America, 52(6), 1687\u20131697.","journal-title":"The Journal of the Acoustical Society of America"},{"key":"9491_CR8","doi-asserted-by":"crossref","unstructured":"Atassi, H., & Esposito, A. (2008). A speaker independent approach to the classification of emotional vocal expressions. 
In IEEE international conference on tools with artificial intelligence (ICTAI\u201908), Dayton, Ohio, USA, Vol\u00a02, pp\u00a0147\u2013152.","DOI":"10.1109\/ICTAI.2008.158"},{"issue":"3","key":"9491_CR9","doi-asserted-by":"publisher","first-page":"614","DOI":"10.1037\/0022-3514.70.3.614","volume":"70","author":"R Banse","year":"1996","unstructured":"Banse, R., & Scherer, K. R. (1996). Acoustic profiles in vocal emotion expression. Journal of Personality and Social Psychology, 70(3), 614\u2013636.","journal-title":"Journal of Personality and Social Psychology"},{"key":"9491_CR10","doi-asserted-by":"crossref","unstructured":"Bapineedu, G., Avinash, B., Gangashetty, S. V., & Yegnanarayana, B. (2009). Analysis of Lombard speech using excitation source information. In INTERSPEECH-09, Brighton, UK, pp.\u00a01091\u20131094.","DOI":"10.21437\/Interspeech.2009-34"},{"key":"9491_CR11","doi-asserted-by":"crossref","unstructured":"Batliner, A., Biersack, S., & Steidl, S. (2006). The prosody of pet robot directed speech: Evidence from children. In Speech prosody, Dresden, pp.\u00a01\u20134.","DOI":"10.21437\/SpeechProsody.2006-201"},{"key":"9491_CR12","unstructured":"Batliner, A., Hacker, C., Steidl, S., Noth, E., D\u2019Arcy, S., Russell, M., & Wong, M. (2004). You stupid tin box\u2014children interacting with the AIBO robot: A cross-linguistic emotional speech corpus. In Proceedings of language resources and evaluation (LREC 04), Lisbon."},{"key":"9491_CR13","doi-asserted-by":"crossref","unstructured":"Batliner, A., Huber, R., Niemann, H., N\u00f6th, E., Spilker, J., & Fischer, K. (2000). The recognition of emotion. In Verbmobil: Foundations of speech-to-speech translation, pp.\u00a0122\u2013130.","DOI":"10.1007\/978-3-662-04230-4_9"},{"key":"9491_CR14","doi-asserted-by":"crossref","unstructured":"Bitouk, D., Verma, R., & Nenkova, A. (2010). Class-level spectral features for emotion recognition. 
Speech Communication, 52(7\u20138), 613\u2013625.","DOI":"10.1016\/j.specom.2010.02.010"},{"key":"9491_CR15","volume-title":"Speech science primer: Physiology, acoustics, and perception of speech","author":"G Borden","year":"1994","unstructured":"Borden, G., Harris, K., & Raphael, L. (1994). Speech science primer: Physiology, acoustics, and perception of speech (3rd\u00a0ed.). Baltimore: Williams and Wilkins.","edition":"3"},{"key":"9491_CR16","doi-asserted-by":"crossref","unstructured":"Bozkurt, E., Erzin, E., & Erdem, A. T. (2009). Improving automatic emotion recognition from speech signals. In 10th annual conference of the international speech communication association (INTERSPEECH), Brighton, UK, pp.\u00a0324\u2013327.","DOI":"10.21437\/Interspeech.2009-106"},{"issue":"4","key":"9491_CR17","first-page":"243","volume":"6","author":"C Brester","year":"2016","unstructured":"Brester, C., Semenkin, E., & Sidorov, M. (2016). Multi-objective heuristic feature selection for speech-based multilingual emotion recognition. JAISCR, 6(4), 243\u2013253.","journal-title":"JAISCR"},{"issue":"2","key":"9491_CR18","doi-asserted-by":"publisher","first-page":"301","DOI":"10.1037\/0033-295X.106.2.301","volume":"106","author":"R Buck","year":"1999","unstructured":"Buck, R. (1999). The biological affects, a typology. Psychological Review, 106(2), 301\u2013336.","journal-title":"Psychological Review"},{"key":"9491_CR19","doi-asserted-by":"crossref","unstructured":"Bulut, M., Narayanan, S. S., & Syrdal, A. K. (2002). Expressive speech synthesis using a concatenative synthesizer. In Proceedings of international conference on spoken language processing (ICSLP\u201902), Vol.\u00a02, pp.\u00a01265\u20131268.","DOI":"10.21437\/ICSLP.2002-389"},{"key":"9491_CR20","doi-asserted-by":"crossref","unstructured":"Burkhardt, F., Paeschke, A., Rolfes, M., Sendlmeier, W., & Weiss, B. (2005). A database of German emotional speech. 
In Proceedings of the INTERSPEECH 2005, Lissabon, Portugal, pp.\u00a01517\u20131520.","DOI":"10.21437\/Interspeech.2005-446"},{"key":"9491_CR21","doi-asserted-by":"crossref","unstructured":"Busso, C., Bulut, M., Lee, C.-C., Kazemzadeh, A., Mower, E., Kim, S. et al. (2008) IEMOCAP: Interactive emotional dyadic motion capture database. In: Language resources and evaluation.","DOI":"10.1007\/s10579-008-9076-6"},{"key":"9491_CR22","doi-asserted-by":"crossref","unstructured":"Caballero-Morales, S. O. (2013) Recognition of emotions in Mexican Spanish speech: An approach based on acoustic modelling of emotion-specific vowels. The Scientific World Journal, 2013,\u00a01\u201313.","DOI":"10.1155\/2013\/162093"},{"key":"9491_CR23","doi-asserted-by":"publisher","first-page":"173","DOI":"10.1016\/j.specom.2004.10.012","volume":"44","author":"EM Caldognetto","year":"2004","unstructured":"Caldognetto, E. M., Cosi, P., Drioli, C., Tisato, G., & Cavicchio, F. (2004). Modifications of phonetic labial targets in emotive speech: Effects of the co-production of speech and emotions. Speech Communication, 44, 173\u2013185.","journal-title":"Speech Communication"},{"key":"9491_CR24","doi-asserted-by":"crossref","unstructured":"Chauhan, A., Koolagudi, S. G., Kafley, S. & Rao, K. S. (2010). Emotion recognition using LP residual. In Proceedings of the 2010 IEEE students\u2019 technology symposium, IIT Kharagpu.","DOI":"10.1109\/TECHSYM.2010.5469162"},{"issue":"6","key":"9491_CR25","doi-asserted-by":"publisher","first-page":"1154","DOI":"10.1016\/j.dsp.2012.05.007","volume":"22","author":"L Chen","year":"2012","unstructured":"Chen, L., Mao, X., Xue, Y., & Lung, L. (2012). Speech emotion recognition: Features and classification models. Digital Signal Processing, 22(6), 1154\u20131160.","journal-title":"Digital Signal Processing"},{"key":"9491_CR26","doi-asserted-by":"crossref","unstructured":"Chuang, Z.-J., & Wu, C.-H. (2002). 
Emotion recognition from textual input using an emotional semantic network. In Proceedings of international conference on spoken language processing (ICSLP\u201902), Vol.\u00a03, pp.\u00a02033\u20132036.","DOI":"10.21437\/ICSLP.2002-558"},{"key":"9491_CR27","doi-asserted-by":"crossref","unstructured":"Cichosz, J., & Slot, K. (2005). Low-dimensional feature space derivation for emotion recognition. In INTERSPEECH\u201905, Lisbon, Portugal, pp.\u00a0477\u2013480.","DOI":"10.21437\/Interspeech.2005-320"},{"key":"9491_CR28","unstructured":"Costantini, G., Iaderola, I., Paoloni, A., & Todisco, M. (2014). EMOVO Corpus: An Italian emotional speech database. In Proceedings of the 9th international conference on language resources and evaluation\u2014LREC 14, pp.\u00a03501\u20133504."},{"key":"9491_CR29","doi-asserted-by":"crossref","unstructured":"Cummings, K. E., & Clements, M. A. (1998). Analysis of the glottal excitation of emotionally styled and stressed speech. The Journal of the Acoustical Society of America, 98, \u00a088\u201398.","DOI":"10.1121\/1.413664"},{"key":"9491_CR30","doi-asserted-by":"crossref","unstructured":"Darwin, C. (1872\/1965). The expression of the emotions in man and animals. Chicago University Press, Chicago.","DOI":"10.1037\/10001-000"},{"key":"9491_CR31","doi-asserted-by":"crossref","unstructured":"Dellaert, F., Polzin, T., & Waibel, A. (1996a). Recognising emotions in speech. In ICSLP 96.","DOI":"10.21437\/ICSLP.1996-462"},{"key":"9491_CR32","doi-asserted-by":"crossref","unstructured":"Dellert, F., Polzin, T., & Waibel, A. (1996b). Recognizing emotion in speech. In 4th international conference on spoken language processing, Philadelphia, PA, USA, pp.\u00a01970\u20131973.","DOI":"10.1109\/ICSLP.1996.608022"},{"key":"9491_CR33","doi-asserted-by":"publisher","first-page":"33","DOI":"10.1016\/S0167-6393(02)00070-5","volume":"40","author":"E Douglas-Cowie","year":"2003","unstructured":"Douglas-Cowie, E., Campbell, N., Cowie, R., & Roach, P. (2003). 
Emotional speech: Towards a new generation of databases. Speech Communication, 40, 33\u201360.","journal-title":"Speech Communication"},{"key":"9491_CR34","doi-asserted-by":"publisher","first-page":"169","DOI":"10.1080\/02699939208411068","volume":"6","author":"P Eckman","year":"1992","unstructured":"Eckman, P. (1992). An argument for basic emotions. Cognition and Emotion, 6, 169\u2013200.","journal-title":"Cognition and Emotion"},{"key":"9491_CR35","volume-title":"Handbook of cognition and emotion","author":"P Ekman","year":"1999","unstructured":"Ekman, P. (1999). Basic emotions. In T. Dalgleish & M. Power (Eds.), Handbook of cognition and emotion. Sussex: Wiley."},{"issue":"44","key":"9491_CR36","doi-asserted-by":"publisher","first-page":"572","DOI":"10.1016\/j.patcog.2010.09.020","volume":"1","author":"M Ayadi EI","year":"2011","unstructured":"EI Ayadi M, Kamel MS, Karray F (2011). Survey on speech emotion recognition: Features, classification schemes, and databases. Pattern Recognition, 1(44), 572\u2013587.","journal-title":"Pattern Recognition"},{"key":"9491_CR37","unstructured":"Engberg, I., & Hansen, A. (1996). \u201cDocumentation of the Danish emotional speech database\u201d des. Retrieved from http:\/\/cpk.auc.dk\/tb\/speech\/Emotions\/ ."},{"issue":"1","key":"9491_CR38","first-page":"79","volume":"27","author":"Z Esmaileyan","year":"2014","unstructured":"Esmaileyan, Z., & Marvi, H. (2014). A database for automatic Persian speech emotion recognition: Collection, processing and evaluation. IJE Transactions A: Bascis, 27(1), 79\u201390.","journal-title":"IJE Transactions A: Bascis"},{"key":"9491_CR39","unstructured":"Espinosa, H. P., Garcia, J. O., & Pineda, L. V. (2010). Features selection for primitives estimation on emotional speech. 
In ICASSP, Florence, Italy, pp.\u00a05138\u20135141"},{"key":"9491_CR40","doi-asserted-by":"publisher","first-page":"145","DOI":"10.1016\/S0167-6393(02)00080-8","volume":"40","author":"R Fernandez","year":"2003","unstructured":"Fernandez, R., & Picard, R. W. (2003). Modeling driver\u2019s speech under stress. Speech Communication, 40, 145\u2013159.","journal-title":"Speech Communication"},{"key":"9491_CR41","unstructured":"Shah, A. F., Vimal Krishnan, V. R., Sukumar, A. R., Jayakumar, A., & Anto, P. B. (2009). Speaker independent automatic emotion recognition in speech: A comparison of MFCCs and discrete wavelet transforms. In International conference on advances in recent technologies in communication and computing, ARTCom \u201809."},{"key":"9491_CR42","doi-asserted-by":"publisher","first-page":"1050","DOI":"10.1111\/j.1467-9280.2007.02024.x","volume":"13","author":"JR Fontaine","year":"2007","unstructured":"Fontaine, J. R., Scherer, K. R., Roesch, E. B., & Ellsworth, P. C. (2007). The world of emotion is not two dimensional. Psychological Science, 13, 1050\u20131057.","journal-title":"Psychological Science"},{"key":"9491_CR43","doi-asserted-by":"publisher","first-page":"829","DOI":"10.1109\/10.846676","volume":"7","author":"DJ France","year":"2000","unstructured":"France, D. J., Shiavi, R. G., Silverman, S., Silverman, M., & Wilkes, M. (2000). Acoustical properties of speech as indicators of depression and suicidal risk. IEEE Transactions on Biomedical Engineering, 7, 829\u2013837.","journal-title":"IEEE Transactions on Biomedical Engineering"},{"key":"9491_CR44","unstructured":"Gangamohan, P., Kadiri, S. R., Gangashetty, S.\u00a0V., & Yegnanarayana, B. (2014). Excitation source features for discrimination of anger and happy emotions. In: INTERSPEECH, Singapore, pp.\u00a01253\u20131257."},{"key":"9491_CR45","unstructured":"Gangamohan, P., Kadiri, S. R., & Yegnanarayana, B. (2013). Analysis of emotional speech at sub segmental level. 
In Interspeech, Lyon, France, pp.\u00a01916\u20131920."},{"issue":"2","key":"9491_CR46","doi-asserted-by":"publisher","first-page":"377","DOI":"10.1037\/1528-3542.7.2.377","volume":"7","author":"P Gomez","year":"2004","unstructured":"Gomez, P., & Danuser, B. (2004). Relationships between musical structure and physiological measures of emotion. Emotion, 7(2), 377\u2013387.","journal-title":"Emotion"},{"key":"9491_CR47","unstructured":"Grimm, M., Kroschel, K., & Narayanan, S. (2008). The Vera Ammittag German audio-visual emotional speech database. In International conference on multimedia and expo, pp.\u00a0865\u2013868."},{"key":"9491_CR48","unstructured":"Grimm, M., Mower, E., Kroschel, K., & Narayanan, S. (2006). Combining categorical and primitives-based emotion recognition. In 14th European signal processing conference (EUSIPCO 2006), Florence, Italy."},{"key":"9491_CR49","unstructured":"Haq, S., & Jackson, P. J. B. (2009). Speaker-dependent audio-visual emotion recognition. In Proceedings of international conference on auditory-visual speech processing, pp.\u00a053\u201358."},{"key":"9491_CR50","doi-asserted-by":"crossref","unstructured":"He, L., Lech, M., & Allen, N. (2010). On the importance of glottal flow spectral energy for the recognition of emotions in speech. In INTERSPEECH 2010, Makuhari, Chiba, Japan, pp.\u00a026\u201330.","DOI":"10.21437\/Interspeech.2010-642"},{"key":"9491_CR51","volume-title":"Improved emotion recognition with large set of stastical features","author":"V Hozjan","year":"2003","unstructured":"Hozjan, V., & Kacic, Z. (2003). Improved emotion recognition with large set of stastical features. Geneva: Eurospecch."},{"key":"9491_CR52","unstructured":"Hozjan, V., Kacic, Z., Moreno, A., Bonafonte, A., & Nogueiras, A. (2002). Interface databases: Design and collection of a multilingual emotional speech database. 
In Proceedings of the 3rd international conference on language (LREC\u201902) Las Palmas de Gran Canaria, Spain, pp.\u00a02019\u20132023."},{"issue":"3","key":"9491_CR53","doi-asserted-by":"publisher","first-page":"445","DOI":"10.1016\/j.csl.2009.02.005","volume":"24","author":"AI Iliev","year":"2010","unstructured":"Iliev, A. I., Scordilis, M. S., Papa, J. P., & Falco, A. X. (2010). Spoken emotion recognition through optimum-path forest classification using glottal features. Computer Speech and Language, 24(3), 445\u2013460.","journal-title":"Computer Speech and Language"},{"key":"9491_CR54","doi-asserted-by":"crossref","unstructured":"Iliou, T., & Anagnostopoulos, C.-N. (2009). Statistical evaluation of speech features for emotion recognition. In Fourth international conference on digital telecommunications, Colmar, France, pp.\u00a0121\u2013126.","DOI":"10.1109\/ICDT.2009.30"},{"key":"9491_CR55","unstructured":"Iriondo, I., Guaus, R., & Rodriguez, A. (2000). Validation of an acoustical modeling of emotional expression in Spanish using speech synthesis techniques. In Proceedings of ISCA workshop speech and emotion, Belfast, Vol.\u00a01, pp.\u00a0161\u2013166."},{"key":"9491_CR56","doi-asserted-by":"publisher","first-page":"561","DOI":"10.1037\/0033-295X.99.3.561","volume":"99","author":"CE Izard","year":"1992","unstructured":"Izard, C. E. (1992). Basic emotions, relations among emotions, and emotion-cognition relations. Psychological Review, 99, 561\u2013565.","journal-title":"Psychological Review"},{"key":"9491_CR57","doi-asserted-by":"crossref","unstructured":"Jeon, J. H., Le, D., Xia, R., & Liu, Y. (2013). A preliminary study of cross-lingual emotion recognition from speech: Automatic classification versus human perception. In Interspeech, Layon, France, pp.\u00a02837\u20132840.","DOI":"10.21437\/Interspeech.2013-246"},{"key":"9491_CR58","doi-asserted-by":"crossref","unstructured":"Jiang, D.-N., & Cai, L. H. (2004). 
Classifying emotion in Chinese speech by decomposing prosodic features. In International conference on speech and language processing (ICSLP), Jeju, Korea.","DOI":"10.21437\/Interspeech.2004-326"},{"key":"9491_CR59","unstructured":"Jiang, D.-N., Zhang, W., Shen, L.-Q., & Cai, L.-H. (2005). Prosody analysis and modelling for emotional speech synthesis. In IEEE proceedings of ICASSP 2005, pp.\u00a0281\u2013284."},{"key":"9491_CR60","first-page":"397","volume-title":"An emotion space model for recognition of emotions in spoken Chinese","author":"X Jin","year":"2005","unstructured":"Jin, X., & Wang, Z. (2005). An emotion space model for recognition of emotions in spoken Chinese (pp.\u00a0397\u2013402). Berlin: Springer."},{"key":"9491_CR61","unstructured":"Jovi\u010di\u0107, S. T., Ka\u0161i\u0107, Z., \u0110or\u0111evi\u0107, M., & Rajkovi\u0107, M. (2004). Serbian emotional speech database: Design, processing and evaluation. In SPECOM 9th conference speech and computer, St. Petersburg, Russia."},{"key":"9491_CR62","doi-asserted-by":"crossref","unstructured":"Kadiri, S. R., Gangamohan, P., Gangashetty, S.\u00a0V., & Yegnanarayana, B. (2015). Analysis of excitation source features of speech for emotion recognition. In INTERSPEECH 2015, Dresden, pp.\u00a01324\u20131328.","DOI":"10.21437\/Interspeech.2015-329"},{"key":"9491_CR63","doi-asserted-by":"crossref","unstructured":"Kandali, A. B., Routray, A., & Basu, T. K. (2008a). Emotion recognition from Assamese speeches using MFCC features and GMM classifier. In Proceedings of IEEE region 10 conference on TENCHON.","DOI":"10.1109\/TENCON.2008.4766487"},{"key":"9491_CR64","unstructured":"Kandali, A. B., Routray, A., & Basu, T. K. (2008b). Emotion recognition from speeches of some native languages of ASSAM independent of text and speaker. In National seminar on Devices, Circuits, and Communications, B. I. T. 
Mesra, Ranchi, pp.\u00a06\u20137."},{"key":"9491_CR65","doi-asserted-by":"crossref","unstructured":"Kao, Y.-H., & Lee, L.-S. (2006). Feature analysis for emotion recognition from Mandarin speech considering the special characteristics of Chinese language. In INTERSPEECH-ICSLP, Pittsburgh, Pennsylvania, pp.\u00a01814\u20131817.","DOI":"10.21437\/Interspeech.2006-501"},{"key":"9491_CR66","doi-asserted-by":"crossref","unstructured":"Kim, J. B., Park, J. S., Oh, Y. H. (2011). On-line speaker adaptation based emotion recognition using incremental emotional information. In ICASSP, Prague, Czech Republic, pp.\u00a04948\u20134951.","DOI":"10.1109\/ICASSP.2011.5947466"},{"key":"9491_CR67","doi-asserted-by":"publisher","first-page":"3409","DOI":"10.1016\/j.proeng.2012.06.394","volume":"38","author":"SG Koolagudi","year":"2012","unstructured":"Koolagudi, S. G., Devliyal, S., Chawla, B., Barthwal, A., & Rao, K. S. (2012). Recognition of emotions from speech using excitation source features. Procedia Engineering, 38, 3409\u20133417.","journal-title":"Procedia Engineering"},{"issue":"4","key":"9491_CR68","doi-asserted-by":"publisher","first-page":"495","DOI":"10.1007\/s10772-012-9150-8","volume":"15","author":"SG Koolagudi","year":"2012","unstructured":"Koolagudi, S. G., & Krothapalli, S. R. (2012). Emotion recognition from speech using sub-syllabic and pitch synchronous spectral features. International Journal of Speech Technology, 15(4), 495\u2013511.","journal-title":"International Journal of Speech Technology"},{"key":"9491_CR69","first-page":"485","volume-title":"IITKGP-SESC: Speech database for emotion analysis. Communications in computer and information science, LNCS","author":"SG Koolagudi","year":"2009","unstructured":"Koolagudi, S. G., Maity, S., Kumar, V. A., Chakrabati, S., & Rao, K. S. (2009). IITKGP-SESC: Speech database for emotion analysis. Communications in computer and information science, LNCS (pp.\u00a0485\u2013492). 
Berlin: Springer."},{"key":"9491_CR70","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1007\/s10772-011-9125-1","volume":"15","author":"SG Koolagudi","year":"2012","unstructured":"Koolagudi, S. G., & Rao, K. S. (2012a). Emotion recognition from speech: A review. International Journal of Speech Technology, 15, 99\u2013117.","journal-title":"International Journal of Speech Technology"},{"issue":"2","key":"9491_CR71","doi-asserted-by":"publisher","first-page":"265","DOI":"10.1007\/s10772-012-9139-3","volume":"15","author":"SG Koolagudi","year":"2012","unstructured":"Koolagudi, S. G., & Rao, K. S. (2012b). Emotion recognition from speech using source, system, and prosodic features. International Journal of Speech Technology, 15(2), 265\u2013289.","journal-title":"International Journal of Speech Technology"},{"key":"9491_CR72","doi-asserted-by":"crossref","unstructured":"Koolagudi, S. G., Reddy, R., & Rao, K. S. (2010). Emotion recognition from speech signal using epoch parameters. In International conference on signal processing and communications (SPCOM).","DOI":"10.1109\/SPCOM.2010.5560541"},{"issue":"2","key":"9491_CR73","doi-asserted-by":"publisher","first-page":"181","DOI":"10.1007\/s10772-012-9175-z","volume":"16","author":"SR Krothapalli","year":"2013","unstructured":"Krothapalli, S. R., & Koolagudi, S. G. (2013). Characterization and recognition of emotions from speech using excitation source information. International Journal of Speech Technology, 16(2), 181\u2013201.","journal-title":"International Journal of Speech Technology"},{"key":"9491_CR74","doi-asserted-by":"crossref","unstructured":"Kwon, O.-W., Chan, K., Hao, J., & Lee, T.-W. (2003). Emotion recognition by speech signals. In EUROSPEECH, pp.\u00a0125\u2013128,.","DOI":"10.21437\/Eurospeech.2003-80"},{"key":"9491_CR75","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1016\/j.procs.2015.04.226","volume":"49","author":"RB Lanjewar","year":"2015","unstructured":"Lanjewar, R. 
B., Mauhurkar, S., & Patel, N. (2015). Implementation and comparison of speech emotion recognition system using Gaussian mixture model and K-nearest neighbor techniques. Procedia Computer Science, 49, 50\u201357.","journal-title":"Procedia Computer Science"},{"key":"9491_CR76","doi-asserted-by":"crossref","DOI":"10.1093\/oso\/9780195069945.001.0001","volume-title":"Emotion & adaptation","author":"RS Lazarus","year":"1991","unstructured":"Lazarus, R. S. (1991). Emotion & adaptation. New York: Oxford University Press."},{"key":"9491_CR77","doi-asserted-by":"crossref","unstructured":"Lee, C. M., & Narayanan, S. (2003). Emotion recognition using a data-driven fuzzy inference system. In European conference on speech and language processing (EUROSPEECH), Geneva, Switzerland, pp.\u00a0157\u2013160.","DOI":"10.21437\/Eurospeech.2003-88"},{"issue":"2","key":"9491_CR78","doi-asserted-by":"publisher","first-page":"293","DOI":"10.1109\/TSA.2004.838534","volume":"13","author":"CM Lee","year":"2005","unstructured":"Lee, C. M., & Narayanan, S. S. (2005). Toward detecting emotions in spoken dialogs. IEEE Transactions on Speech and Audio Processing, 13(2), 293\u2013303.","journal-title":"IEEE Transactions on Speech and Audio Processing"},{"key":"9491_CR79","unstructured":"Lee, C. M., Narayanan, S., & Pieraccini, R. (2001). Recognition of negative emotion in the human speech signals. In Workshop on auto, speech recognition and understanding."},{"key":"9491_CR80","doi-asserted-by":"crossref","unstructured":"Lee, C. M., Yildirim, S., Bulut, M., Kazemzadeh, A., Busso, C., Deng, Z. et al. (2004). Emotion recognition based on phoneme classes. 
In 8th international conference on spoken language processing, INTERSPEECH 2004, Korea.","DOI":"10.21437\/Interspeech.2004-322"},{"key":"9491_CR81","doi-asserted-by":"publisher","first-page":"1162","DOI":"10.1016\/j.specom.2011.06.004","volume":"53","author":"C-C Lee","year":"2011","unstructured":"Lee, C.-C., Mower, E., Busso, C., Lee, S., & Narayanan, S. (2011). Emotion recognition using a hierarchical binary decision tree approach. Speech Communication, 53, 1162\u20131171.","journal-title":"Speech Communication"},{"key":"9491_CR82","doi-asserted-by":"publisher","first-page":"161","DOI":"10.1016\/S0167-6393(02)00081-X","volume":"40","author":"A Lida","year":"2003","unstructured":"Lida, A., Campbell, N., Higuchi, F., & Yasumura, M. (2003). A corpus based synthesis system with emotion. Speech Communication, 40, 161\u2013187.","journal-title":"Speech Communication"},{"key":"9491_CR83","unstructured":"Lin, Y.-L., & Wei, G. (2005). Speech emotion recognition based on HMM and SVM. In: Fourth International conference on machine learning and cybernetics, Guangzhou, pp.\u00a04898\u20134901."},{"key":"9491_CR84","doi-asserted-by":"crossref","unstructured":"Lotfian, R., & Busso, C. (2015). Emotion recognition using synthetic speech as neutral reference. In IEEE International conference on ICASSP, pp.\u00a04759\u20134763.","DOI":"10.1109\/ICASSP.2015.7178874"},{"key":"9491_CR85","doi-asserted-by":"crossref","unstructured":"Luengo, I., Navas, E., Hern\u00e1ez, I., & S\u00e1nchez, J. (2005). Automatic emotion recognition using prosodic parameters. In INTERSPEECH, Lisbon, Portugal, pp.\u00a0493\u2013496.","DOI":"10.21437\/Interspeech.2005-324"},{"key":"9491_CR86","doi-asserted-by":"crossref","unstructured":"Lugger, M., & Yang, B. (2007). The relevance of voice quality features in speaker independent emotion recognition. 
In ICASSP, Honolulu, Hawaii, pp.\u00a0IV17\u2013IV20.","DOI":"10.1109\/ICASSP.2007.367152"},{"key":"9491_CR87","doi-asserted-by":"crossref","unstructured":"Makarova, V., & Petrushin, V. A. (2002). RUSLANA: A database of Russian emotional utterances. In 7th International conference on spoken language processing (ICSLP 02), pp.\u00a02041\u20132044.","DOI":"10.21437\/ICSLP.2002-560"},{"issue":"4","key":"9491_CR88","doi-asserted-by":"publisher","first-page":"561","DOI":"10.1109\/PROC.1975.9792","volume":"63","author":"J Makhoul","year":"1975","unstructured":"Makhoul, J. (1975). Linear prediction: A tutorial review. Proceedings of the IEEE, 63(4), 561\u2013580.","journal-title":"Proceedings of the IEEE"},{"key":"9491_CR89","unstructured":"McGilloway, S., Cowie, R., Douglas-Cowie, E., Gielen, S., Westerdijk, M., & Stroeve, S. (2000) Approaching automatic recognition of emotion from voice: A rough benchmark. In Proceedings of ISCA workshop speech emotion, pp.\u00a0207\u2013212."},{"issue":"1","key":"9491_CR90","first-page":"1","volume":"6","author":"G McKeown","year":"2007","unstructured":"McKeown, G., Valstar, M., Cowie, R., Pantic, M., & Schroder, M. (2007). The SEMAINE database: Annotated multimodal records of emotionally coloured conversations between a person and a limited agent. Journal of LATEX Class Files, 6(1), 1\u201314.","journal-title":"Journal of LATEX Class Files"},{"key":"9491_CR91","doi-asserted-by":"publisher","first-page":"68","DOI":"10.1016\/j.knosys.2014.03.019","volume":"63","author":"A Mencattini","year":"2014","unstructured":"Mencattini, A., Martinelli, E., Costantini, G., Todisco, M., Basile, B., Bozzali, M., & Di Natale, C. (2014). Speech emotion recognition using amplitude modulation parameters and a combined feature selection procedure. Knowledge-Based Systems, 63, 68\u201381.","journal-title":"Knowledge-Based Systems"},{"key":"9491_CR92","doi-asserted-by":"crossref","unstructured":"Mirsamadi, S., Barsoum, E., & Zhang, C. (2017). 
Automatic speech emotion recognition using recurrent neural networks with local attention. In Proceedings of IEEE conference on ICASSP, pp.\u00a02227\u20132231.","DOI":"10.1109\/ICASSP.2017.7952552"},{"key":"9491_CR93","unstructured":"Mohanty, S., & Swain, B. K. (2010). Emotion recognition using fuzzy K-means from Oriya speech. In International Conference [ACCTA-2010] on Special Issue of IJCCT, Vol.\u00a01 Issue 2\u20134."},{"key":"9491_CR94","unstructured":"Montero, J. M., Guti\u00e9rrez-Arriola, J., Col\u00e1s, J., Enr\u00edquez, E., & Pardo, J. M. (1999). Analysis and modeling of emotional speech in Spanish. In Proceedings of international conference on phonetic sciences, pp.\u00a0957\u2013960."},{"key":"9491_CR95","doi-asserted-by":"publisher","first-page":"98","DOI":"10.1016\/j.specom.2006.11.004","volume":"49","author":"D Morrison","year":"2007","unstructured":"Morrison, D., Wang, R., & De Silva, L. C. (2007). Ensemble methods for spoken emotion recognition in call-centres. Speech Communication, 49, 98\u2013112.","journal-title":"Speech Communication"},{"key":"9491_CR96","doi-asserted-by":"publisher","first-page":"497","DOI":"10.1016\/S0950-7051(00)00070-8","volume":"13","author":"R Nakatsu","year":"2000","unstructured":"Nakatsu, R., Nicholson, J., & Tosa, N. (2000). Emotion recognition and its application to computer agents with spontaneous interactive capabilities. Knowledge-Based Systems, 13, 497\u2013504.","journal-title":"Knowledge-Based Systems"},{"key":"9491_CR97","doi-asserted-by":"publisher","first-page":"88","DOI":"10.1016\/j.csl.2016.05.001","volume":"41","author":"D Nandi","year":"2017","unstructured":"Nandi, D., Pati, D., & Rao, K. S. (2017). Parametric representation of excitation source information for language identification. Computer Speech and Language, 41, 88\u2013115.","journal-title":"Computer Speech and Language"},{"key":"9491_CR98","doi-asserted-by":"crossref","unstructured":"Neiberg, D., Elenius, K., & Laskowski, K. (2006). 
Emotion recognition in spontaneous speech using GMMs. In INTERSPEECH 2006, ICSLP, Pittsburgh, Pennsylvania, pp.\u00a0809\u2013812.","DOI":"10.21437\/Interspeech.2006-277"},{"key":"9491_CR99","unstructured":"New, T. L., Wei, F. S., & De Silva, L. C. (2001). Speech based emotion classification. In Proceedings of the IEEE region 10 international conference on electrical and electronic technology (TENCON), Phuket Island, Singapore, Vol.\u00a01, pp\u00a0297\u2013301."},{"key":"9491_CR100","doi-asserted-by":"publisher","first-page":"603","DOI":"10.1016\/S0167-6393(03)00099-2","volume":"41","author":"TL New","year":"2003","unstructured":"New, T. L., Wei, F. S., & De Silva, L. C. (2003). Speech emotion recognition using hidden Markov models. Speech Communication, 41, 603\u2013623.","journal-title":"Speech Communication"},{"key":"9491_CR101","first-page":"290","volume":"11","author":"J Nicholson","year":"2006","unstructured":"Nicholson, J., Takahashi, K., & Nakatsu, R. (2006). Emotion recognition in speech using neural networks. Neural Computing & Applications, 11, 290\u2013296.","journal-title":"Neural Computing & Applications"},{"key":"9491_CR102","doi-asserted-by":"crossref","unstructured":"Nogueiras, A., Marino, J. B., Moreno, A., & Bonafonte, A. (2001). Speech emotion recognition using hidden Markov models. In Proceedings of European conference on speech communication and technology (Eurospeech\u201901), Denmark.","DOI":"10.21437\/Eurospeech.2001-627"},{"key":"9491_CR103","doi-asserted-by":"publisher","first-page":"187","DOI":"10.1016\/j.specom.2004.09.003","volume":"44","author":"L Nordstrand","year":"2004","unstructured":"Nordstrand, L., Svanfeld, G., Granstrom, B., & House, D. (2004). Measurements of ariculatory variation in expressive speech for a set of Swedish vowels. 
Speech Communication, 44, 187\u2013196.","journal-title":"Speech Communication"},{"key":"9491_CR104","doi-asserted-by":"publisher","first-page":"5858","DOI":"10.1016\/j.eswa.2014.03.026","volume":"41","author":"CS Ooi","year":"2014","unstructured":"Ooi, C. S., Seng, K. P., Ang, L.-M., & Chew, L. W. (2014). A new approach of audio emotion recognition. Experts Systems with Applications, 41, 5858\u20135869.","journal-title":"Experts Systems with Applications"},{"key":"9491_CR105","doi-asserted-by":"crossref","unstructured":"Pao, T.-L., Chen, Y.-T., Yeh, J.-H., & Liao, W.-Y. (2005). Combining acoustic features for improved emotion recognition in Mandarin speech. In International conference on affective computing and intelligent interaction, pp.\u00a0279\u2013285.","DOI":"10.1007\/11573548_36"},{"key":"9491_CR106","unstructured":"Park, C.-H., & Sim, K.-B. (2003). Emotion recognition and acoustic analysis from speech signal. In Proceedings of the international joint conference on neural networks, pp.\u00a02594\u20132598."},{"key":"9491_CR107","unstructured":"Pereira, C. (2000). Dimensions of emotional meaning in speech. In Proceedings of ISCA workshop speech and emotion, Belfast, Vol.\u00a01, pp.\u00a025\u201328."},{"key":"9491_CR108","unstructured":"Petrushin, V. A. (1999). Emotion in speech: Recognition and application to call centers. In Proceedings of the 1999 conference on artificial neural networks in engineering (ANNIE 99)."},{"key":"9491_CR109","doi-asserted-by":"publisher","DOI":"10.1037\/e526112012-054","volume-title":"Affective computing","author":"RW Picard","year":"1997","unstructured":"Picard, R. W. (1997). Affective computing. Cambridge: The MIT Press."},{"key":"9491_CR110","doi-asserted-by":"publisher","first-page":"1175","DOI":"10.1109\/34.954607","volume":"23","author":"RW Picard","year":"2001","unstructured":"Picard, R. W., Vyzas, E., & Healey, J. (2001). Toward machine emotional intelligence: Analysis of affective physiological state. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 23, 1175\u20131191.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"9491_CR111","volume-title":"Cognition and emotion from order to disorder","author":"M Power","year":"2000","unstructured":"Power, M., & Dalgleish, T. (2000). Cognition and emotion from order to disorder. New York: Psychology Press."},{"key":"9491_CR112","doi-asserted-by":"crossref","unstructured":"Prasanna, S. R. M., & Govind, D. (2010). Analysis of excitation source information in emotional speech. In INTERSPEECH 2010, Makuhari, Chiba, Japan, pp.\u00a0781\u2013784.","DOI":"10.21437\/Interspeech.2010-284"},{"key":"9491_CR113","doi-asserted-by":"publisher","first-page":"1243","DOI":"10.1016\/j.specom.2006.06.002","volume":"48","author":"SRM Prasanna","year":"2006","unstructured":"Prasanna, S. R. M., Gupta, C. S., & Yegnanarayana, B. (2006). Extraction of speaker-specific excitation information from linear prediction residual of speech. Speech Communication, 48, 1243\u20131261.","journal-title":"Speech Communication"},{"issue":"4","key":"9491_CR114","doi-asserted-by":"publisher","first-page":"787","DOI":"10.1007\/s10772-017-9445-x","volume":"20","author":"D Pravena","year":"2017","unstructured":"Pravena, D., & Govind, D. (2017). Significance of incorporating excitation source parameters for improved emotion recognition from speech and electroglottographic signals. International Journal of Speech Technology, 20(4), 787\u2013797.","journal-title":"International Journal of Speech Technology"},{"key":"9491_CR115","doi-asserted-by":"publisher","first-page":"327","DOI":"10.1007\/s10772-017-9407-3","volume":"20","author":"D Pravena","year":"2017","unstructured":"Pravena, D., & Govind, D. (2017). Development of simulated emotion speech database for excitation source analysis. 
International Journal of Speech Technology, 20, 327\u2013338.","journal-title":"International Journal of Speech Technology"},{"key":"9491_CR116","unstructured":"Quiros-Ramirez, M. A., Polikovsky, S., Kameda, Y., & Onisawa, T. (2014). A spontaneous cross-cultural emotion database: Latin-America vs. Japan. In International conference on Kansei Engineering and emotion research, pp.\u00a01127\u20131134."},{"key":"9491_CR117","volume-title":"Fundamentals of speech recognition","author":"L Rabiner","year":"1993","unstructured":"Rabiner, L., & Juang, B.-H. (1993). Fundamentals of speech recognition. Englewood Cliffs: Prentice-Hall."},{"key":"9491_CR118","doi-asserted-by":"crossref","unstructured":"Rahurkar, M. A., & Hansen, J. H. (2002). Frequency band analysis for stress detection using a Teager energy operator based feature. Proceedings of International Conference on Spoken Language Processing (ICSLP\u2019), Vol.\u00a03, issue 02, pp.\u00a02021\u20132024.","DOI":"10.21437\/ICSLP.2002-555"},{"key":"9491_CR119","doi-asserted-by":"crossref","unstructured":"Ramamohan, S., & Dandapat, S. (2006). Sinusoidal model-based analysis and classification of stressed speech. In IEEE transactions on audio, speech and language processing, Vol.\u00a014, p.\u00a03.","DOI":"10.1109\/TSA.2005.858071"},{"issue":"4","key":"9491_CR120","first-page":"24","volume":"9","author":"KS Rao","year":"2011","unstructured":"Rao, K. S., & Koolagudi, S. G. (2011). Identification of Hindi dialects and emotions using spectral and prosodic features of speech. Systemics, Cybernetics, and Informatics, 9(4), 24\u201333.","journal-title":"Systemics, Cybernetics, and Informatics"},{"issue":"2","key":"9491_CR121","doi-asserted-by":"publisher","first-page":"143","DOI":"10.1007\/s10772-012-9172-2","volume":"16","author":"KS Rao","year":"2013","unstructured":"Rao, K. S., Koolagudi, S. G., & Vempada, R. R. (2013). Emotion recognition from speech using global and local prosodic features. 
International Journal of Speech Technology, 16(2), 143\u2013160.","journal-title":"International Journal of Speech Technology"},{"key":"9491_CR122","first-page":"3603","volume":"3","author":"KS Rao","year":"2012","unstructured":"Rao, K. S., Kumar, T. P., Anusha, K., Leela, B., Bhavana, I., & Gowtham, S. V. S. K. (2012). Emotion recognition from speech. International Journal of Computer Science and Information Technologies, 3, 3603\u20133607.","journal-title":"International Journal of Computer Science and Information Technologies"},{"key":"9491_CR123","doi-asserted-by":"publisher","first-page":"762","DOI":"10.1109\/LSP.2007.896454","volume":"14","author":"KS Rao","year":"2007","unstructured":"Rao, K. S., Prasanna, S. R. M., & Yegnanarayana, B. (2007). Determination of instants of significant excitation in speech using Hilbert envelope and group delay function. IEEE Signal Processing Letters, 14, 762\u2013765.","journal-title":"IEEE Signal Processing Letters"},{"key":"9491_CR124","doi-asserted-by":"crossref","unstructured":"Rao, K. S., & Yegnanarayana, B. (2006). Prosody modification using instants of significant excitation. In IEEE transactions on audio and speech, pp.\u00a0972\u2013980.","DOI":"10.1109\/TSA.2005.858051"},{"key":"9491_CR125","doi-asserted-by":"publisher","first-page":"315","DOI":"10.1016\/j.ipm.2008.09.003","volume":"45","author":"J Rong","year":"2009","unstructured":"Rong, J., Li, G., & Chen, Y. P. P. (2009). Acoustic feature selection for automatic emotion recognition from speech. Information Processing and Management, 45, 315\u2013328.","journal-title":"Information Processing and Management"},{"key":"9491_CR126","doi-asserted-by":"crossref","unstructured":"Rozgic, V., Ananthakrishnan, S., Saleem, S., Kumar, R., Vembu, A. N., & Prasad, R. (2012). Emotion recognition using acoustic and lexical features. 
In INTERSPEECH, Portland, USA.","DOI":"10.21437\/Interspeech.2012-118"},{"key":"9491_CR127","doi-asserted-by":"publisher","first-page":"1161","DOI":"10.1037\/h0077714","volume":"39","author":"JA Russell","year":"1980","unstructured":"Russell, J. A. (1980). A circumplex model of affect. Journal of Personality and Social Psychology, 39, 1161\u20131178.","journal-title":"Journal of Personality and Social Psychology"},{"key":"9491_CR128","doi-asserted-by":"publisher","first-page":"805","DOI":"10.1037\/0022-3514.76.5.805","volume":"76","author":"JA Russell","year":"1999","unstructured":"Russell, J. A., & Barrett, L. F. (1999). Core affect, prototypical emotional episodes, and other things called emotion: Dissecting the elephant. Journal of Personality and Social Psychology, 76, 805\u2013819.","journal-title":"Journal of Personality and Social Psychology"},{"key":"9491_CR129","doi-asserted-by":"publisher","first-page":"273","DOI":"10.1016\/0092-6566(77)90037-X","volume":"11","author":"JA Russell","year":"1977","unstructured":"Russell, J. A., & Mehrabian, A. (1977). Evidence for a three-factor theory of emotions. Journal of Research in Personality, 11, 273\u2013294.","journal-title":"Journal of Research in Personality"},{"key":"9491_CR130","doi-asserted-by":"publisher","first-page":"321","DOI":"10.1017\/CBO9780511806582.019","volume-title":"Feelings and emotions: The Amsterdam symposium","author":"P Salovey","year":"2004","unstructured":"Salovey, P., Kokkonen, M., Lopes, P., & Mayer, J. (2004). Emotional Intelligence: What do we know? In ASR Manstead, N. H. Frijda & A. H. Fischer (Eds.), Feelings and emotions: The Amsterdam symposium (pp.\u00a0321\u2013340). Cambridge: Cambridge University Press."},{"key":"9491_CR131","doi-asserted-by":"publisher","first-page":"379","DOI":"10.1037\/h0046234","volume":"69","author":"S Schachter","year":"1962","unstructured":"Schachter, S., & Singer, J. (1962). Cognitive, social, and physiological determinants of emotional state. 
Psychological Review, 69, 379\u2013399.","journal-title":"Psychological Review"},{"key":"9491_CR132","doi-asserted-by":"crossref","unstructured":"Scherer, K. R., Grandjean, D., Johnstone, T., Klasmeyer, G., & Banziger, T. (2002). Acoustic correlates of task load and stress. In Proceedings of international conference on spoken language processing (ICSLP\u201902), Colorado, Vol.\u00a03, pp.\u00a02017\u20132020.","DOI":"10.21437\/ICSLP.2002-554"},{"key":"9491_CR133","unstructured":"Schroder, M. (2000). Experimental study of affect bursts. In Proceedings of ISCA workshop speech and emotion, Vol.\u00a01, pp.\u00a0132\u2013137."},{"key":"9491_CR134","unstructured":"Schroder, M., & Grice, M. (2003). Expressing vocal effort in concatenative synthesis. In Proceedings of international conference on phonetic sciences (ICPhS\u201903), Barcelona, pp.\u00a02589\u20132592."},{"key":"9491_CR135","unstructured":"Schubert, E. (1999). Measurement and time series analysis of emotion in music, Ph.D dissertation, school of Music education, University of New South Wales, Sydney, Australia."},{"key":"9491_CR136","unstructured":"Schuller, B., Rigoll, G., & Lang, M. (2003). Hidden Markov model based speech emotion recognition. In Proceedings of the International conference on multimedia and Expo, ICME."},{"key":"9491_CR137","unstructured":"Schuller, B., Rigoll, G., & Lang, M. (2004). Speech emotion recognition combining acoustic features and linguistic information in a hybrid support vector machine-belief network architecture. In Proceedings of international conference on acoustics, speech and signal processing (ICASSP\u201904), Vol.\u00a01, pp.\u00a0557\u2013560."},{"issue":"1","key":"9491_CR138","doi-asserted-by":"publisher","first-page":"215","DOI":"10.1007\/s00521-012-0814-8","volume":"23","author":"M Sheikhan","year":"2013","unstructured":"Sheikhan, M., Bejani, M., & Gharavian, D. (2013). Modular neural-SVM scheme for speech emotion recognition using ANOVA feature selection method. 
Neural Computing and Applications, 23(1), 215\u2013227.","journal-title":"Neural Computing and Applications"},{"key":"9491_CR139","doi-asserted-by":"publisher","first-page":"367","DOI":"10.1016\/S0167-6393(02)00049-3","volume":"39","author":"M Slaney","year":"2003","unstructured":"Slaney, M., & McRoberts, G. (2003). Babyears: A recognition system for affective vocalizations. Speech Communication, 39, 367\u2013384.","journal-title":"Speech Communication"},{"key":"9491_CR140","doi-asserted-by":"crossref","unstructured":"Song, P., Ou, S., Zheng, W., Jin, Y., & Zhao, L. (2016). Speech emotion recognition using transfer non-negative matrix factorization. In Proceedings of IEEE international conference ICASSP, pp.\u00a05180\u20135184.","DOI":"10.1109\/ICASSP.2016.7472665"},{"key":"9491_CR141","doi-asserted-by":"crossref","unstructured":"Sun, R., & Moore, E. (2011). Investigating glottal parameters and teager energy operators in emotion recognition. In Affective Computing and Intelligent Interaction, pp.\u00a0425\u2013434.","DOI":"10.1007\/978-3-642-24571-8_54"},{"key":"9491_CR142","doi-asserted-by":"crossref","unstructured":"Takahashi, K. (2004). Remarks on SVM-based emotion recognition from multi-modal bio-potential signals. In 13th IEEE international workshop on robot and human interactive communication, Roman.","DOI":"10.1109\/ROMAN.2004.1374736"},{"key":"9491_CR143","doi-asserted-by":"crossref","unstructured":"Tao, J., & Kang, Y. (2005). Features importance analysis for emotional speech classification. In Affective Computing and Intelligent Interaction, pp.\u00a0449\u2013457.","DOI":"10.1007\/11573548_58"},{"key":"9491_CR144","doi-asserted-by":"crossref","unstructured":"Tato, R., Santos, R., Kompe, R., & Pardo, J. M. (2002). Emotional space improves emotion recognition. 
In Proceedings of international conference on spoken language processing (ICSLP\u201902), Colorado, Vol.\u00a03, pp.\u00a02029\u20132032.","DOI":"10.21437\/ICSLP.2002-557"},{"key":"9491_CR145","volume-title":"Affect imagery and consciousness: The positive affects","author":"S Tomkins","year":"1962","unstructured":"Tomkins, S. (1962). Affect imagery and consciousness: The positive affects, Vol.\u00a01. New York: Springer."},{"key":"9491_CR146","unstructured":"University of Pennsylvania Linguistic Data Consortium. (2002). Emotional prosody speech and transcripts. Retrieved from http:\/\/www.ldc.upenn.edu\/Catalog\/CatalogEntry.jsp?CatalogId=LDC2002S28 ."},{"key":"9491_CR147","doi-asserted-by":"publisher","first-page":"1162","DOI":"10.1016\/j.specom.2006.04.003","volume":"48","author":"D Ververidis","year":"2006","unstructured":"Ververidis, D., & Kotropoulos, C. (2006). Emotional speech recognition: Resources, features and methods. Speech Communication, 48, 1162\u20131181.","journal-title":"Speech Communication"},{"key":"9491_CR148","doi-asserted-by":"crossref","unstructured":"Ververidis, D., Kotropoulos, C., & Pitas, I. (2004). Automatic emotional speech classification. In Proceedings of international conference on acoustics, speech and signal processing (ICASSP\u201904), Montreal, Vol.\u00a01, pp.\u00a0593\u2013596.","DOI":"10.1109\/ICASSP.2004.1326055"},{"key":"9491_CR149","doi-asserted-by":"crossref","unstructured":"Vidrascu, L., & Devillers, L. (2005). Detection of real-life emotions in call centers. In INTERSPEECH, Lisbon, Portugal, pp.\u00a01841\u20131844.","DOI":"10.21437\/Interspeech.2005-582"},{"key":"9491_CR150","unstructured":"Vogt, T., & Andr\u00e9, E. (2006). Improving automatic emotion recognition from speech via gender differentiation. 
In Proceedings of language resources and evaluation conference (LREC 2006), Genoa."},{"key":"9491_CR151","doi-asserted-by":"publisher","first-page":"270","DOI":"10.1109\/TASSP.1976.1162797","volume":"24","author":"H Wakita","year":"1976","unstructured":"Wakita, H. (1976). Residual energy of linear prediction to vowel and speaker recognition. IEEE Transactions on Acoustics, Speech, and Signal Processing, 24, 270\u2013271.","journal-title":"IEEE Transactions on Acoustics, Speech, and Signal Processing"},{"key":"9491_CR152","doi-asserted-by":"crossref","unstructured":"Wang, K., An, N., Li, B. N., Zhang, Y., & Li, L. (2015). Speech emotion recognition using Fourier parameters. IEEE Transactions on Affective Computing, 6(1), 69\u201375.","DOI":"10.1109\/TAFFC.2015.2392101"},{"key":"9491_CR153","doi-asserted-by":"crossref","unstructured":"Wang, Y., Du, S., & Zhan, Y. (2008). Adaptive and optimal classification of speech emotion recognition. In Fourth international conference on natural computation, pp.\u00a0407\u2013411.","DOI":"10.1109\/ICNC.2008.713"},{"key":"9491_CR154","unstructured":"Wang, Y., & Guan, L. (2004). An investigation of speech based human emotion recognition. In IEEE 6th workshop on multimedia signal processing."},{"key":"9491_CR155","first-page":"23","volume-title":"Fundamentals of speech synthesis and speech recognition: Basic concepts, state of the art, the future challenges","author":"S Werner","year":"1994","unstructured":"Werner, S., & Keller, E. (1994). Prosodic aspects of speech. In E. Keller (Ed.), Fundamentals of speech synthesis and speech recognition: Basic concepts, state of the art, the future challenges (pp.\u00a023\u201340). Chichester: Wiley."},{"issue":"5","key":"9491_CR156","doi-asserted-by":"publisher","first-page":"768","DOI":"10.1016\/j.specom.2010.08.013","volume":"53","author":"S Wu","year":"2011","unstructured":"Wu, S., Falk, T. H., & Chan, W.-Y. (2011). Automatic speech emotion recognition using modulation spectral features. 
Speech Communication, 53(5), 768\u2013785.","journal-title":"Speech Communication"},{"key":"9491_CR157","doi-asserted-by":"crossref","unstructured":"Wu, T., Yang, Y., Wu, Z., & Li, D. (2006). MASC: a speech corpus in mandarin for emotion analysis and affective speaker recognition. In Speaker and language recognition workshop.","DOI":"10.1109\/ODYSSEY.2006.248084"},{"key":"9491_CR158","doi-asserted-by":"crossref","unstructured":"Wu, W., Zheng, T. F., Xu, M.-X., & Bao, H.-J. (2006). Study on speaker verification on emotional speech. In INTERSPEECH\u201906, Pittsburgh, Pennsylvania, pp.\u00a02102\u20132105.","DOI":"10.21437\/Interspeech.2006-191"},{"key":"9491_CR159","unstructured":"Wundt, W. (2013). An introduction to psychology. Read Books Ltd."},{"key":"9491_CR160","volume-title":"Emotion recognition using a data-driven fuzzy inference system","author":"J Yamagishi","year":"2003","unstructured":"Yamagishi, J., Onishi, K., Maskko, T., & Kobayashi, T. (2003). Emotion recognition using a data-driven fuzzy inference system. Geneva: Eurospeech."},{"issue":"5","key":"9491_CR161","first-page":"651","volume":"36","author":"B Yegnanarayana","year":"2011","unstructured":"Yegnanarayana, B., & Gangashetty, S. (2011). Epoch-based analysis of speech signals. S\u0101dhan\u0101, 36(5), 651\u2013697.","journal-title":"S\u0101dhan\u0101"},{"issue":"6","key":"9491_CR162","doi-asserted-by":"publisher","first-page":"1196","DOI":"10.1109\/TASL.2009.2016230","volume":"17","author":"B Yegnanarayana","year":"2009","unstructured":"Yegnanarayana, B., Swamy, R. K., & Murty, K. S. R. (2009). Determining mixing parameters from multispeaker data using speech-specific information. IEEE Transactions on Audio, Speech, and Language Processing, 17(6), 1196\u20131207.","journal-title":"IEEE Transactions on Audio, Speech, and Language Processing"},{"key":"9491_CR163","doi-asserted-by":"crossref","unstructured":"Yeh, L., & Chi, T. (2010). 
Spectro-temporal modulations for robust speech emotion recognition. In INTERSPEECH, Chiba, Japan, pp.\u00a0789\u2013792.","DOI":"10.21437\/Interspeech.2010-286"},{"key":"9491_CR164","doi-asserted-by":"crossref","unstructured":"Yildirim, S., Bulut, M., Lee, C. M., Kazemzadeh, A., Busso, C., Deng, Z., Lee, S., & Narayanan, S. (2004). An acoustic study of emotions expressed in speech. In Proceedings of International Conference on Spoken Language Processing (ICSLP\u201904), Korea, Vol.\u00a01, pp.\u00a02193\u20132196.","DOI":"10.21437\/Interspeech.2004-242"},{"key":"9491_CR165","first-page":"1743","volume":"4","author":"M You","year":"1997","unstructured":"You, M., Chen, C., Bu, J., Liu, J., & Tao, J. (1997). Getting started with susas: a speech under simulated and actual stress database. Eurospeech, 4, 1743\u20131746.","journal-title":"Eurospeech"},{"key":"9491_CR166","doi-asserted-by":"crossref","unstructured":"Yu, F., Chang, E., Xu, Y.-Q., & Shum, H.-Y. (2001). Emotion detection from speech to enrich multimedia content. In: Proceedings of IEEE Pacific-Rim Conference on Multimedia, Beijing, Vol.\u00a01, pp.\u00a0550\u2013557.","DOI":"10.1007\/3-540-45453-5_71"},{"key":"9491_CR167","doi-asserted-by":"crossref","unstructured":"Yuan, J., Shen, L., & Chen, F. (2002). The acoustic realization of anger, fear, joy and sadness in Chinese. In Proceedings of International Conference on Spoken Language Processing (ICSLP\u201902), Vol.\u00a03, pp.\u00a02025\u20132028.","DOI":"10.21437\/ICSLP.2002-556"},{"key":"9491_CR168","first-page":"457","volume-title":"Advances in neural networks. Lecture notes in computer science","author":"S Zhang","year":"2008","unstructured":"Zhang, S. (2008). Emotion recognition in Chinese natural speech by combining prosody and voice quality features. In Sun et al. (Ed.), Advances in neural networks. Lecture notes in computer science (pp.\u00a0457\u2013464). 
Berlin: Springer."},{"key":"9491_CR169","unstructured":"Zhang, T., Hasegawa-Johnson, M., & Levinson, S. E. (2004). Children\u2019s emotion recognition in an intelligent tutoring scenario. In Proceeding of the eighth European Conference on Speech Communication and Technology, INTERSPEECH."},{"key":"9491_CR170","first-page":"544","volume-title":"Human computer interaction, Part III, HCII","author":"A Zhu","year":"2007","unstructured":"Zhu, A., & Luo, Q. (2007). Study on speech emotion recognition system in E-learning. In J. Jacko (Ed.), Human computer interaction, Part III, HCII (pp.\u00a0544\u2013552). Berlin: Springer."}],"container-title":["International Journal of Speech Technology"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s10772-018-9491-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s10772-018-9491-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s10772-018-9491-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T16:27:40Z","timestamp":1719764860000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s10772-018-9491-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,1,19]]},"references-count":170,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2018,3]]}},"alternative-id":["9491"],"URL":"https:\/\/doi.org\/10.1007\/s10772-018-9491-z","relation":{},"ISSN":["1381-2416","1572-8110"],"issn-type":[{"value":"1381-2416","type":"print"},{"value":"1572-8110","type":"electronic"}],"subject":[],"published":{"date-parts":[[2018,1,19]]},"assertion":[{"value":"13 April 
2017","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 January 2018","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 January 2018","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}