{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T00:45:02Z","timestamp":1776041102371,"version":"3.50.1"},"reference-count":35,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2021,2,5]],"date-time":"2021-02-05T00:00:00Z","timestamp":1612483200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,2,5]],"date-time":"2021-02-05T00:00:00Z","timestamp":1612483200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2021,4]]},"DOI":"10.1007\/s11042-020-10329-2","type":"journal-article","created":{"date-parts":[[2021,2,8]],"date-time":"2021-02-08T21:22:37Z","timestamp":1612819357000},"page":"15563-15587","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":58,"title":["Convolution neural network based automatic speech emotion recognition using Mel-frequency Cepstrum coefficients"],"prefix":"10.1007","volume":"80","author":[{"given":"Manju D.","family":"Pawar","sequence":"first","affiliation":[]},{"given":"Rajendra D.","family":"Kokate","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,2,5]]},"reference":[{"key":"10329_CR1","doi-asserted-by":"crossref","unstructured":"Alex SB, Mary L, Babu BP (2020) Attention and feature selection for automatic speech emotion recognition using utterance and syllable-level prosodic features. Circuits Syst Signal Process 39:5681\u20135709","DOI":"10.1007\/s00034-020-01429-3"},{"issue":"3","key":"10329_CR2","doi-asserted-by":"publisher","first-page":"572","DOI":"10.1016\/j.patcog.2010.09.020","volume":"44","author":"E Ayadi","year":"2011","unstructured":"Ayadi E, Moataz KMS, Karray F (2011) Survey on speech emotion recognition: features, classification schemes, and databases. Pattern Recogn 44(3):572\u2013587","journal-title":"Pattern Recogn"},{"issue":"2","key":"10329_CR3","first-page":"353","volume":"29","author":"SR Bandela","year":"2020","unstructured":"Bandela SR, Kumar TK (2020) Speech emotion recognition using unsupervised feature selection algorithms. Radio Eng 29(2):353","journal-title":"Radio Eng"},{"key":"10329_CR4","unstructured":"Berlin Dataset: http:\/\/emodb.bilderbar.info\/start.html. Accessed 11 Sept 2018"},{"issue":"2","key":"10329_CR5","first-page":"822","volume":"3","author":"HB Chauhan","year":"2015","unstructured":"Chauhan HB, Tanawala BA (2015) Comparative study of MFCC and LPC algorithms for Gujrati isolated word recognition. Int J Innov Res Comput Commun Eng 3(2):822\u2013826","journal-title":"Int J Innov Res Comput Commun Eng"},{"key":"10329_CR6","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1016\/j.procs.2018.03.005","volume":"128","author":"R Errattahi","year":"2018","unstructured":"Errattahi R, Hannani AE, Ouahmane H (2018) Automatic speech recognition errors detection and correction: a review. Procedia Comput Sci 128:32\u201337","journal-title":"Procedia Comput Sci"},{"key":"10329_CR7","doi-asserted-by":"publisher","first-page":"60","DOI":"10.1016\/j.neunet.2017.02.013","volume":"92","author":"HM Fayek","year":"2017","unstructured":"Fayek HM, Lech M, Cavedon L (2017) Evaluating deep learning architectures for speech emotion recognition. Neural Netw 92:60\u201368","journal-title":"Neural Netw"},{"key":"10329_CR8","doi-asserted-by":"publisher","first-page":"230","DOI":"10.1016\/j.apacoust.2016.06.027","volume":"113","author":"EP Frigieri","year":"2016","unstructured":"Frigieri EP, Campos PHS, Paiva AP, Balestrassi PP, Ferreira JR, Ynoguti CA (2016) A mel-frequency cepstral coefficient-based approach for surface roughness diagnosis in hard turning using acoustic signals and gaussian mixture models. Appl Acoust 113:230\u2013237","journal-title":"Appl Acoust"},{"key":"10329_CR9","doi-asserted-by":"crossref","unstructured":"Fu L, Mao X, Chen L (2008) Relative speech emotion recognition based artificial neural network. 2008 IEEE Pacific-Asia workshop on computational intelligence and industrial application. IEEE, 2","DOI":"10.1109\/PACIIA.2008.355"},{"key":"10329_CR10","unstructured":"Glittas AX, Gopalakrishnan L (2020) A low latency modular-level deeply integrated MFCC feature extraction architecture for speech recognition. Integration 76:69\u201375"},{"issue":"4","key":"10329_CR11","doi-asserted-by":"publisher","first-page":"501","DOI":"10.1016\/j.jvoice.2018.01.012","volume":"33","author":"T Hakanpaa","year":"2019","unstructured":"Hakanpaa T, Waaramaa T, Laukkanen A-M (2019) Emotion recognition from singing voices using contemporary commercial music and classical styles. J Voice 33(4):501\u2013509","journal-title":"J Voice"},{"issue":"5","key":"10329_CR12","doi-asserted-by":"publisher","first-page":"358","DOI":"10.1631\/FITEE.1400323","volume":"16","author":"Z-W Huang","year":"2015","unstructured":"Huang Z-W, Xue W-T, Mao Q-R (2015) Speech emotion recognition with unsupervised feature learning. Front Information Technol Electron Eng 16(5):358\u2013366","journal-title":"Front Information Technol Electron Eng"},{"issue":"12","key":"10329_CR13","doi-asserted-by":"publisher","first-page":"2730","DOI":"10.3390\/s19122730","volume":"19","author":"W Jiang","year":"2019","unstructured":"Jiang W, Wang Z, Jin JS, Han X, Li C (2019) Speech emotion recognition with heterogeneous feature unification of deep neural network. Sensors 19(12):2730","journal-title":"Sensors"},{"key":"10329_CR14","doi-asserted-by":"publisher","first-page":"52","DOI":"10.1016\/j.specom.2016.11.005","volume":"86","author":"SR Kadiri","year":"2017","unstructured":"Kadiri SR, Yegnanarayana B (2017) Epoch extraction from emotional speech using single frequency filtering approach. Speech Comm 86:52\u201363","journal-title":"Speech Comm"},{"issue":"1","key":"10329_CR15","doi-asserted-by":"publisher","first-page":"45","DOI":"10.1007\/s10772-020-09672-4","volume":"23","author":"A Koduru","year":"2020","unstructured":"Koduru A, Valiveti HB, Budati AK (2020) Feature extraction algorithms to improve the speech emotion recognition rate. Int J Speech Technol 23(1):45\u201355","journal-title":"Int J Speech Technol"},{"issue":"1","key":"10329_CR16","first-page":"183","volume":"20","author":"S Kwon","year":"2020","unstructured":"Kwon S (2020) A CNN-assisted enhanced audio signal processing for speech emotion recognition. Sensors 20(1):183","journal-title":"Sensors"},{"key":"10329_CR17","doi-asserted-by":"publisher","first-page":"29","DOI":"10.1016\/j.procs.2015.10.020","volume":"70","author":"S Lalitha","year":"2015","unstructured":"Lalitha S, Geyasruti D, Narayanan R, M S (2015) Emotion detection using MFCC and cepstrum features. Procedia Comput Sci 70:29\u201335","journal-title":"Procedia Comput Sci"},{"key":"10329_CR18","doi-asserted-by":"publisher","first-page":"48720","DOI":"10.1109\/ACCESS.2020.2979799","volume":"8","author":"Q Li","year":"2020","unstructured":"Li Q, Yang Y, Lan T, Zhu H, Wei Q, Qiao F, Liu X, Yang H (2020) MSP-MFCC: energy-efficient MFCC feature extraction method with mixed-signal processing architecture for wearable speech recognition applications. IEEE Access 8:48720\u201348730","journal-title":"IEEE Access"},{"key":"10329_CR19","doi-asserted-by":"publisher","first-page":"271","DOI":"10.1016\/j.neucom.2017.07.050","volume":"273","author":"Z-T Liu","year":"2018","unstructured":"Liu Z-T, Wu M, Cao W-H, Mao J-W, Xu J-P, Tan G-Z (2018) Speech emotion recognition based on feature selection and extreme learning machine decision tree. Neurocomputing 273:271\u2013280","journal-title":"Neurocomputing"},{"key":"10329_CR20","unstructured":"Luefeng C, Su W, Feng Y, Wu M, She J, Hirota K (2019) Two-layer fuzzy multiple random Forest for speech emotion recognition in human-robot interaction. Inf Sci 509:150\u2013163"},{"key":"10329_CR21","doi-asserted-by":"crossref","unstructured":"Mirsamadi, Seyedmahdad, Barsoum E, Zhang C (2017) Automatic speech emotion recognition using recurrent neural networks with local attention. 2017 IEEE international conference on acoustics, speech and signal processing (ICASSP). IEEE","DOI":"10.1109\/ICASSP.2017.7952552"},{"key":"10329_CR22","unstructured":"Mohamed MM, Schuller BW (2020) Facing packet loss in deep speech emotion recognition. Electrical Engineering and Systems Science arXiv preprint arXiv:2005.07757, pp 1\u20134"},{"issue":"6\u20137","key":"10329_CR23","doi-asserted-by":"publisher","first-page":"2255","DOI":"10.1166\/jctn.2018.7447","volume":"15","author":"AM Nancy","year":"2018","unstructured":"Nancy AM, Kumar GS, Doshi P, Shaw S (2018) Audio based emotion recognition using Mel frequency Cepstral coefficient and support vector machine. J Comput Theor Nanosci 15(6\u20137):2255\u20132258","journal-title":"J Comput Theor Nanosci"},{"key":"10329_CR24","doi-asserted-by":"publisher","first-page":"320","DOI":"10.1016\/j.apacoust.2018.11.028","volume":"146","author":"T Ozseven","year":"2019","unstructured":"Ozseven T (2019) A novel feature selection method for speech emotion recognition. Appl Acoust 146:320\u2013326","journal-title":"Appl Acoust"},{"key":"10329_CR25","unstructured":"Rani P, Kakkar S, Rani S (2015) Speech recognition using neural network. International Journal of Computer Applications 2015(4):11\u201314"},{"key":"10329_CR26","first-page":"1402","volume":"3","author":"J Ravi","year":"2012","unstructured":"Ravi J, Raja KB (2012) Hybrid domain based face recognition system. Int J Advanced Network Appl 3:1402","journal-title":"Int J Advanced Network Appl"},{"issue":"3","key":"10329_CR27","doi-asserted-by":"publisher","first-page":"315","DOI":"10.1016\/j.ipm.2008.09.003","volume":"45","author":"J Rong","year":"2009","unstructured":"Rong J, Li G, Chen Y-PP (2009) Acoustic feature selection for automatic emotion recognition from speech. Inf Process Manag 45(3):315\u2013328","journal-title":"Inf Process Manag"},{"key":"10329_CR28","doi-asserted-by":"publisher","first-page":"79861","DOI":"10.1109\/ACCESS.2020.2990405","volume":"8","author":"M Sajjad","year":"2020","unstructured":"Sajjad M, Kwon S (2020) Clustering-based speech emotion recognition by incorporating learned features and deep BiLSTM. IEEE Access 8:79861\u201379875","journal-title":"IEEE Access"},{"key":"10329_CR29","first-page":"32","volume":"19","author":"M Sara","year":"2017","unstructured":"Sara M, Setayeshi S, Rabiee A (2017) Speech emotion recognition based on a modified brain emotional learning model. Biol Inspired Cogn Archit 19:32\u201338","journal-title":"Biol Inspired Cogn Archit"},{"key":"10329_CR30","doi-asserted-by":"publisher","first-page":"34","DOI":"10.1016\/j.specom.2016.07.010","volume":"83","author":"P Song","year":"2016","unstructured":"Song P, Zheng W, Ou S, Zhang X, Jin Y, Liu J, Yu Y (2016) Cross-corpus speech emotion recognition based on transfer non-negative matrix factorisation. Speech Comm 83:34\u201341","journal-title":"Speech Comm"},{"key":"10329_CR31","doi-asserted-by":"publisher","first-page":"80","DOI":"10.1016\/j.bspc.2014.10.008","volume":"18","author":"Y Sun","year":"2015","unstructured":"Sun Y, Wen G, Wang J (2015) Weighted spectral features based on local Hu moments for speech emotion recognition. Biomed Signal Process Control 18:80\u201390","journal-title":"Biomed Signal Process Control"},{"key":"10329_CR32","doi-asserted-by":"publisher","unstructured":"Yang S, Cao J, Wang J (2017) Acoustics recognition of construction equipments based on LPCC features and SVM. 2015 34th Chinese control conference (CCC). IEEE 2015. https:\/\/doi.org\/10.1109\/ChiCC.2015.7260254","DOI":"10.1109\/ChiCC.2015.7260254"},{"key":"10329_CR33","doi-asserted-by":"publisher","first-page":"291","DOI":"10.1016\/j.future.2017.10.002","volume":"81","author":"S Ying","year":"2018","unstructured":"Ying S, Xue-Ying Z (2018) Characteristics of human auditory model based on compensation of glottal features in speech emotion recognition. Futur Gener Comput Syst 81:291\u2013296","journal-title":"Futur Gener Comput Syst"},{"key":"10329_CR34","doi-asserted-by":"publisher","first-page":"149","DOI":"10.1016\/j.eswa.2016.10.035","volume":"69","author":"CK Yogesh","year":"2017","unstructured":"Yogesh CK, Hariharan M, Ngadiran R, Adom AH, Yaacob S, Berkai C, Polat K (2017) A new hybrid PSO assisted biogeography-based optimisation for emotion and stress recognition from speech signal. Expert Syst Appl 69:149\u2013158","journal-title":"Expert Syst Appl"},{"key":"10329_CR35","doi-asserted-by":"publisher","first-page":"312","DOI":"10.1016\/j.bspc.2018.08.035","volume":"47","author":"J Zhao","year":"2019","unstructured":"Zhao J, Mao X, Chen L (2019) Speech emotion recognition using deep 1D & 2D CNN LSTM networks. Biomed Signal Process Control 47:312\u2013323","journal-title":"Biomed Signal Process Control"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-020-10329-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-020-10329-2\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-020-10329-2.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,4,27]],"date-time":"2021-04-27T06:41:41Z","timestamp":1619505701000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-020-10329-2"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,2,5]]},"references-count":35,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2021,4]]}},"alternative-id":["10329"],"URL":"https:\/\/doi.org\/10.1007\/s11042-020-10329-2","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"value":"1380-7501","type":"print"},{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,2,5]]},"assertion":[{"value":"18 March 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"30 November 2020","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 December 2020","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 February 2021","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}