{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,25]],"date-time":"2026-02-25T13:35:18Z","timestamp":1772026518158,"version":"3.50.1"},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2020,11,16]],"date-time":"2020-11-16T00:00:00Z","timestamp":1605484800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,11,16]],"date-time":"2020-11-16T00:00:00Z","timestamp":1605484800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2021,3]]},"DOI":"10.1007\/s11042-020-10118-x","type":"journal-article","created":{"date-parts":[[2020,11,16]],"date-time":"2020-11-16T10:06:18Z","timestamp":1605521178000},"page":"9961-9992","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":41,"title":["RETRACTED ARTICLE: Performance of deer hunting optimization based deep learning algorithm for speech emotion recognition"],"prefix":"10.1007","volume":"80","author":[{"given":"Gaurav","family":"Agarwal","sequence":"first","affiliation":[]},{"given":"Hari","family":"Om","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,11,16]]},"reference":[{"key":"10118_CR1","doi-asserted-by":"publisher","unstructured":"Al-Anzi F, Zeina DA (2018) Literature survey of Arabic speech recognition. In: 2018 International Conference on Computing Sciences and Engineering (ICCSE), Kuwait City, pp 1\u20136. https:\/\/doi.org\/10.1109\/ICCSE1.2018.8374215","DOI":"10.1109\/ICCSE1.2018.8374215"},{"issue":"4","key":"10118_CR2","first-page":"31","volume":"10","author":"MN Arafa","year":"2018","unstructured":"Arafa MN, Elbarougy R, Ewees AA, Behery GM (2018) A dataset for speech recognition to support Arabic phoneme pronunciation. Int J Image Graph Signal Proc 10(4):31","journal-title":"Int J Image Graph Signal Proc"},{"issue":"1","key":"10118_CR3","doi-asserted-by":"publisher","first-page":"98","DOI":"10.1121\/1.5017834","volume":"143","author":"V Arora","year":"2018","unstructured":"Arora V, Lahiri A, Reetz H (2018) Phonological feature-based speech recognition system for pronunciation training in non-native language learning. J Acoust Soc Am 143(1):98\u2013108","journal-title":"J Acoust Soc Am"},{"key":"10118_CR4","doi-asserted-by":"publisher","unstructured":"Awan SK, Dunoyer EJ, Genuario KE, Levy AC, O'Connor KP, Serhatli S, Gerling GJ (2018) Using voice recognition enabled smartwatches to improve nurse documentation. In: 2018 Systems and Information Engineering Design Symposium (SIEDS), Charlottesville, VA, pp 159\u2013164. https:\/\/doi.org\/10.1109\/SIEDS.2018.8374728","DOI":"10.1109\/SIEDS.2018.8374728"},{"issue":"5","key":"10118_CR5","doi-asserted-by":"publisher","first-page":"5571","DOI":"10.1007\/s11042-017-5292-7","volume":"78","author":"MA Badshah","year":"2019","unstructured":"Badshah MA, Rahim N, Ullah N, Ahmad J, Muhammad K, Lee MY, Kwon S, Baik SW (2019) Deep features-based speech emotion recognition for smart effective services. Multimed Tools Appl 78(5):5571\u20135589","journal-title":"Multimed Tools Appl"},{"key":"10118_CR6","doi-asserted-by":"crossref","unstructured":"Barker J, Watanabe S, Vincent E, Trmal J (2018) The fifth 'CHiME' speech separation and recognition challenge: dataset, task and baselines. arXiv preprint arXiv:1803.10609","DOI":"10.21437\/Interspeech.2018-1768"},{"key":"10118_CR7","doi-asserted-by":"crossref","unstructured":"Bernal E, Castillo O, Soria J, Valdez F (2018) Galactic swarm optimization with adaptation of parameters using fuzzy logic for the optimization of mathematical functions. In: Fuzzy Logic Augmentation of Neural and Optimization Algorithms: Theoretical Aspects and Real Applications. Springer,\u00a0Cham,\u00a0vol. 749, no. 1, pp\u00a0131\u2013140","DOI":"10.1007\/978-3-319-71008-2_11"},{"key":"10118_CR8","doi-asserted-by":"publisher","first-page":"104886","DOI":"10.1016\/j.knosys.2019.104886","volume":"184","author":"A Bhavan","year":"2019","unstructured":"Bhavan A, Chauhan P, Shah RR (2019) Bagged support vector machines for emotion recognition from speech. Knowl-Based Syst 184:104886","journal-title":"Knowl-Based Syst"},{"key":"10118_CR9","doi-asserted-by":"crossref","unstructured":"Brammya G, Praveena S, Ninu Preetha NS, Ramya R, Rajakumar BR, Binu D (2019) Deer hunting optimization algorithm: a new nature-inspired meta-heuristic paradigm. Comput J","DOI":"10.1093\/comjnl\/bxy133"},{"key":"10118_CR10","doi-asserted-by":"publisher","first-page":"231","DOI":"10.1016\/j.asoc.2014.11.016","volume":"27","author":"K Daqrouq","year":"2015","unstructured":"Daqrouq K, Tutunji TA (2015) Speaker identification using vowels features through a combined method of formants, wavelets, and neural network classifiers. Appl Soft Comput 27:231\u2013239","journal-title":"Appl Soft Comput"},{"issue":"2","key":"10118_CR11","doi-asserted-by":"publisher","first-page":"285","DOI":"10.1002\/cae.21884","volume":"26","author":"KA Darabkh","year":"2018","unstructured":"Darabkh KA, Haddad L, Sweidan SZ, Hawa M, Saifan R, Alnabelsi SH (2018) An efficient speech recognition system for arm-disabled students based on isolated words. Comput Appl Eng Educ 26(2):285\u2013301","journal-title":"Comput Appl Eng Educ"},{"key":"10118_CR12","unstructured":"Gardini S (2018) Data preparation and improvement of NLP software modules for parametric speech synthesis"},{"issue":"2","key":"10118_CR13","doi-asserted-by":"publisher","first-page":"619","DOI":"10.1007\/s00366-018-0620-8","volume":"35","author":"GF Gomes","year":"2019","unstructured":"Gomes GF, da Cunha SS, Ancelotti AC (2019) A sunflower optimization (SFO) algorithm applied to damage identification on laminated composite plates. Eng Comput 35(2):619\u2013626","journal-title":"Eng Comput"},{"issue":"1","key":"10118_CR14","doi-asserted-by":"publisher","first-page":"2102","DOI":"10.1038\/s41467-018-04485-1","volume":"9","author":"N Gong","year":"2018","unstructured":"Gong N, Id\u00e9 T, Kim S, Boybat I, Sebastian A, Narayanan V, Ando T (2018) Signal and noise extraction from analog memory elements for neuromorphic computing. Nat Commun 9(1):2102","journal-title":"Nat Commun"},{"key":"10118_CR15","doi-asserted-by":"crossref","unstructured":"Gupta D, Bansal P, Choudhary K (2018) The state of the art of feature extraction techniques in speech recognition. In: Speech and Language Processing for Human-Machine Communications. Springer, Singapore, vol. 2, no. 1, pp 195\u2013207","DOI":"10.1007\/978-981-10-6626-9_22"},{"key":"10118_CR16","doi-asserted-by":"publisher","unstructured":"Hamsa S, Shahin I, Iraqi Y, Werghi N (2020) Emotion recognition from speech using wavelet packet transform Cochlear filter Bank and random Forest classifier. IEEE Access 8:96994\u201397006. https:\/\/doi.org\/10.1109\/ACCESS.2020.2991811","DOI":"10.1109\/ACCESS.2020.2991811"},{"issue":"1","key":"10118_CR17","first-page":"39","volume":"22","author":"VA Haridas","year":"2018","unstructured":"Haridas VA, Marimuthu R, Sivakumar VG (2018) A critical review and analysis of techniques of speech recognition: the road ahead. Int J Knowl-Based Intell Eng Syst 22(1):39\u201357","journal-title":"Int J Knowl-Based Intell Eng Syst"},{"key":"10118_CR18","first-page":"1","volume":"1","author":"CZ Huang","year":"2018","unstructured":"Huang CZ, Epps J (2018) An investigation of partition-based and phonetically-aware acoustic features for continuous emotion prediction from speech. IEEE Trans Affect Comput 1:1\u201311","journal-title":"IEEE Trans Affect Comput"},{"issue":"2","key":"10118_CR19","doi-asserted-by":"publisher","first-page":"233","DOI":"10.1093\/scan\/nsy001","volume":"13","author":"KN Karle","year":"2018","unstructured":"Karle KN, Ethofer T, Jacob H, Br\u00fcck C, Ml E, Lotze M, Nizielski S, Sch\u00fctz A, Wildgruber D, Kreifelts B (2018) Neurobiological correlates of emotional intelligence in voice and face perception networks. Soc Cogn Affect Neurosci 13(2):233\u2013244","journal-title":"Soc Cogn Affect Neurosci"},{"key":"10118_CR20","doi-asserted-by":"crossref","unstructured":"Koolagudi GS, Reddy R, Yadav J, Rao KS (2011) IITKGP-SEHSC: Hindi speech corpus for emotion analysis. In devices and communications (ICDeCom), 2011 international conference on IEEE 1-5","DOI":"10.1109\/ICDECOM.2011.5738540"},{"issue":"1","key":"10118_CR21","first-page":"183","volume":"20","author":"S Kwon","year":"2020","unstructured":"Kwon S (2020) A CNN-assisted enhanced audio signal processing for speech emotion recognition. Sensors 20(1):183","journal-title":"Sensors"},{"key":"10118_CR22","doi-asserted-by":"crossref","unstructured":"Latif S, Rana R, Khalifa S, Jurdak R, Epps J, Schuller BW (2020) Multi-task semi-supervised adversarial autoencoding for speech emotion recognition. IEEE Trans Affect Comput","DOI":"10.36227\/techrxiv.16689484"},{"issue":"2","key":"10118_CR23","doi-asserted-by":"publisher","first-page":"e4255","DOI":"10.1002\/cpe.4255","volume":"30","author":"J-C Liu","year":"2018","unstructured":"Liu J-C, Leu F-Y, Lin G-L, Susanto H (2018) An MFCC-based text-independent speaker identification system for access control. Concurr Comput Pract Exp 30(2):e4255","journal-title":"Concurr Comput Pract Exp"},{"issue":"5","key":"10118_CR24","doi-asserted-by":"publisher","first-page":"e0196391","DOI":"10.1371\/journal.pone.0196391","volume":"13","author":"SR Livingstone","year":"2018","unstructured":"Livingstone SR, Russo FA (2018) The Ryerson audio-visual database of emotional speech and Song (RAVDESS): a dynamic, multimodal set of facial and vocal expressions in north American English. PLoS One 13(5):e0196391","journal-title":"PLoS One"},{"key":"10118_CR25","doi-asserted-by":"crossref","unstructured":"Mannepalli K, Sastry PN, Suman M (2018) Analysis of emotion recognition system for Telugu using prosodic and formant features. In Speech and Language Processing for Human-Machine Communications. Springer, Singapore, pp 137\u2013144","DOI":"10.1007\/978-981-10-6626-9_15"},{"key":"10118_CR26","doi-asserted-by":"publisher","first-page":"17","DOI":"10.1016\/j.csl.2017.11.001","volume":"49","author":"SM Mirzaei","year":"2018","unstructured":"Mirzaei SM, Meshgi K, Kawahara T (2018) Exploiting automatic speech recognition errors to enhance partial and synchronized caption for facilitating second language listening. Comput Speech Lang 49:17\u201336","journal-title":"Comput Speech Lang"},{"key":"10118_CR27","doi-asserted-by":"publisher","first-page":"649","DOI":"10.1016\/j.asoc.2017.11.001","volume":"62","author":"L Moro-Vel\u00e1zquez","year":"2018","unstructured":"Moro-Vel\u00e1zquez L, G\u00f3mez-Garc\u00eda JA, Godino-Llorente JI, Villalba J, Orozco-Arroyave JR, Dehak N (2018) Analysis of speaker recognition methodologies and the influence of kinetic changes to automatically detect Parkinson's disease. Appl Soft Comput 62:649\u2013666","journal-title":"Appl Soft Comput"},{"key":"10118_CR28","doi-asserted-by":"publisher","first-page":"320","DOI":"10.1016\/j.apacoust.2018.11.028","volume":"146","author":"T \u00d6zseven","year":"2019","unstructured":"\u00d6zseven T (2019) A novel feature selection method for speech emotion recognition. Appl Acoust 146:320\u2013326","journal-title":"Appl Acoust"},{"key":"10118_CR29","unstructured":"Patel P, Chaudhari A, Kale R, Pund M (2017) Emotion recognition from speech with gaussian mixture models & via boosted GMM. Int J Res Sci Eng 3(2):47\u201353"},{"key":"10118_CR30","doi-asserted-by":"publisher","first-page":"66","DOI":"10.1109\/JSSC.2017.2752838","volume":"53","author":"M Price","year":"2018","unstructured":"Price M, Glass J, Chandrakasan AP (2018) A low-power speech recognizer and voice activity detector using deep neural networks. IEEE J Solid State Circuits 53:66\u201375","journal-title":"IEEE J Solid State Circuits"},{"key":"10118_CR31","doi-asserted-by":"publisher","first-page":"79861","DOI":"10.1109\/ACCESS.2020.2990405","volume":"8","author":"M Sajjad","year":"2020","unstructured":"Sajjad M, Kwon S (2020) Clustering-based speech emotion recognition by incorporating learned features and deep BiLSTM. IEEE Access 8:79861\u201379875","journal-title":"IEEE Access"},{"key":"10118_CR32","doi-asserted-by":"publisher","first-page":"373","DOI":"10.1109\/TAFFC.2018.2800046","volume":"11","author":"P Song","year":"2018","unstructured":"Song P, Zheng W (2018) Feature selection based transfer subspace learning for speech emotion recognition. IEEE Trans Affect Comput 11:373\u2013382","journal-title":"IEEE Trans Affect Comput"},{"issue":"1\/2","key":"10118_CR33","doi-asserted-by":"publisher","first-page":"14","DOI":"10.17743\/jaes.2019.0043","volume":"68","author":"N Vryzas","year":"2020","unstructured":"Vryzas N, Vrysis L, Matsiola M, Kotsakis R, Dimoulas C, Kalliris G (2020) Continuous speech emotion recognition with convolutional neural networks. J Audio Eng Soc 68(1\/2):14\u201324","journal-title":"J Audio Eng Soc"},{"key":"10118_CR34","doi-asserted-by":"crossref","unstructured":"Wang W-C, Pestana MH and Moutinho L (2018) The effect of emotions on brand recall by gender using voice emotion response with optimal data analysis. In: Innovative research methodologies in management.\u00a0Palgrave Macmillan, Cham, pp 103\u2013133","DOI":"10.1007\/978-3-319-64400-4_5"},{"issue":"3\u20134","key":"10118_CR35","doi-asserted-by":"publisher","first-page":"521","DOI":"10.1007\/s00779-019-01246-9","volume":"23","author":"P Wei","year":"2019","unstructured":"Wei P, Zhao Y (2019) A novel speech emotion recognition algorithm based on wavelet kernel sparse classifier in stacked deep auto-encoder model. Pers Ubiquit Comput 23(3\u20134):521\u2013529","journal-title":"Pers Ubiquit Comput"},{"key":"10118_CR36","doi-asserted-by":"publisher","first-page":"245","DOI":"10.1016\/j.engappai.2016.01.032","volume":"50","author":"Y Zhang","year":"2016","unstructured":"Zhang Y, Zhang E, Chen W (2016) Deep neural network for halftone image classification based on sparse auto-encoder. Eng Appl Artif Intell 50:245\u2013255","journal-title":"Eng Appl Artif Intell"},{"key":"10118_CR37","doi-asserted-by":"publisher","first-page":"312","DOI":"10.1016\/j.bspc.2018.08.035","volume":"47","author":"J Zhao","year":"2019","unstructured":"Zhao J, Mao X, Chen L (2019) Speech emotion recognition using deep 1D & 2D CNN LSTM networks. Biomed Signal Proc Control 47:312\u2013323","journal-title":"Biomed Signal Proc Control"}],"updated-by":[{"DOI":"10.1007\/s11042-024-19945-8","type":"retraction","label":"Retraction","source":"publisher","updated":{"date-parts":[[2024,7,27]],"date-time":"2024-07-27T00:00:00Z","timestamp":1722038400000}}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-020-10118-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-020-10118-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-020-10118-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,27]],"date-time":"2024-07-27T04:18:26Z","timestamp":1722053906000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-020-10118-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,11,16]]},"references-count":37,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2021,3]]}},"alternative-id":["10118"],"URL":"https:\/\/doi.org\/10.1007\/s11042-020-10118-x","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"value":"1380-7501","type":"print"},{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,11,16]]},"assertion":[{"value":"29 January 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 September 2020","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 October 2020","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 November 2020","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 July 2024","order":5,"name":"change_date","label":"Change Date","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"Correction","order":6,"name":"change_type","label":"Change Type","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"This article has been retracted. Please see the Retraction Notice for more detail:","order":7,"name":"change_details","label":"Change Details","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"https:\/\/doi.org\/10.1007\/s11042-024-19945-8","URL":"https:\/\/doi.org\/10.1007\/s11042-024-19945-8","order":8,"name":"change_details","label":"Change Details","group":{"name":"ArticleHistory","label":"Article History"}}]}}