{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,19]],"date-time":"2025-11-19T07:08:31Z","timestamp":1763536111265,"version":"3.37.3"},"reference-count":69,"publisher":"Springer Science and Business Media LLC","issue":"9","license":[{"start":{"date-parts":[[2023,4,6]],"date-time":"2023-04-06T00:00:00Z","timestamp":1680739200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,4,6]],"date-time":"2023-04-06T00:00:00Z","timestamp":1680739200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Circuits Syst Signal Process"],"published-print":{"date-parts":[[2023,9]]},"DOI":"10.1007\/s00034-023-02367-6","type":"journal-article","created":{"date-parts":[[2023,4,6]],"date-time":"2023-04-06T18:03:34Z","timestamp":1680804214000},"page":"5500-5522","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Improved Speech Emotion Recognition Using Channel-wise Global Head Pooling (CwGHP)"],"prefix":"10.1007","volume":"42","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1524-1790","authenticated-orcid":false,"given":"Krishna","family":"Chauhan","sequence":"first","affiliation":[]},{"given":"Kamalesh Kumar","family":"Sharma","sequence":"additional","affiliation":[]},{"given":"Tarun","family":"Varma","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,4,6]]},"reference":[{"issue":"11","key":"2367_CR1","doi-asserted-by":"crossref","first-page":"5681","DOI":"10.1007\/s00034-020-01429-3","volume":"39","author":"SB Alex","year":"2020","unstructured":"S.B. Alex, L. Mary, B.P. Babu, Attention and feature selection for automatic speech emotion recognition using utterance and syllable-level prosodic features. Circuits Syst. Signal Process. 39(11), 5681\u20135709 (2020)","journal-title":"Circuits Syst. Signal Process."},{"issue":"3","key":"2367_CR2","first-page":"11","volume":"6","author":"R Altrov","year":"2015","unstructured":"R. Altrov, H. Pajupuu, The influence of language and culture on the understanding of vocal emotions. Eesti ja soome-ugri keeleteaduse ajakiri. J. Est. Finno-Ugric Linguist. 6(3), 11\u201348 (2015)","journal-title":"Eesti ja soome-ugri keeleteaduse ajakiri. J. Est. Finno-Ugric Linguist."},{"key":"2367_CR3","doi-asserted-by":"crossref","first-page":"85327","DOI":"10.1109\/ACCESS.2019.2917470","volume":"7","author":"NN An","year":"2019","unstructured":"N.N. An, N.Q. Thanh, Y. Liu, Deep CNNS with self-attention for speaker identification. IEEE Access 7, 85327\u201385337 (2019)","journal-title":"IEEE Access"},{"issue":"11","key":"2367_CR4","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1007\/s10916-018-1088-1","volume":"42","author":"SM Anwar","year":"2018","unstructured":"S.M. Anwar, M. Majid, A. Qayyum, M. Awais, M. Alnowami, M.K. Khan, Medical image analysis using convolutional neural networks: a review. J. Med. Syst. 42(11), 1\u201313 (2018)","journal-title":"J. Med. Syst."},{"key":"2367_CR5","doi-asserted-by":"crossref","first-page":"71","DOI":"10.1007\/978-3-642-15184-2_6","volume-title":"Emotion-Oriented Systems","author":"A Batliner","year":"2011","unstructured":"A. Batliner, B. Schuller, D. Seppi, S. Steidl, L. Devillers, L. Vidrascu, T. Vogt, V. Aharonson, N. Amir, The automatic recognition of emotions in speech, in Emotion-Oriented Systems. (Springer, Berlin, 2011), pp.71\u201399"},{"key":"2367_CR6","doi-asserted-by":"crossref","first-page":"104886","DOI":"10.1016\/j.knosys.2019.104886","volume":"184","author":"A Bhavan","year":"2019","unstructured":"A. Bhavan, P. Chauhan, R.R. Shah et al., Bagged support vector machines for emotion recognition from speech. Knowl.-Based Syst. 184, 104886 (2019)","journal-title":"Knowl.-Based Syst."},{"key":"2367_CR7","doi-asserted-by":"crossref","unstructured":"F. Burkhardt, A. Paeschke, M. Rolfes, W.F. Sendlmeier, B. Weiss, A database of german emotional speech, in Ninth European Conference on Speech Communication and Technology (2005)","DOI":"10.21437\/Interspeech.2005-446"},{"issue":"4","key":"2367_CR8","doi-asserted-by":"crossref","first-page":"335","DOI":"10.1007\/s10579-008-9076-6","volume":"42","author":"C Busso","year":"2008","unstructured":"C. Busso, M. Bulut, C.-C. Lee, A. Kazemzadeh, E. Mower, S. Kim, J.N. Chang, S. Lee, S.S. Narayanan, IEMOCAP: interactive emotional dyadic motion capture database. Lang. Resour. Eval. 42(4), 335\u2013359 (2008)","journal-title":"Lang. Resour. Eval."},{"key":"2367_CR9","doi-asserted-by":"crossref","unstructured":"K. Chauhan, K.K. Sharma, T. Varma, Improved speech emotion recognition using modified mean cepstral features, in 2020 IEEE 17th India Council International Conference (INDICON) (IEEE, 2020), pp. 1\u20136","DOI":"10.1109\/INDICON49873.2020.9342495"},{"key":"2367_CR10","doi-asserted-by":"crossref","unstructured":"K. Chauhan, K.K. Sharma, T. Varma, Speech emotion recognition using convolution neural networks, in 2021 International Conference on Artificial Intelligence and Smart Systems (ICAIS) (IEEE, 2021), pp. 1176\u20131181","DOI":"10.1109\/ICAIS50930.2021.9395844"},{"key":"2367_CR11","first-page":"1","volume":"82","author":"K Chauhan","year":"2022","unstructured":"K. Chauhan, K.K. Sharma, T. Varma, A method for simplifying the spoken emotion recognition system using a shallow neural network and temporal feature stacking & pooling (TFSP). Multim. Tools Appl. 82, 1\u201319 (2022)","journal-title":"Multim. Tools Appl."},{"issue":"10","key":"2367_CR12","doi-asserted-by":"crossref","first-page":"1440","DOI":"10.1109\/LSP.2018.2860246","volume":"25","author":"M Chen","year":"2018","unstructured":"M. Chen, X. He, J. Yang, H. Zhang, 3-d convolutional recurrent neural networks with attention model for speech emotion recognition. IEEE Signal Process. Lett. 25(10), 1440\u20131444 (2018)","journal-title":"IEEE Signal Process. Lett."},{"issue":"1","key":"2367_CR13","doi-asserted-by":"crossref","first-page":"1261","DOI":"10.1007\/s11042-019-08222-8","volume":"79","author":"F Daneshfar","year":"2020","unstructured":"F. Daneshfar, S.J. Kabudian, Speech emotion recognition using discriminative dimension reduction by employing a modified quantum-behaved particle swarm optimization algorithm. Multim. Tools Appl. 79(1), 1261\u20131289 (2020)","journal-title":"Multim. Tools Appl."},{"issue":"3","key":"2367_CR14","doi-asserted-by":"crossref","first-page":"802","DOI":"10.1109\/TCYB.2017.2787717","volume":"49","author":"S Deb","year":"2018","unstructured":"S. Deb, S. Dandapat, Multiscale amplitude feature and significance of enhanced vocal tract information for emotion classification. IEEE Trans. Cybern. 49(3), 802\u2013815 (2018)","journal-title":"IEEE Trans. Cybern."},{"issue":"8","key":"2367_CR15","doi-asserted-by":"crossref","first-page":"59","DOI":"10.1007\/s00521-016-2712-y","volume":"29","author":"S Demircan","year":"2018","unstructured":"S. Demircan, H. Kahramanli, Application of fuzzy c-means clustering algorithm to spectral features for emotion classification from speech. Neural Comput. Appl. 29(8), 59\u201366 (2018)","journal-title":"Neural Comput. Appl."},{"issue":"2","key":"2367_CR16","doi-asserted-by":"crossref","first-page":"190","DOI":"10.1109\/TAFFC.2015.2457417","volume":"7","author":"F Eyben","year":"2015","unstructured":"F. Eyben, K.R. Scherer, B.W. Schuller, J. Sundberg, E. Andr\u00e9, C. Busso, L.Y. Devillers, J. Epps, P. Laukka, S.S. Narayanan et al., The Geneva minimalistic acoustic parameter set (GeMAPS) for voice research and affective computing. IEEE Trans. Affect. Comput. 7(2), 190\u2013202 (2015)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"2367_CR17","doi-asserted-by":"crossref","unstructured":"F. Eyben, F. Weninger, F. Gross, B. Schuller, Recent developments in opensmile, the munich open-source multimedia feature extractor, in Proceedings of the 21st ACM International Conference on Multimedia (2013), pp. 835\u2013838","DOI":"10.1145\/2502081.2502224"},{"issue":"1","key":"2367_CR18","doi-asserted-by":"crossref","first-page":"466","DOI":"10.1007\/s00034-020-01486-8","volume":"40","author":"M Fahad","year":"2021","unstructured":"M. Fahad, A. Deepak, G. Pradhan, J. Yadav et al., DNN-HMM-based speaker-adaptive emotion recognition using MFCC and epoch-based features. Circuits Syst. Signal Process. 40(1), 466\u2013489 (2021)","journal-title":"Circuits Syst. Signal Process."},{"key":"2367_CR19","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1007\/s00034-022-02068-6","volume":"41","author":"MS Fahad","year":"2022","unstructured":"M.S. Fahad, A. Ranjan, A. Deepak, G. Pradhan, Speaker adversarial neural network (SANN) for speaker-independent speech emotion recognition. Circuits Syst. Signal Process. 41, 1\u201323 (2022)","journal-title":"Circuits Syst. Signal Process."},{"issue":"5","key":"2367_CR20","doi-asserted-by":"crossref","first-page":"479","DOI":"10.3390\/e21050479","volume":"21","author":"N Hajarolasvadi","year":"2019","unstructured":"N. Hajarolasvadi, H. Demirel, 3d CNN-based speech emotion recognition using k-means clustering and spectrograms. Entropy 21(5), 479 (2019)","journal-title":"Entropy"},{"key":"2367_CR21","doi-asserted-by":"crossref","unstructured":"K. He, X. Zhang, S. Ren, J. Sun, Deep residual learning for image recognition, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2016), pp. 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"2367_CR22","doi-asserted-by":"crossref","first-page":"218","DOI":"10.1109\/TASLP.2021.3133196","volume":"30","author":"M Hou","year":"2022","unstructured":"M. Hou, Z. Zhang, Q. Cao, D. Zhang, G. Lu, Multi-view speech emotion recognition via collective relation construction. IEEE\/ACM Trans. Audio Speech Lang. Process. 30, 218\u2013229 (2022)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"2367_CR23","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2020.101894","volume":"59","author":"D Issa","year":"2020","unstructured":"D. Issa, M.F. Demirci, A. Yazici, Speech emotion recognition with deep convolutional neural networks. Biomed. Signal Process. Control 59, 101894 (2020)","journal-title":"Biomed. Signal Process. Control"},{"issue":"16","key":"2367_CR24","doi-asserted-by":"crossref","first-page":"23745","DOI":"10.1007\/s11042-020-09874-7","volume":"80","author":"R Jahangir","year":"2021","unstructured":"R. Jahangir, Y.W. Teh, F. Hanif, G. Mujtaba, Deep learning approaches for speech emotion recognition: State of the art and research challenges. Multim. Tools Appl. 80(16), 23745\u201323812 (2021)","journal-title":"Multim. Tools Appl."},{"issue":"2","key":"2367_CR25","doi-asserted-by":"crossref","first-page":"131","DOI":"10.1007\/s10772-012-9127-7","volume":"15","author":"M Kotti","year":"2012","unstructured":"M. Kotti, F. Patern\u00f2, Speaker-independent emotion recognition exploiting a psychologically-inspired binary cascade classification schema. Int. J. Speech Technol. 15(2), 131\u2013150 (2012)","journal-title":"Int. J. Speech Technol."},{"key":"2367_CR26","unstructured":"D. Krishna, A. Patil, Multimodal emotion recognition using cross-modal attention and 1d convolutional neural networks, in Interspeech (2020), pp. 4243\u20134247"},{"issue":"1","key":"2367_CR27","first-page":"183","volume":"20","author":"S Kwon","year":"2020","unstructured":"S. Kwon et al., A CNN-assisted enhanced audio signal processing for speech emotion recognition. Sensors 20(1), 183 (2020)","journal-title":"Sensors"},{"key":"2367_CR28","doi-asserted-by":"crossref","first-page":"107101","DOI":"10.1016\/j.asoc.2021.107101","volume":"102","author":"S Kwon","year":"2021","unstructured":"S. Kwon et al., Att-Net: enhanced emotion recognition system using lightweight self-attention module. Appl. Soft Comput. 102, 107101 (2021)","journal-title":"Appl. Soft Comput."},{"key":"2367_CR29","doi-asserted-by":"crossref","first-page":"107519","DOI":"10.1016\/j.apacoust.2020.107519","volume":"170","author":"S Lalitha","year":"2020","unstructured":"S. Lalitha, D. Gupta, M. Zakariah, Y.A. Alotaibi, Investigation of multilingual and mixed-lingual emotion recognition using enhanced cues with data augmentation. Appl. Acoust. 170, 107519 (2020)","journal-title":"Appl. Acoust."},{"key":"2367_CR30","doi-asserted-by":"crossref","first-page":"992","DOI":"10.1109\/TAFFC.2020.2983669","volume":"13","author":"S Latif","year":"2020","unstructured":"S. Latif, R. Rana, S. Khalifa, R. Jurdak, J. Epps, B.W. Schuller, Multi-task semi-supervised adversarial autoencoding for speech emotion recognition. IEEE Trans. Affect. Comput. 13, 992\u20131004 (2020)","journal-title":"IEEE Trans. Affect. Comput."},{"issue":"2","key":"2367_CR31","doi-asserted-by":"crossref","first-page":"293","DOI":"10.1109\/TSA.2004.838534","volume":"13","author":"CM Lee","year":"2005","unstructured":"C.M. Lee, S.S. Narayanan, Toward detecting emotions in spoken dialogs. IEEE Trans. Speech Audio Process. 13(2), 293\u2013303 (2005)","journal-title":"IEEE Trans. Speech Audio Process."},{"key":"2367_CR32","doi-asserted-by":"crossref","first-page":"114683","DOI":"10.1016\/j.eswa.2021.114683","volume":"173","author":"D Li","year":"2021","unstructured":"D. Li, J. Liu, Z. Yang, L. Sun, Z. Wang, Speech emotion recognition using recurrent neural networks with directional self-attention. Expert Syst. Appl. 173, 114683 (2021)","journal-title":"Expert Syst. Appl."},{"key":"2367_CR33","doi-asserted-by":"crossref","unstructured":"P. Li, Y. Song, I.V. McLoughlin, W. Guo, L.-R. Dai, An attention pooling based representation learning method for speech emotion recognition (2018)","DOI":"10.21437\/Interspeech.2018-1242"},{"key":"2367_CR34","doi-asserted-by":"crossref","unstructured":"X. Li, J. Tao, M.T. Johnson, J. Soltis, A. Savage, K.M. Leong, J.D. Newman, Stress and emotion classification using jitter and shimmer features, in 2007 IEEE International Conference on Acoustics, Speech and Signal Processing-ICASSP\u201907, vol.\u00a04 (IEEE, 2007), pp. IV\u20131081","DOI":"10.1109\/ICASSP.2007.367261"},{"issue":"5","key":"2367_CR35","doi-asserted-by":"crossref","first-page":"e0196391","DOI":"10.1371\/journal.pone.0196391","volume":"13","author":"SR Livingstone","year":"2018","unstructured":"S.R. Livingstone, F.A. Russo, The Ryerson audio-visual database of emotional speech and song (RAVDESS): a dynamic, multimodal set of facial and vocal expressions in North American English. PLoS ONE 13(5), e0196391 (2018)","journal-title":"PLoS ONE"},{"issue":"3","key":"2367_CR36","first-page":"574","volume":"58","author":"L-SA Low","year":"2010","unstructured":"L.-S.A. Low, N.C. Maddage, M. Lech, L.B. Sheeber, N.B. Allen, Detection of clinical depression in adolescents\u2019 speech during family interactions. IEEE Trans. Biomed. Eng. 58(3), 574\u2013586 (2010)","journal-title":"IEEE Trans. Biomed. Eng."},{"key":"2367_CR37","doi-asserted-by":"crossref","first-page":"125868","DOI":"10.1109\/ACCESS.2019.2938007","volume":"7","author":"H Meng","year":"2019","unstructured":"H. Meng, T. Yan, F. Yuan, H. Wei, Speech emotion recognition from 3d log-mel spectrograms with deep learning network. IEEE Access 7, 125868\u2013125881 (2019)","journal-title":"IEEE Access"},{"issue":"2","key":"2367_CR38","doi-asserted-by":"crossref","first-page":"109","DOI":"10.1007\/s10772-009-9023-y","volume":"10","author":"W Minker","year":"2007","unstructured":"W. Minker, J. Pittermann, A. Pittermann, P.-M. Strau\u00df, D. B\u00fchler, Challenges in speech-based human\u2013computer interfaces. Int. J. Speech Technol. 10(2), 109\u2013119 (2007)","journal-title":"Int. J. Speech Technol."},{"key":"2367_CR39","doi-asserted-by":"crossref","unstructured":"S. Mirsamadi, E. Barsoum, C. Zhang, Automatic speech emotion recognition using recurrent neural networks with local attention, in 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (IEEE, 2017), pp. 2227\u20132231","DOI":"10.1109\/ICASSP.2017.7952552"},{"issue":"2","key":"2367_CR40","doi-asserted-by":"crossref","first-page":"98","DOI":"10.1016\/j.specom.2006.11.004","volume":"49","author":"D Morrison","year":"2007","unstructured":"D. Morrison, R. Wang, L.C. De Silva, Ensemble methods for spoken emotion recognition in call-centres. Speech Commun. 49(2), 98\u2013112 (2007)","journal-title":"Speech Commun."},{"key":"2367_CR41","doi-asserted-by":"crossref","unstructured":"Mustaqeem, S. Kwon, MLT-DNet: speech emotion recognition using 1d dilated CNN based on multi-learning trick approach. Expert Syst. Appl. 167, 114177 (2021)","DOI":"10.1016\/j.eswa.2020.114177"},{"key":"2367_CR42","doi-asserted-by":"crossref","first-page":"102763","DOI":"10.1016\/j.dsp.2020.102763","volume":"104","author":"S Nagarajan","year":"2020","unstructured":"S. Nagarajan, S.S.S. Nettimi, L.S. Kumar, M.K. Nath, A. Kanhe, Speech emotion recognition using cepstral features extracted with novel triangular filter banks based on bark and erb frequency scales. Digital Signal Process. 104, 102763 (2020)","journal-title":"Digital Signal Process."},{"key":"2367_CR43","doi-asserted-by":"crossref","unstructured":"P. Nantasri, E. Phaisangittisagul, J. Karnjana, S. Boonkla, S. Keerativittayanun, A. Rugchatjaroen, S. Usanavasin, T. Shinozaki, A light-weight artificial neural network for speech emotion recognition using average values of mfccs and their derivatives, in 2020 17th International Conference on Electrical Engineering\/Electronics, Computer, Telecommunications and Information Technology (ECTI-CON) (IEEE, 2020), pp. 41\u201344","DOI":"10.1109\/ECTI-CON49241.2020.9158221"},{"key":"2367_CR44","doi-asserted-by":"crossref","unstructured":"A. Nediyanchath, P. Paramasivam, P. Yenigalla, Multi-head attention for speech emotion recognition with auxiliary learning of gender recognition, in ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (IEEE, 2020), pp. 7179\u20137183","DOI":"10.1109\/ICASSP40776.2020.9054073"},{"key":"2367_CR45","doi-asserted-by":"crossref","unstructured":"M. Neumann, N.T. Vu, Attentive convolutional neural network based speech emotion recognition: a study on the impact of input features, signal length, and acted speech. arXiv:1706.00612 (2017)","DOI":"10.21437\/Interspeech.2017-917"},{"issue":"13","key":"2367_CR46","doi-asserted-by":"crossref","first-page":"5858","DOI":"10.1016\/j.eswa.2014.03.026","volume":"41","author":"CS Ooi","year":"2014","unstructured":"C.S. Ooi, K.P. Seng, L.-M. Ang, L.W. Chew, A new approach of audio emotion recognition. Expert Syst. Appl. 41(13), 5858\u20135869 (2014)","journal-title":"Expert Syst. Appl."},{"key":"2367_CR47","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2021.103173","volume":"71","author":"SK Pandey","year":"2022","unstructured":"S.K. Pandey, H.S. Shekhawat, S. Prasanna, Attention gated tensor neural network architectures for speech emotion recognition. Biomed. Signal Process. Control 71, 103173 (2022)","journal-title":"Biomed. Signal Process. Control"},{"key":"2367_CR48","doi-asserted-by":"crossref","first-page":"2697","DOI":"10.1109\/TASLP.2020.3023632","volume":"28","author":"S Parthasarathy","year":"2020","unstructured":"S. Parthasarathy, C. Busso, Semi-supervised speech emotion recognition with ladder networks. IEEE\/ACM Trans. Audio Speech Lang. Process. 28, 2697\u20132709 (2020)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"2367_CR49","doi-asserted-by":"crossref","unstructured":"N.-Q. Pham, T.-S. Nguyen, J. Niehues, M. M\u00fcller, S. St\u00fcker, A. Waibel, Very deep self-attention networks for end-to-end speech recognition. arXiv:1904.13377 (2019)","DOI":"10.21437\/Interspeech.2019-2702"},{"issue":"12","key":"2367_CR50","doi-asserted-by":"crossref","first-page":"2263","DOI":"10.1109\/TASLP.2016.2602884","volume":"24","author":"Y Qian","year":"2016","unstructured":"Y. Qian, M. Bi, T. Tan, K. Yu, Very deep convolutional neural networks for noise robust speech recognition. IEEE\/ACM Trans. Audio Speech Lang. Process. 24(12), 2263\u20132276 (2016)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"2367_CR51","doi-asserted-by":"crossref","first-page":"79861","DOI":"10.1109\/ACCESS.2020.2990405","volume":"8","author":"M Sajjad","year":"2020","unstructured":"M. Sajjad, S. Kwon et al., Clustering-based speech emotion recognition by incorporating learned features and deep BiLSTM. IEEE Access 8, 79861\u201379875 (2020)","journal-title":"IEEE Access"},{"key":"2367_CR52","doi-asserted-by":"crossref","unstructured":"M. Sarma, P. Ghahremani, D. Povey, N.K. Goel, K.K. Sarma, N. Dehak, Emotion identification from raw speech signals using DNNS, in Interspeech (2018), pp. 3097\u20133101","DOI":"10.21437\/Interspeech.2018-1353"},{"key":"2367_CR53","doi-asserted-by":"crossref","first-page":"80","DOI":"10.1016\/j.bspc.2014.10.008","volume":"18","author":"Y Sun","year":"2015","unstructured":"Y. Sun, G. Wen, J. Wang, Weighted spectral features based on local hu moments for speech emotion recognition. Biomed. Signal Process. Control 18, 80\u201390 (2015)","journal-title":"Biomed. Signal Process. Control"},{"key":"2367_CR54","doi-asserted-by":"crossref","unstructured":"C. Szegedy, W. Liu, Y. Jia, P. Sermanet, S. Reed, D. Anguelov, D. Erhan, V. Vanhoucke, A. Rabinovich, Going deeper with convolutions, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"2367_CR55","doi-asserted-by":"crossref","unstructured":"L. Tarantino, P.N. Garner, A. Lazaridis, et\u00a0al. Self-attention for speech emotion recognition, in Interspeech (2019), pp. 2578\u20132582","DOI":"10.21437\/Interspeech.2019-2822"},{"key":"2367_CR56","doi-asserted-by":"crossref","unstructured":"G. Trigeorgis, F. Ringeval, R. Brueckner, E. Marchi, M.A. Nicolaou, B. Schuller, S. Zafeiriou, Adieu features? End-to-end speech emotion recognition using a deep convolutional recurrent network, in 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (IEEE, 2016), pp. 5200\u20135204","DOI":"10.1109\/ICASSP.2016.7472669"},{"key":"2367_CR57","doi-asserted-by":"crossref","first-page":"106547","DOI":"10.1016\/j.knosys.2020.106547","volume":"211","author":"T Tuncer","year":"2021","unstructured":"T. Tuncer, S. Dogan, U.R. Acharya, Automated accurate speech emotion recognition system using twine shuffle pattern and iterative neighborhood component analysis techniques. Knowl.-Based Syst. 211, 106547 (2021)","journal-title":"Knowl.-Based Syst."},{"key":"2367_CR58","unstructured":"A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A.N. Gomez, \u0141. Kaiser, I. Polosukhin, Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"2367_CR59","doi-asserted-by":"crossref","unstructured":"X. Wu, S. Liu, Y. Cao, X. Li, J. Yu, D. Dai, X. Ma, S. Hu, Z. Wu, X. Liu, et\u00a0al. Speech emotion recognition using capsule networks, in ICASSP 2019\u20142019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (IEEE, 2019), pp. 6695\u20136699","DOI":"10.1109\/ICASSP.2019.8683163"},{"issue":"1","key":"2367_CR60","doi-asserted-by":"crossref","first-page":"3","DOI":"10.1109\/TAFFC.2015.2512598","volume":"8","author":"R Xia","year":"2015","unstructured":"R. Xia, Y. Liu, A multi-task learning framework for emotion recognition using 2d continuous space. IEEE Trans. Affect. Comput. 8(1), 3\u201314 (2015)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"2367_CR61","doi-asserted-by":"crossref","unstructured":"M. Xu, F. Zhang, X. Cui, W. Zhang, Speech emotion recognition with multiscale area attention and data augmentation, in ICASSP 2021\u20142021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (IEEE, 2021), pp. 6319\u20136323","DOI":"10.1109\/ICASSP39728.2021.9414635"},{"key":"2367_CR62","doi-asserted-by":"crossref","unstructured":"Y. Xu, H. Xu, J. Zou, HGFM: A hierarchical grained and feature model for acoustic emotion recognition, in ICASSP 2020\u20142020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (IEEE, 2020), pp. 6499\u20136503","DOI":"10.1109\/ICASSP40776.2020.9053039"},{"issue":"5","key":"2367_CR63","doi-asserted-by":"crossref","first-page":"1415","DOI":"10.1016\/j.sigpro.2009.09.009","volume":"90","author":"B Yang","year":"2010","unstructured":"B. Yang, M. Lugger, Emotion recognition from speech signals using new harmony features. Signal Process. 90(5), 1415\u20131423 (2010)","journal-title":"Signal Process."},{"key":"2367_CR64","doi-asserted-by":"crossref","unstructured":"S. Yoon, S. Byun, K. Jung, Multimodal speech emotion recognition using audio and text, in 2018 IEEE Spoken Language Technology Workshop (SLT) (IEEE, 2018), pp. 112\u2013118","DOI":"10.1109\/SLT.2018.8639583"},{"issue":"3","key":"2367_CR65","doi-asserted-by":"crossref","first-page":"3705","DOI":"10.1007\/s11042-017-5539-3","volume":"78","author":"Y Zeng","year":"2019","unstructured":"Y. Zeng, H. Mao, D. Peng, Z. Yi, Spectrogram based multi-task audio classification. Multim. Tools Appl. 78(3), 3705\u20133722 (2019)","journal-title":"Multim. Tools Appl."},{"key":"2367_CR66","volume":"168","author":"J Zhang","year":"2022","unstructured":"J. Zhang, L. Xing, Z. Tan, H. Wang, K. Wang, Multi-head attention fusion networks for multi-modal speech emotion recognition. Comput. Ind. Eng. 168, 108078 (2022)","journal-title":"Comput. Ind. Eng."},{"issue":"6","key":"2367_CR67","doi-asserted-by":"crossref","first-page":"1576","DOI":"10.1109\/TMM.2017.2766843","volume":"20","author":"S Zhang","year":"2017","unstructured":"S. Zhang, S. Zhang, T. Huang, W. Gao, Speech emotion recognition using deep convolutional neural network and discriminant temporal pyramid matching. IEEE Trans. Multim. 20(6), 1576\u20131590 (2017)","journal-title":"IEEE Trans. Multim."},{"key":"2367_CR68","doi-asserted-by":"crossref","first-page":"312","DOI":"10.1016\/j.bspc.2018.08.035","volume":"47","author":"J Zhao","year":"2019","unstructured":"J. Zhao, X. Mao, L. Chen, Speech emotion recognition using deep 1d & 2d CNN LSTM networks. Biomed. Signal Process. Control 47, 312\u2013323 (2019)","journal-title":"Biomed. Signal Process. Control"},{"key":"2367_CR69","doi-asserted-by":"crossref","first-page":"695","DOI":"10.1109\/TASLP.2022.3145287","volume":"30","author":"Y Zhou","year":"2022","unstructured":"Y. Zhou, X. Liang, Y. Gu, Y. Yin, L. Yao, Multi-classifier interactive learning for ambiguous speech emotion recognition. IEEE\/ACM Trans. Audio Speech Lang. Process. 30, 695\u2013705 (2022)","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."}],"container-title":["Circuits, Systems, and Signal Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00034-023-02367-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00034-023-02367-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00034-023-02367-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,7,28]],"date-time":"2023-07-28T11:08:55Z","timestamp":1690542535000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00034-023-02367-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,4,6]]},"references-count":69,"journal-issue":{"issue":"9","published-print":{"date-parts":[[2023,9]]}},"alternative-id":["2367"],"URL":"https:\/\/doi.org\/10.1007\/s00034-023-02367-6","relation":{},"ISSN":["0278-081X","1531-5878"],"issn-type":[{"type":"print","value":"0278-081X"},{"type":"electronic","value":"1531-5878"}],"subject":[],"published":{"date-parts":[[2023,4,6]]},"assertion":[{"value":"1 July 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 March 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 March 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 April 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"In research involving human participants, all procedures were carried out in compliance with ethical guidelines.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Human and animal rights"}}]}}