{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T19:56:29Z","timestamp":1768334189995,"version":"3.49.0"},"reference-count":70,"publisher":"Springer Science and Business Media LLC","issue":"22","license":[{"start":{"date-parts":[[2023,3,8]],"date-time":"2023-03-08T00:00:00Z","timestamp":1678233600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,3,8]],"date-time":"2023-03-08T00:00:00Z","timestamp":1678233600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2023,9]]},"DOI":"10.1007\/s11042-023-14543-6","type":"journal-article","created":{"date-parts":[[2023,3,8]],"date-time":"2023-03-08T05:03:00Z","timestamp":1678251780000},"page":"33835-33863","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Impact of lockdown on Generation-Z: a fuzzy based multimodal emotion recognition approach using CNN"],"prefix":"10.1007","volume":"82","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6045-5352","authenticated-orcid":false,"given":"Sirshendu","family":"Hore","sequence":"first","affiliation":[]},{"given":"Tanmay","family":"Bhattacharya","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,3,8]]},"reference":[{"issue":"3","key":"14543_CR1","doi-asserted-by":"publisher","first-page":"92","DOI":"10.4018\/IJACI.2019070106","volume":"10","author":"MNY Ali","year":"2019","unstructured":"Ali MNY, Sarowar MG, Rahman ML, Chaki J, Dey N, Tavares JMR (2019) Adam deep learning with SOM for human sentiment classification. 
Int J Ambient Comput Intell (IJACI) 10(3):92\u2013116","journal-title":"Int J Ambient Comput Intell (IJACI)"},{"key":"14543_CR2","doi-asserted-by":"publisher","unstructured":"Alizadeh S, Fazel A (2017) Convolutional neural networks for facial expression recognition arXiv:1704:06756. https:\/\/doi.org\/10.48550\/arXiv.1704.06756","DOI":"10.48550\/arXiv.1704.06756"},{"key":"14543_CR3","doi-asserted-by":"publisher","unstructured":"Benitez-Quiroz CF, Srinivasan R, Martinez AM (2016) Emotional: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild. In: 2016 IEEE International Conference on Computer Vision and Pattern Recognition (CVPR), Las Vegas, pp 5562\u20135570. https:\/\/doi.org\/10.1109\/CVPR.2016.600","DOI":"10.1109\/CVPR.2016.600"},{"issue":"1","key":"14543_CR4","doi-asserted-by":"publisher","first-page":"98","DOI":"10.4018\/IJACI.2017010105","volume":"8","author":"A Boulmaiz","year":"2017","unstructured":"Boulmaiz A, Messadeg D, Doghmane N, Taleb-Ahmed A (2017) Design and implementation of a robust acoustic recognition system for waterbird species using TMS320C6713 DSK. Int J Ambient Comput Intell (IJACI) 8(1):98\u2013118","journal-title":"Int J Ambient Comput Intell (IJACI)"},{"key":"14543_CR5","unstructured":"Breuer R, Kimmel RA (2014) deep learning perspective on the origin of facial expressions. arXiv 2017, arXiv:1705.01842"},{"key":"14543_CR6","doi-asserted-by":"publisher","first-page":"138882","DOI":"10.1016\/j.scitotenv.2020.138882","volume":"728","author":"I Chakraborty","year":"2020","unstructured":"Chakraborty I, Maity P (2020) COVID-19 outbreak: migration, effects on society, global environment, and prevention. Sci Total Environ 728:138882. 
https:\/\/doi.org\/10.1016\/j.scitotenv.2020.138882","journal-title":"Sci Total Environ"},{"issue":"8","key":"14543_CR7","doi-asserted-by":"publisher","first-page":"1299","DOI":"10.5958\/0974-360X.2016.00247.X","volume":"9","author":"R Chandrasekar","year":"2016","unstructured":"Chandrasekar R, Khare N (2016) Review of Fuzzy Rule-Based Classification systems. Res J Pharm Tech 9(8):1299\u20131302. https:\/\/doi.org\/10.5958\/0974-360X.2016.00247.X","journal-title":"Res J Pharm Tech"},{"key":"14543_CR8","doi-asserted-by":"crossref","unstructured":"Chen C-R, Wong W-S, Chiu C-T (2010) A 0.64 mm 2 real-time cascade face detection design based on reduced two-field extraction. IEEE Trans Very Large Scale Integr (VLSI) Syst 19(11):1937\u20131948 20","DOI":"10.1109\/TVLSI.2010.2069575"},{"key":"14543_CR9","unstructured":"Covid-19 impact on young people and the youth sector (2020) Knowledge HUB: COVID-19 impact on the youth sector Council of Europe European Union.\u00a0https:\/\/pjp-eu.coe.int\/en\/web\/youth-partnership\/covid-19"},{"issue":"1","key":"14543_CR10","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1109\/79.911197","volume":"18","author":"R Cowie","year":"2001","unstructured":"Cowie R, Douglas-Cowie E, Tsapatsoulis N, Votsis G, Kollias S, Fellenz W, Taylor JG (2001) Emotion recognition in human-computer interaction. IEEE Signal Process Mag 18(1):32\u201380. https:\/\/doi.org\/10.1109\/79.911197","journal-title":"IEEE Signal Process Mag"},{"issue":"49\u201351","key":"14543_CR11","first-page":"2003","volume":"180","author":"A Damasio","year":"2003","unstructured":"Damasio A (2003) Virtue in mind. New Sci 180(49\u201351):2003","journal-title":"New Sci"},{"key":"14543_CR12","doi-asserted-by":"crossref","DOI":"10.1093\/oso\/9780195112719.001.0001","volume-title":"The expression of the emotions in man and animals","author":"C Darwin","year":"1998","unstructured":"Darwin C, Prodger P (1998) The expression of the emotions in man and animals. 
Oxford University Press, Oxford"},{"issue":"2","key":"14543_CR13","doi-asserted-by":"publisher","first-page":"124","DOI":"10.1037\/h0030377","volume":"17","author":"P Ekman","year":"1971","unstructured":"Ekman P, Friesen WV (1971) Constants across cultures in the face and emotion. J Pers Soc Psychol 17(2):124\u2013129","journal-title":"J Pers Soc Psychol"},{"issue":"1","key":"14543_CR14","doi-asserted-by":"publisher","first-page":"259","DOI":"10.1016\/S0031-3203(02)00052-3","volume":"36","author":"B Fasel","year":"2003","unstructured":"Fasel B, Luettin J (2003) Automatic facial expression analysis: a survey. Pattern Recogn 36(1):259\u2013275","journal-title":"Pattern Recogn"},{"key":"14543_CR15","doi-asserted-by":"publisher","unstructured":"Fathallah A, Abdi L, Douik A (2017) Facial expression recognition via deep learning. In:\u00a02017 IEEE\/ACS 14th International Conference on Computer Systems and Applications (AICCSA). IEEE, pp 745\u2013750.\u00a0https:\/\/doi.org\/10.1109\/AICCSA.2017.124","DOI":"10.1109\/AICCSA.2017.124"},{"key":"14543_CR16","doi-asserted-by":"publisher","unstructured":"Fong SJ, Dey N, Chaki J (2020) Artificial intelligence for coronavirus outbreak, pp 23\u201345. https:\/\/doi.org\/10.1007\/978-981-15-5936-5_2","DOI":"10.1007\/978-981-15-5936-5_2"},{"key":"14543_CR17","doi-asserted-by":"publisher","first-page":"106282","DOI":"10.1016\/j.asoc.2020.106282","volume":"93","author":"SJ Fong","year":"2020","unstructured":"Fong SJ, Li G, Dey N, Crespo RG, Herrera-Viedma E (2020) Monte Carlo decision making under high uncertainty of novel coronavirus epidemic using hybridized deep learning and fuzzy rule induction. Appl Soft Comput 93:106282","journal-title":"Appl Soft Comput"},{"key":"14543_CR18","doi-asserted-by":"publisher","first-page":"255","DOI":"10.1177\/1754073918765660","volume":"10","author":"K Gasper","year":"2018","unstructured":"Gasper K (2018) Utilizing neutral affective states in research: theory, assessment, and recommendations. 
Emot Rev 10:255\u2013266. https:\/\/doi.org\/10.1177\/1754073918765660","journal-title":"Emot Rev"},{"key":"14543_CR19","doi-asserted-by":"publisher","unstructured":"Goodfellow IJ, Erhan D, Carrier PL et al (2013) Challenges in representation learning: a report on three machine learning contests. Neural Networks : the Official Journal of the International Neural Network Society 64:59-63. https:\/\/doi.org\/10.1016\/j.neunet.2014.09.005","DOI":"10.1016\/j.neunet.2014.09.005"},{"key":"14543_CR20","doi-asserted-by":"publisher","first-page":"2241","DOI":"10.21437\/Interspeech.2007-609","volume":"2007","author":"P Gupta","year":"2007","unstructured":"Gupta P, Rajput N (2007) Two-stream emotion recognition for call center monitoring. Proc Interspeech 2007:2241\u20132244. https:\/\/doi.org\/10.21437\/Interspeech.2007-609","journal-title":"Proc Interspeech"},{"key":"14543_CR21","doi-asserted-by":"publisher","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: 2016 IEEE conference on computer vision and pattern recognition, Las Vegas, pp 770\u2013778. https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"14543_CR22","doi-asserted-by":"publisher","unstructured":"Iqbal A, Barua K (2019) A real-time emotion recognition from speech using gradient boosting. In: 2019 IEEE international conference on electrical, Computer and Communication Engineering (ECCE), Cox\u2019sBazar, Bangladesh,\u00a0pp 1\u20135. https:\/\/doi.org\/10.1109\/ECACE.2019.8679271","DOI":"10.1109\/ECACE.2019.8679271"},{"key":"14543_CR23","doi-asserted-by":"publisher","first-page":"260","DOI":"10.1111\/j.1745-6916.2007.00044.x","volume":"2","author":"CE Izard","year":"2007","unstructured":"Izard CE (2007) Basic emotions, natural kinds, emotion schemas, and a new paradigm. Perspect Psychol Sci 2:260\u2013280. 
https:\/\/doi.org\/10.1111\/j.1745-6916.2007.00044.x","journal-title":"Perspect Psychol Sci"},{"key":"14543_CR24","doi-asserted-by":"publisher","first-page":"69","DOI":"10.1016\/j.patrec.2019.01.008","volume":"120","author":"DK Jain","year":"2019","unstructured":"Jain DK, Shamsolmoali P, Sehdev P (2019) Extended deep neural network for facial emotion recognition. Pattern Recogn Lett 120:69\u201374","journal-title":"Pattern Recogn Lett"},{"key":"14543_CR25","doi-asserted-by":"publisher","unstructured":"Jannat R, Tynes I, Lime LL, Adorno J, Canavan S (2018) Ubiquitous emotion recognition using audio and video data. In: 2018 ACM International Joint Conference and 2018 International Symposium on Pervasive and Ubiquitous Computing and Wearable Computers, ACM, pp 956\u2013959. https:\/\/doi.org\/10.1145\/3267305.3267689","DOI":"10.1145\/3267305.3267689"},{"key":"14543_CR26","doi-asserted-by":"publisher","unstructured":"Kim DH, Baddar W, Jang J, Ro, YM (2017) Multi-objective based spatio-temporal feature representation learning robust to expression intensity variations for facial expression recognition. IEEE Trans Affect Comput\u00a010:223\u2013236. https:\/\/doi.org\/10.1109\/TAFFC.2017.2695999","DOI":"10.1109\/TAFFC.2017.2695999"},{"issue":"2","key":"14543_CR27","doi-asserted-by":"publisher","first-page":"223","DOI":"10.1109\/TAFFC.2017.2695999","volume":"10","author":"DH Kim","year":"2017","unstructured":"Kim DH, Baddar WJ, Jang J, Ro YM (2017) Multi-objective based spatio-temporal feature representation learning robust to expression intensity variations for facial expression recognition. 
IEEE Trans Affect Comput 10(2):223\u2013236","journal-title":"IEEE Trans Affect Comput"},{"issue":"1","key":"14543_CR28","doi-asserted-by":"publisher","first-page":"172","DOI":"10.1109\/TIP.2006.884954","volume":"16","author":"I Kotsia","year":"2006","unstructured":"Kotsia I, Pitas I (2006) Facial expression recognition in image sequences using geometric deformation features and support vector machines. IEEE Trans Image Process 16(1):172\u2013187","journal-title":"IEEE Trans Image Process"},{"issue":"Part A","key":"14543_CR29","doi-asserted-by":"publisher","first-page":"106775","DOI":"10.1016\/j.asoc.2020.106775","volume":"97","author":"Y Kuang","year":"2020","unstructured":"Kuang Y, Wu Q, Wang Y, Dey N, Shi F, Crespo RG, Sherratt RS (2020) Simplified inverse filter tracked affective acoustic signals classification incorporating deep convolutional neural networks. Appl Soft Comput 97(Part A):106775","journal-title":"Appl Soft Comput"},{"key":"14543_CR30","first-page":"10","volume":"3361","author":"Y Lecun","year":"1995","unstructured":"Lecun Y, Bengio Y et al (1995) Convolutional networks for images, speech, and time series. Handb Brain Theory Neural Netw 3361:10","journal-title":"Handb Brain Theory Neural Netw"},{"key":"14543_CR31","doi-asserted-by":"publisher","first-page":"2439","DOI":"10.1109\/TIP.2018.2886767","volume":"28","author":"Y Li","year":"2019","unstructured":"Li Y, Zeng J, Shan S, Chen X (2019) Occlusion aware facial expression recognition using CNN with attention mechanism. IEEE Trans Image Process 28:2439\u20132450","journal-title":"IEEE Trans Image Process"},{"issue":"4","key":"14543_CR32","doi-asserted-by":"publisher","first-page":"467","DOI":"10.1109\/TIP.2002.999679","volume":"11","author":"C Liu","year":"2002","unstructured":"Liu C, Wechsler H (2002) Gabor feature based classification using the enhanced fisher linear discriminant model for face recognition. 
IEEE Trans Image Process 11(4):467\u2013476","journal-title":"IEEE Trans Image Process"},{"key":"14543_CR33","doi-asserted-by":"publisher","unstructured":"Liu P, Han S, Meng Z, Tong Y (2014) Facial expression recognition via a boosted deep belief network. In: Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), Columbus, pp\u00a01805\u20131812. https:\/\/doi.org\/10.1109\/CVPR.2014.233","DOI":"10.1109\/CVPR.2014.233"},{"issue":"5","key":"14543_CR34","doi-asserted-by":"publisher","first-page":"e0196391","DOI":"10.1371\/journal.pone.0196391","volume":"13","author":"SR Livingstone","year":"2018","unstructured":"Livingstone SR, Russo FA (2018) The Ryerson audio-visual database of emotional speech and Song (RAVDESS): a dynamic, multimodal set of facial and vocal expressions in north American English. PLoS One 13(5):e0196391","journal-title":"PLoS One"},{"issue":"3","key":"14543_CR35","doi-asserted-by":"publisher","first-page":"574","DOI":"10.1109\/TBME.2010.2091640","volume":"58","author":"LA Low","year":"2011","unstructured":"Low LA, Maddage NC, Lech M, Sheeber LB, Allen NB (2011) Detection of clinical depression in adolescents\u2019 speech during family interactions. IEEE Trans Biomed Eng 58(3):574\u2013586. https:\/\/doi.org\/10.1109\/TBME.2010.2091640","journal-title":"IEEE Trans Biomed Eng"},{"key":"14543_CR36","doi-asserted-by":"publisher","unstructured":"Lucey P, Cohn JF, Kanade T, Saragih J, Ambadar Z, Matthews I (2010) The extended cohn-kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression. In: 2010 IEEE computer society conference on computer vision and pattern recognition-workshops. IEEE, pp 94\u2013101. https:\/\/doi.org\/10.1109\/CVPRW.2010.5543262","DOI":"10.1109\/CVPRW.2010.5543262"},{"key":"14543_CR37","doi-asserted-by":"publisher","unstructured":"Lyons M, Akamatsu S, Kamachi M, Gyoba J (1998) Coding facial expressions with gabor wavelets. 
In: 1998 IEEE International Conference on Automatic Face and Gesture Recognition, Nara, Japan, pp 200\u2013205. https:\/\/doi.org\/10.1109\/AFGR.1998.670949","DOI":"10.1109\/AFGR.1998.670949"},{"key":"14543_CR38","doi-asserted-by":"publisher","unstructured":"Mahalle P, Kalamkar AB, Dey N, Chaki J, Shinde GR (2020) Forecasting models for coronavirus (covid-19): a survey of the state-of-the-art. SN Comput Sci 1(4):197. https:\/\/doi.org\/10.1007\/s42979-020-00209-9","DOI":"10.1007\/s42979-020-00209-9"},{"key":"14543_CR39","doi-asserted-by":"publisher","unstructured":"Minaee S, Abdolrashidi A (2019) Deep-emotion: facial expression recognition using attentional convolutional network. Computer Vision and Pattern Recognition. arXiv:1902.01019. https:\/\/doi.org\/10.48550\/arxiv.1902.01019","DOI":"10.48550\/arxiv.1902.01019"},{"key":"14543_CR40","doi-asserted-by":"crossref","unstructured":"Mohammadi MR, Fatemizadeh E, Mahoor MH (2014) Pca-based dictionary building for accurate facial expression recognition via sparse representation. J Vis Commun Image Represent 25(5):1082\u20131092 13","DOI":"10.1016\/j.jvcir.2014.03.006"},{"issue":"564867","key":"14543_CR41","doi-asserted-by":"publisher","first-page":"8","DOI":"10.1155\/2015\/564867","volume":"2015","author":"RA Mohammadpour","year":"2015","unstructured":"Mohammadpour RA, Seyed M, Abedi M, Bagheri S, Ghaemian A (2015) Fuzzy rule-based classification system for assessing coronary artery disease. Comput Math Methods Med 2015(564867):8. https:\/\/doi.org\/10.1155\/2015\/564867","journal-title":"Comput Math Methods Med"},{"key":"14543_CR42","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2020.3031835","volume":"70","author":"K Mohan","year":"2020","unstructured":"Mohan K, Seal A, Krejcar O, Yazidi A (2020) Facial expression recognition using local gravitational force descriptor based deep convolution neural networks. 
IEEE Trans Instrum Meas 70:1\u201312","journal-title":"IEEE Trans Instrum Meas"},{"key":"14543_CR43","doi-asserted-by":"publisher","first-page":"9125","DOI":"10.1007\/s00521-020-05676-y","volume":"33","author":"K Mohan","year":"2021","unstructured":"Mohan K, Seal A, Krejcar O, Yazidi A (2021) FER-net: facial expression recognition using deep neural net. Neural Comput Applic 33:9125\u20139136. https:\/\/doi.org\/10.1007\/s00521-020-05676-y","journal-title":"Neural Comput Applic"},{"key":"14543_CR44","doi-asserted-by":"publisher","unstructured":"Mollahosseini A, Chan D, Mahoor MH (2016) Going deeper in facial expression recognition using deep neural networks. In: 2016 IEEE Winter Conference on Applications of Computer Vision(WACV), pp 1\u201310. https:\/\/doi.org\/10.1109\/WACV.2016.7477450","DOI":"10.1109\/WACV.2016.7477450"},{"key":"14543_CR45","doi-asserted-by":"publisher","unstructured":"Muda L, Begam M, Elamvazuthi I (2010) Voice recognition algorithms using mel frequency cepstral coefficient (MFCC) and dynamic time warping (dtw) techniques, ArXiv, abs\/1003.4083. https:\/\/doi.org\/10.48550\/arXiv.1003.4083","DOI":"10.48550\/arXiv.1003.4083"},{"key":"14543_CR46","doi-asserted-by":"publisher","unstructured":"Pichora F, Kathleen M, Kate D (2020) Toronto emotional speech set (TESS), Borealis, V1. https:\/\/doi.org\/10.5683\/SP2\/E8H2MF","DOI":"10.5683\/SP2\/E8H2MF"},{"key":"14543_CR47","doi-asserted-by":"publisher","unstructured":"Pinto MG, Polignano M, Lops P, Semeraro G (2020) Emotions understanding model from spoken language using deep neural networks and Mel-frequency cepstral coefficients. In: 2020 IEEE Conference on Evolving and Adaptive Intelligent Systems (EAIS), pp 1\u20135. 
https:\/\/doi.org\/10.1109\/EAIS48028.2020.9122698","DOI":"10.1109\/EAIS48028.2020.9122698"},{"issue":"3","key":"14543_CR48","doi-asserted-by":"publisher","first-page":"343","DOI":"10.1109\/TAFFC.2017.2753235","volume":"9","author":"G Pons","year":"2017","unstructured":"Pons G, Masip D (2017) Supervised committee of convolutional neural networks in automated facial expression analysis. IEEE Trans Affect Comput 9(3):343\u2013350","journal-title":"IEEE Trans Affect Comput"},{"key":"14543_CR49","first-page":"152","volume":"64","author":"DL Robinson","year":"2008","unstructured":"Robinson DL (2008) Brain function, emotional experience and personality. Neth J Psychol 64:152\u2013167","journal-title":"Neth J Psychol"},{"issue":"6","key":"14543_CR50","doi-asserted-by":"publisher","first-page":"1161","DOI":"10.1037\/h0077714","volume":"39","author":"J Russell","year":"1980","unstructured":"Russell J (1980) A circumplex model of affect. J Pers Soc Psychol 39(6):1161\u20131178. https:\/\/doi.org\/10.1037\/h0077714","journal-title":"J Pers Soc Psychol"},{"issue":"6","key":"14543_CR51","doi-asserted-by":"publisher","first-page":"803","DOI":"10.1016\/j.imavis.2008.08.005","volume":"27","author":"C Shan","year":"2009","unstructured":"Shan C, Gong S, McOwan PW (2009) Facial expression recognition based on local binary patterns: a comprehensive study. Image Vis Comput 27(6):803\u2013816","journal-title":"Image Vis Comput"},{"key":"14543_CR52","doi-asserted-by":"publisher","first-page":"82","DOI":"10.1016\/j.neucom.2019.05.005","volume":"355","author":"J Shao","year":"2019","unstructured":"Shao J, Qian Y (2019) Three convolutional neural network models for facial expression recognition in the wild. Neurocomputing 355:82\u201392","journal-title":"Neurocomputing"},{"key":"14543_CR53","doi-asserted-by":"publisher","unstructured":"Simonyan K, Zisserman A (2014) Very deep convolutional networks for large-scale image recognition. CoRR, abs\/1409.1556. 
https:\/\/doi.org\/10.48550\/arXiv.1409.1556","DOI":"10.48550\/arXiv.1409.1556"},{"key":"14543_CR54","unstructured":"Socio-economic impact of COVID-19 (2020)\u00a0Briefs and Report. https:\/\/www.undp.org\/content\/undp\/en\/home\/coronavirus\/socio-economic-impact-of-covid-19.html"},{"issue":"49\u201361","key":"14543_CR55","first-page":"31","volume":"119","author":"N Sun","year":"2017","unstructured":"Sun N, Li Q, Huan R, Liu J, Han G (2017) Deep spatial-temporal feature fusion for facial expression recognition in static images. Pattern Recogn Lett 119(49\u201361):31","journal-title":"Pattern Recogn Lett"},{"key":"14543_CR56","unstructured":"Surrey Audio-Visual Expressed Emotion (SAVEE). (n.d.), http:\/\/kahlan.eps.surrey.ac.uk\/savee\/"},{"issue":"2","key":"14543_CR57","doi-asserted-by":"publisher","first-page":"97","DOI":"10.1109\/34.908962","volume":"23","author":"YI Tian","year":"2001","unstructured":"Tian YI, Kanade T, Cohn JF (2001) Recognizing action units for facial expression analysis. IEEE Trans Pattern Anal Mach Intell 23(2):97\u2013115","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"14543_CR58","unstructured":"Turk MA, Pentland AP (1991) Face recognition using eigenfaces. In: 1991 IEEE Conference on computer society computer vision and pattern recognition, Maui, pp 586\u2013591"},{"key":"14543_CR59","doi-asserted-by":"publisher","unstructured":"Tzirakis P, Zafeiriou S, Schuller B (2019) Real-world automatic continuous affect recognition from audiovisual signals. In:\u00a0Pineda A, Sebe R (eds) Multimodal Behavioral Analysis in the Wild: Advances and Challenges.\u00a0Academic Press Ltd-Elsevier Science Ltd, pp 387\u2013406.\u00a0https:\/\/doi.org\/10.1016\/B978-0-12-814601-9.00028-6","DOI":"10.1016\/B978-0-12-814601-9.00028-6"},{"key":"14543_CR60","doi-asserted-by":"publisher","unstructured":"Viola P, Jones P (2001) Rapid object detection using a boosted cascade of simple features. 
In: 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. CVPR 2001, Kauai, pp 511\u2013518. https:\/\/doi.org\/10.1109\/CVPR.2001.990517","DOI":"10.1109\/CVPR.2001.990517"},{"issue":"4","key":"14543_CR61","doi-asserted-by":"publisher","first-page":"1087","DOI":"10.1007\/s00521-016-2512-4","volume":"29","author":"D Wang","year":"2018","unstructured":"Wang D, He T, Li Z, Cao L, Dey N, Ashour AS, Shi F (2018) Image feature-based affective retrieval employing improved parameter and structure identification of adaptive neuro-fuzzy inference system. Neural Comput Applic 29(4):1087\u20131102","journal-title":"Neural Comput Applic"},{"key":"14543_CR62","doi-asserted-by":"publisher","first-page":"820","DOI":"10.1037\/0022-3514.76.5.820","volume":"76","author":"D Watson","year":"1999","unstructured":"Watson D, Wiese D, Vaidya J, Tellegen A (1999) The two general activation systems of affect: structural findings, evolutionary considerations, and psychobiological evidence. J Pers Soc Psychol 76:820\u2013838. https:\/\/doi.org\/10.1037\/0022-3514.76.5.820","journal-title":"J Pers Soc Psychol"},{"key":"14543_CR63","doi-asserted-by":"publisher","unstructured":"Whissell CM (1989) The dictionary of affect in language. In: Plutchik R, Kellerman H (eds) The measurement of emotion. Academic Press, pp 113\u2013131. https:\/\/doi.org\/10.1016\/B978-0-12-558704-4.50011-6","DOI":"10.1016\/B978-0-12-558704-4.50011-6"},{"key":"14543_CR64","doi-asserted-by":"crossref","unstructured":"Yang N, Dey N, Sherratt RS, Shi F (2020) Recognize basic emotional statesin speech by machine learning techniques using mel-frequency cepstral coefficient features. J Intell Fuzzy Syst 39(2):1925\u20131936 ISSN 1875-8967","DOI":"10.3233\/JIFS-179963"},{"key":"14543_CR65","unstructured":"Youth and COVID-19: Response, Recovery and Resilience (2020)\u00a0OECD Survey on COVID-19 and Youth. 
http:\/\/www.oecd.org\/coronavirus\/policy-responses\/youth-and-covid-19-response-recovery-and-resilience-c40e61c6\/"},{"issue":"3","key":"14543_CR66","doi-asserted-by":"publisher","first-page":"338","DOI":"10.1016\/S0019-9958(65)90241-X","volume":"8","author":"LA Zadeh","year":"1965","unstructured":"Zadeh LA (1965) Fuzzy sets. Inf Control 8(3):338\u2013353. https:\/\/doi.org\/10.1016\/S0019-9958(65)90241-X","journal-title":"Inf Control"},{"key":"14543_CR67","doi-asserted-by":"publisher","unstructured":"Zhang B, Essl G, Provost EM (2015) Recognizing emotion from singing and speaking using shared models. In: IEEE 2015 International Conference on Affective Computing and Intelligent Interaction (ACII) IEEE, pp 139\u2013145. https:\/\/doi.org\/10.1109\/ACII.2015.7344563","DOI":"10.1109\/ACII.2015.7344563"},{"key":"14543_CR68","doi-asserted-by":"publisher","unstructured":"Zhang D, Song F, Xu Y, Liang Z (2009) Decision level fusion, advanced pattern recognition technologies with applications to biometrics. IGI Global, pp 328\u2013348. https:\/\/doi.org\/10.4018\/978-1-60566-200-8.ch015","DOI":"10.4018\/978-1-60566-200-8.ch015"},{"key":"14543_CR69","doi-asserted-by":"crossref","unstructured":"Zhao X, Liang X, Liu L, Li T, Han Y, Vasconcelos N, Yan S (2016) Peak-piloted deep network for facial expression recognition. In: Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, Proceedings, Part II 14 425\u2013442. Springer International Publishing","DOI":"10.1007\/978-3-319-46475-6_27"},{"issue":"8","key":"14543_CR70","doi-asserted-by":"publisher","first-page":"1499","DOI":"10.1109\/TCYB.2014.2354351","volume":"45","author":"L Zhong","year":"2014","unstructured":"Zhong L, Liu Q, Yang P, Huang J, Metaxas DN (2014) Learning multiscale active facial patches for expression analysis. 
IEEE Trans Cybern 45(8):1499\u20131510","journal-title":"IEEE Trans Cybern"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-14543-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-023-14543-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-14543-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,8]],"date-time":"2023-12-08T13:52:10Z","timestamp":1702043530000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-023-14543-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,3,8]]},"references-count":70,"journal-issue":{"issue":"22","published-print":{"date-parts":[[2023,9]]}},"alternative-id":["14543"],"URL":"https:\/\/doi.org\/10.1007\/s11042-023-14543-6","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"value":"1380-7501","type":"print"},{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,3,8]]},"assertion":[{"value":"6 April 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 November 2021","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"31 January 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 March 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors certify that there is no conflict of interest regarding the material discussed in the manuscript. The authors also declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of Interest\/Competing Interest"}}]}}