{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T19:22:13Z","timestamp":1757618533737,"version":"3.44.0"},"reference-count":33,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2025,7,8]],"date-time":"2025-07-08T00:00:00Z","timestamp":1751932800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,7,8]],"date-time":"2025-07-08T00:00:00Z","timestamp":1751932800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1007\/s11760-025-04492-y","type":"journal-article","created":{"date-parts":[[2025,7,10]],"date-time":"2025-07-10T09:13:17Z","timestamp":1752138797000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["MLEM for Human Emotion Detection using Dynamic Visual Perception"],"prefix":"10.1007","volume":"19","author":[{"given":"Sirshendu","family":"Hore","sequence":"first","affiliation":[]},{"given":"Tanmay","family":"Bhattacharya","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,7,8]]},"reference":[{"key":"4492_CR1","doi-asserted-by":"publisher","unstructured":"Varghese, A.A., Cherian, J.P., Kizhakkethottam, J.J.: Overview on emotion recognition system. In 2015 Int. Conf. Soft-Computing and Networks Security (ICSNS) (2015). https:\/\/doi.org\/10.1109\/icsns.2015.7292443","DOI":"10.1109\/icsns.2015.7292443"},{"key":"4492_CR2","doi-asserted-by":"publisher","DOI":"10.1093\/oso\/9780195112719.001.0001","volume-title":"The Expression of the Emotions in Man and Animals","author":"C Darwin","year":"1998","unstructured":"Darwin, C., Prodger, P.: The Expression of the Emotions in Man and Animals. Oxford University Press, USA (1998)"},{"key":"4492_CR3","doi-asserted-by":"publisher","first-page":"97","DOI":"10.1109\/34.908962","volume":"23","author":"Y-I Tian","year":"2001","unstructured":"Tian, Y.-I., Kanade, T., Cohn, J.F.: Recognizing action units for facial expression analysis. IEEE Trans. Pattern Anal. Mach. Intell. 23, 97\u2013115 (2001)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"4492_CR4","doi-asserted-by":"publisher","first-page":"169","DOI":"10.1080\/02699939208411068","volume":"6","author":"P Ekman","year":"1992","unstructured":"Ekman, P.: An argument for basic emotions. Cognition & Emotion 6, 169\u2013200 (1992)","journal-title":"Cognition & Emotion"},{"key":"4492_CR5","first-page":"59602E","volume":"5960","author":"I Kotsia","year":"2005","unstructured":"Kotsia, I., Pitas, I.: Real-time facial expression recognition from image sequences using support vector machines. Visual Commun. Image Process. 5960, 59602E (2005)","journal-title":"Visual Commun. Image Process."},{"key":"4492_CR6","first-page":"88","volume":"10165","author":"B Reddy","year":"2016","unstructured":"Reddy, B., Kim, Y.-H., Yun, S., Jang, J., Hong, S.: End-to-end deep learning for single-step real-time facial expression recognition. Video Anal. Face Facial Express. Recogn. 10165, 88\u201397 (2016)","journal-title":"Video Anal. Face Facial Express. Recogn."},{"key":"4492_CR7","doi-asserted-by":"publisher","first-page":"177","DOI":"10.1007\/s00371-016-1323-z","volume":"34","author":"S Agarwal","year":"2018","unstructured":"Agarwal, S., Santra, B., Mukherjee, D.P.: Anubhav: recognizing emotions through facial expression. Vis. Comput. 34, 177\u2013191 (2018). https:\/\/doi.org\/10.1007\/s00371-016-1323-z","journal-title":"Vis. Comput."},{"key":"4492_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/tim.2020.3031835","volume":"70","author":"M Karnati","year":"2020","unstructured":"Karnati, M., Seal, A., Krejcar, O., Yazidi, A.: Facial expression recognition using local gravitational force descriptor based deep convolution neural networks. IEEE Trans. Instrum. Meas. 70, 1 (2020). https:\/\/doi.org\/10.1109\/tim.2020.3031835","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"4492_CR9","doi-asserted-by":"publisher","first-page":"9125","DOI":"10.1007\/s00521-020-05676-y","volume":"33","author":"M Karnati","year":"2021","unstructured":"Karnati, M., Seal, A., Krejcar, O., Yazidi, A.: FER-net: facial expression recognition using deep neural net. Neural Comput. Appl. 33, 9125\u20139136 (2021). https:\/\/doi.org\/10.1007\/s00521-020-05676-y","journal-title":"Neural Comput. Appl."},{"key":"4492_CR10","doi-asserted-by":"publisher","first-page":"2058","DOI":"10.1109\/TAFFC.2022.3208309","volume":"13","author":"M Karnati","year":"2022","unstructured":"Karnati, M., Seal, A., Yazidi, A., Krejcar, O.: FLEPNet: Feature Level Ensemble Parallel Network for Facial Expression Recognition. IEEE Trans. Affect. Comput. 13, 2058\u20132070 (2022). https:\/\/doi.org\/10.1109\/TAFFC.2022.3208309","journal-title":"IEEE Trans. Affect. Comput."},{"key":"4492_CR11","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2023.3243661","volume":"72","author":"M Karnati","year":"2023","unstructured":"Karnati, M., Seal, A., Bhattacharjee, D., Yazidi, A., Krejcar, O.: Understanding deep learning techniques for recognition of human emotions using facial expressions: A comprehensive survey. IEEE Trans. Instrum. Meas. 72, 1\u201331 (2023). https:\/\/doi.org\/10.1109\/TIM.2023.3243661","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"4492_CR12","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2023.3314815","volume":"72","author":"M Karnati","year":"2023","unstructured":"Karnati, M., Seal, A., Jaworek-Korjakowska, J., Krejcar, O.: Facial expression recognition in-the-wild using blended feature attention network. IEEE Trans. Instrum. Meas. 72, 1\u201316 (2023). https:\/\/doi.org\/10.1109\/TIM.2023.3314815","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"4492_CR13","doi-asserted-by":"publisher","unstructured":"Zhao, Y., Cheng, J., Zhou, W., Zhang, C., Pan, X.: Infrared pedestrian detection with converted temperature map. In 2019 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC) (2019). https:\/\/doi.org\/10.1109\/apsipaasc47483.2019.9023228","DOI":"10.1109\/apsipaasc47483.2019.9023228"},{"key":"4492_CR14","doi-asserted-by":"publisher","first-page":"3286","DOI":"10.1109\/TCSVT.2023.3318672","volume":"34","author":"G Yue","year":"2024","unstructured":"Yue, G., et al.: Dual-constraint coarse-to-fine network for camouflaged object detection. IEEE Trans. Circuits Syst. Video Technol. 34, 3286\u20133298 (2024). https:\/\/doi.org\/10.1109\/TCSVT.2023.3318672","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"4492_CR15","doi-asserted-by":"publisher","unstructured":"Sarvestani, A.S., Zhou, W., Wang, Z.: Perceptual crack detection for rendered 3D textured meshes. In 2024 16th Int. Conf. Quality Multimedia Experience (QoMEX) (2024), 1\u20137. https:\/\/doi.org\/10.1109\/QoMEX61742.2024.10598253","DOI":"10.1109\/QoMEX61742.2024.10598253"},{"key":"4492_CR16","doi-asserted-by":"publisher","first-page":"498","DOI":"10.1016\/j.ins.2021.11.061","volume":"585","author":"S Bashath","year":"2022","unstructured":"Bashath, S., Perera, N., Tripathi, S., Manjang, K., Dehmer, M., Streib, F.E.: A data-centric review of deep transfer learning with applications to text data. Inf. Sci. 585, 498\u2013528 (2022). https:\/\/doi.org\/10.1016\/j.ins.2021.11.061","journal-title":"Inf. Sci."},{"key":"4492_CR17","doi-asserted-by":"publisher","unstructured":"Suen, C.Y., Lam, L.: Multiple classifier combination methodologies for different output levels. In Multiple Classifier Systems. MCS 2000. Lecture Notes in Computer Science 1857 (Springer, 2000). https:\/\/doi.org\/10.1007\/3-540-45014-9_5","DOI":"10.1007\/3-540-45014-9_5"},{"key":"4492_CR18","doi-asserted-by":"publisher","first-page":"418","DOI":"10.1109\/21.155943","volume":"22","author":"L Xu","year":"1992","unstructured":"Xu, L., Krzyzak, A., Suen, C.Y.: Methods of combining multiple classifiers and their application to handwriting recognition. IEEE Trans. Syst. Man Cybern. 22, 418\u2013435 (1992)","journal-title":"IEEE Trans. Syst. Man Cybern."},{"key":"4492_CR19","doi-asserted-by":"publisher","DOI":"10.1155\/2014\/380585","volume":"2014","author":"M Islam","year":"2014","unstructured":"Islam, M.: Feature and score fusion based multiple classifier selection for iris recognition. Comput. Intell. Neurosci. 2014, 380585 (2014). https:\/\/doi.org\/10.1155\/2014\/380585","journal-title":"Comput. Intell. Neurosci."},{"key":"4492_CR20","doi-asserted-by":"publisher","first-page":"299","DOI":"10.1016\/S0031-3203(99)00223-X","volume":"34","author":"LI Kuncheva","year":"2001","unstructured":"Kuncheva, L.I., Bezdek, J.C., Duin, R.: Decision templates for multiple classifier fusion: an experimental comparison. Pattern Recognit. 34, 299\u2013314 (2001)","journal-title":"Pattern Recognit."},{"key":"4492_CR21","doi-asserted-by":"publisher","first-page":"20200423","DOI":"10.1007\/s11042-023-14543-6","volume":"10","author":"S Hore","year":"2023","unstructured":"Hore, S., Bhattacharya, T.: Impact of lockdown on Generation-Z: a fuzzy based multimodal emotion recognition approach using CNN. Multimed. Tools Appl. 10, 20200423 (2023). https:\/\/doi.org\/10.1007\/s11042-023-14543-6","journal-title":"Multimed. Tools Appl."},{"key":"4492_CR22","doi-asserted-by":"publisher","unstructured":"Martinez, G.E., Mendoza, O., Melin, P., Gonzalez, F.: Comparison between Choquet and Sugeno integrals as aggregation operators for modular neural networks. In IEEE Int. Conf. Fuzzy Syst. (FUZZ-IEEE) (2016), 2331\u20132336. https:\/\/doi.org\/10.1109\/FUZZ-IEEE.2016.7737984","DOI":"10.1109\/FUZZ-IEEE.2016.7737984"},{"key":"4492_CR23","first-page":"618","volume":"6","author":"D Fatima","year":"2022","unstructured":"Fatima, D., Hameed, M.A., Khalid, A.: Performance comparison of various-norms with Choquet integral in fuzzy logic using Covid-19 chest X-ray data. J. Positive School Psychol. 6, 618\u2013622 (2022)","journal-title":"J. Positive School Psychol."},{"key":"4492_CR24","doi-asserted-by":"publisher","DOI":"10.1016\/j.ajp.2020.102398","volume":"53","author":"K Verma","year":"2020","unstructured":"Verma, K.: The mental health impact of the COVID-19 epidemic on college students in India. Asian J. Psychiatr. 53, 102398 (2020). https:\/\/doi.org\/10.1016\/j.ajp.2020.102398","journal-title":"Asian J. Psychiatr."},{"key":"4492_CR25","doi-asserted-by":"publisher","first-page":"7","DOI":"10.1016\/j.jad.2020.11.032","volume":"280","author":"W Fu","year":"2021","unstructured":"Fu, W., et al.: Mental health of college students during the COVID-19 epidemic in China. J. Affect. Disord. 280, 7\u201310 (2021). https:\/\/doi.org\/10.1016\/j.jad.2020.11.032","journal-title":"J. Affect. Disord."},{"key":"4492_CR26","doi-asserted-by":"crossref","unstructured":"Parkhi, O.M., Vedaldi, A., Zisserman, A.: Deep face recognition. In Br. Mach. Vis. Conf. (BMVC) (2015)","DOI":"10.5244\/C.29.41"},{"key":"4492_CR27","unstructured":"Huang, G.B., Ramesh, M., Berg, T., Miller, E.L.: Labeled faces in the wild: a database for studying face recognition in unconstrained environments. Technical Report 07-49, Univ. Massachusetts, Amherst (2007)"},{"key":"4492_CR28","doi-asserted-by":"crossref","unstructured":"Schroff,F., Kalenichenko, D., Philbin, J.: FaceNet: a unified embedding for face recognition and clustering. In Proc. IEEE Conf. Comput. Vis. Pattern Recognit. (CVPR) (2015), 815\u2013823","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"4492_CR29","doi-asserted-by":"crossref","unstructured":"Wolf, L., Hassner, T., Maoz, I.: Face recognition in unconstrained videos with matched background similarity. In Proc. IEEE Conf. Comput. Vis. Pattern Recognit. (CVPR) (2011)","DOI":"10.1109\/CVPR.2011.5995566"},{"key":"4492_CR30","unstructured":"Facebook AI Research. DeepFace: closing the gap to human-level performance in face verification. (Retrieved 2019-07-25)"},{"key":"4492_CR31","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In Proc. IEEE Conf. Comput. Vis. Pattern Recognit. (CVPR) (2015). arXiv:1512.03385","DOI":"10.1109\/CVPR.2016.90"},{"key":"4492_CR32","doi-asserted-by":"publisher","first-page":"79","DOI":"10.1162\/neco.1991.3.1.79","volume":"3","author":"RA Jacobs","year":"1991","unstructured":"Jacobs, R.A., Jordan, M.I., Nowlan, S.J., Hinton, G.E.: Adaptive mixtures of local experts. Neural Comput. 3, 79\u201387 (1991)","journal-title":"Neural Comput."},{"key":"4492_CR33","doi-asserted-by":"crossref","unstructured":"Alpaydin, E., Jordan, M.I.: Local linear perceptrons for classification. IEEE Trans. Neural Netw. 7(3), 788\u2013792 (1996)","DOI":"10.1109\/72.501737"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-04492-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-025-04492-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-04492-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,7]],"date-time":"2025-09-07T01:29:53Z","timestamp":1757208593000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-025-04492-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,8]]},"references-count":33,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2025,10]]}},"alternative-id":["4492"],"URL":"https:\/\/doi.org\/10.1007\/s11760-025-04492-y","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"type":"print","value":"1863-1703"},{"type":"electronic","value":"1863-1711"}],"subject":[],"published":{"date-parts":[[2025,7,8]]},"assertion":[{"value":"28 October 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"26 June 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 June 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 July 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could appear to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of Interest\/Competing interests"}},{"value":".","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Code availability"}}],"article-number":"853"}}