{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:19:52Z","timestamp":1775067592478,"version":"3.50.1"},"reference-count":48,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2022,4,21]],"date-time":"2022-04-21T00:00:00Z","timestamp":1650499200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,4,21]],"date-time":"2022-04-21T00:00:00Z","timestamp":1650499200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2023,2]]},"DOI":"10.1007\/s11042-022-12796-1","type":"journal-article","created":{"date-parts":[[2022,4,21]],"date-time":"2022-04-21T18:05:57Z","timestamp":1650564357000},"page":"5455-5472","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":38,"title":["LSTM model for visual speech recognition through facial expressions"],"prefix":"10.1007","volume":"82","author":[{"given":"Shabina","family":"Bhaskar","sequence":"first","affiliation":[]},{"family":"Thasleema T. M.","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,4,21]]},"reference":[{"key":"12796_CR1","doi-asserted-by":"publisher","first-page":"20787","DOI":"10.1007\/s11042-019-7329-6","volume":"78","author":"R Arunachalam","year":"2018","unstructured":"Arunachalam R (2018) A strategic approach to recognize the speech of the children with hearing impairment: different sets of features and models. Multimed Tools Appl 78:20787\u201320808","journal-title":"Multimed Tools Appl"},{"issue":"5","key":"12796_CR2","doi-asserted-by":"publisher","first-page":"975","DOI":"10.1007\/s00138-018-0960-9","volume":"30","author":"E Avots","year":"2019","unstructured":"Avots E, Sapi\u0144ski T, Bachmann M, Kami\u0144ska D (2019) Audiovisual emotion recognition in wild. Mach Vis Appl 30(5):975\u2013985","journal-title":"Mach Vis Appl"},{"key":"12796_CR3","doi-asserted-by":"crossref","unstructured":"Bao W, Li Y, Gu M, Yang M, Li H, Chao L, Tao J (2014) Building a chinese natural emotional audio-visual database. In: 2014 12th International conference on signal processing (ICSP), pp 583\u2013587","DOI":"10.1109\/ICOSP.2014.7015071"},{"key":"12796_CR4","doi-asserted-by":"crossref","unstructured":"Busso C, Bulut M, Lee C-C, Kazemzadeh A, Mower E, Kim S, Chang JN, Lee S, Narayanan S, Narayanan SS (2008) Iemocap: interactive emotional dyadic motion capture database. Language Resources and Evaluation","DOI":"10.1007\/s10579-008-9076-6"},{"key":"12796_CR5","doi-asserted-by":"crossref","unstructured":"Busso C, Deng Z, Yildirim S, Bulut M, Lee CM, Kazemzadeh A, Lee S, Neumann U, Narayanan S (2004) Analysis of emotion recognition using facial expressions, speech and multimodal information. In: Proceedings of the 6th international conference on multimodal interfaces, pp 205\u2013211","DOI":"10.1145\/1027933.1027968"},{"issue":"5","key":"12796_CR6","doi-asserted-by":"publisher","first-page":"981","DOI":"10.1007\/s11760-019-01630-1","volume":"14","author":"X Chen","year":"2020","unstructured":"Chen X, Du J, Zhang H (2020) Lipreading with densenet and resbi-lstm. SIViP 14(5):981\u2013989","journal-title":"SIViP"},{"key":"12796_CR7","doi-asserted-by":"crossref","unstructured":"Chen J, Wang C, Wang K, Yin C, Zhao C, Xu T, Zhang X, Huang Z, Liu M, Yang T (2021) Heu emotion: a large-scale database for multimodal emotion recognition in the wild. Neural Comput and Applic, 1\u201317","DOI":"10.1007\/s00521-020-05616-w"},{"key":"12796_CR8","doi-asserted-by":"crossref","unstructured":"Chung JS, Zisserman A (2016) Lip reading in the wild. In: Proc Asian Conf Comput Vis. Springer (ICASSP), Cham, pp 87\u2013103","DOI":"10.1007\/978-3-319-54184-6_6"},{"key":"12796_CR9","doi-asserted-by":"publisher","unstructured":"Dhanjal AS, Singh W (2019) Tools and techniques of assistive technology for hearing impaired people. In: 2019 International conference on machine learning, big data, cloud and parallel computing (COMITCon), pp 205\u2013210. https:\/\/doi.org\/10.1109\/COMITCon.2019.8862454","DOI":"10.1109\/COMITCon.2019.8862454"},{"issue":"1\u20132","key":"12796_CR10","doi-asserted-by":"publisher","first-page":"33","DOI":"10.1016\/S0167-6393(02)00070-5","volume":"40","author":"E Douglas-Cowie","year":"2003","unstructured":"Douglas-Cowie E, Campbell N, Cowie R, Roach P (2003) Emotional speech: towards a new generation of databases. Speech Commun 40(1\u20132):33\u201360","journal-title":"Speech Commun"},{"key":"12796_CR11","doi-asserted-by":"crossref","unstructured":"Elmadany NED, He Y, Guan L (2016) Multiview emotion recognition via multi-set locality preserving canonical correlation analysis. In: 2016 IEEE international symposium on circuits and systems (ISCAS), pp 590\u2013593","DOI":"10.1109\/ISCAS.2016.7527309"},{"key":"12796_CR12","doi-asserted-by":"publisher","first-page":"39098","DOI":"10.1109\/ACCESS.2019.2904788","volume":"7","author":"H Fabelo","year":"2019","unstructured":"Fabelo H (2019) In-vivo hyperspectral human brain image database for brain cancer detection. IEEE Access 7:39098\u201339116","journal-title":"IEEE Access"},{"key":"12796_CR13","doi-asserted-by":"crossref","unstructured":"Frank MG (2001) Facial expressions. In: Smelser NJ, Baltes PB (eds) International encyclopedia of the social and behavioral sciences. Pergamon, Oxford, pp 5230\u20135234","DOI":"10.1016\/B0-08-043076-7\/01713-7"},{"key":"12796_CR14","doi-asserted-by":"publisher","first-page":"183","DOI":"10.1016\/j.heares.2016.11.012","volume":"344","author":"T Goehring","year":"2017","unstructured":"Goehring T, Bolner F, Monaghan JJ, Van Dijk B, Zarowski A, Bleeck S (2017) Speech enhancement based on neural networks improves speech intelligibility in noise for cochlear implant users. Hear Res 344:183\u2013194","journal-title":"Hear Res"},{"key":"12796_CR15","unstructured":"Goldschen AJ, Garcia ON, Petajan E (2002) Continuous optical automatic speech recognition by lipreading. In: Conf Signals, Syst Comput, pp 572\u2013577"},{"key":"12796_CR16","doi-asserted-by":"publisher","first-page":"204518","DOI":"10.1109\/ACCESS.2020.3036865","volume":"8","author":"M Hao","year":"2020","unstructured":"Hao M, Mamut M, Yadikar N, Aysa A, Ubul K (2020) A survey of research on lipreading technology. IEEE Access 8:204518\u2013204544","journal-title":"IEEE Access"},{"key":"12796_CR17","first-page":"668","volume":"10.3","author":"A Jan","year":"2017","unstructured":"Jan A (2017) Artificial intelligent system for automatic depression level analysis through visual and vocal expressions. IEEE Trans Cogn Develop Syst 10.3:668\u2013680","journal-title":"IEEE Trans Cogn Develop Syst"},{"issue":"4","key":"12796_CR18","first-page":"1","volume":"22","author":"A Khan","year":"2020","unstructured":"Khan A, Sohail A, U Z, AS Q (2020) A survey of the recent architectures of deep convolutional neural networks. Artif Intell Rev 22(4):1\u201362","journal-title":"Artif Intell Rev"},{"key":"12796_CR19","unstructured":"Kossaifi J, Walecki R, Panagakis Y, Shen J, Schmitt M, Ringeval F, Han J, Pandit V, Toisoul A, Schuller BW et al, Sewa DB (2019) A rich database for audio-visual emotion and sentiment research in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"12796_CR20","doi-asserted-by":"publisher","first-page":"722","DOI":"10.1007\/s10489-014-0629-7","volume":"42","author":"KB Kumar","year":"2015","unstructured":"Kumar KB, Kumar RS, Sandesh EPA, Sourabh S, Lajish V (2015) Audio-visual speech recognition using deep learning. Appl Intell 42:722\u2013737","journal-title":"Appl Intell"},{"key":"12796_CR21","unstructured":"Lee H, Ekanadham C, Ng AY (2008) Sparse deep belief net model for visual area v2. In: Adv Neural Inf Process Syst, pp 873\u2013880"},{"key":"12796_CR22","doi-asserted-by":"crossref","unstructured":"Martin O, Kotsia I, Macq B, Pitas I (2006) The enterface\u201905 audio-visual emotion database. In: 22nd International conference on data engineering workshops (ICDEW\u201906), pp 8\u20138","DOI":"10.1109\/ICDEW.2006.145"},{"key":"12796_CR23","doi-asserted-by":"crossref","unstructured":"Martinez B, Ma P, Petridis S, Pantic M (2020) Lipreading using temporal convolutional networks. In: ICASSP 2020-2020 IEEE international conference on acoustics, speech and signal processing (ICASSP), pp 6319\u20136323","DOI":"10.1109\/ICASSP40776.2020.9053841"},{"key":"12796_CR24","unstructured":"Ngiam J, Khosla A, Kim M, Nam J, Lee H, Ng AY (2007) Continuous automatic speech recognition by lipreading in motion-based recognition. In: Proc ACM Int Multimedia Conf Exhib, pp 57\u201366"},{"key":"12796_CR25","doi-asserted-by":"crossref","unstructured":"Noda K, Yamaguchi Y, Nakadai K, Okuno HG, Ogata T (2014) Lipreading using convolutional neural network. In: Proc Conf Int.speech Commun Assoc, pp 1149\u20131153","DOI":"10.21437\/Interspeech.2014-293"},{"issue":"1","key":"12796_CR26","doi-asserted-by":"publisher","first-page":"60","DOI":"10.1109\/TAFFC.2017.2713783","volume":"10","author":"F Noroozi","year":"2019","unstructured":"Noroozi F, Marjanovic M, Njegus A, Escalera S, Anbarjafari G (2019) Audio-visual emotion recognition in video clips. IEEE Trans Affect Comput 10(1):60\u201375","journal-title":"IEEE Trans Affect Comput"},{"key":"12796_CR27","doi-asserted-by":"publisher","first-page":"61401","DOI":"10.1109\/ACCESS.2018.2876710","volume":"6","author":"T Ogawa","year":"2018","unstructured":"Ogawa T, Sasaka Y, Maeda K, Haseyama M (2018) Favorite video classification based on multimodal bidirectional lstm. IEEE Access 6:61401\u201361409. https:\/\/doi.org\/10.1109\/ACCESS.2018.2876710","journal-title":"IEEE Access"},{"key":"12796_CR28","unstructured":"Petajan ED (1984) Automatic lipreading to enhance speech recognition. In: Proc. Global Telecommun. Conf., pp 265\u2013272"},{"issue":"4","key":"12796_CR29","first-page":"43","volume":"9","author":"D Phutela","year":"2015","unstructured":"Phutela D (2015) The importance of non-verbal communication. IUP J Soft Skills 9(4):43","journal-title":"IUP J Soft Skills"},{"key":"12796_CR30","doi-asserted-by":"publisher","first-page":"98","DOI":"10.1016\/j.inffus.2017.02.003","volume":"37","author":"S Poria","year":"2017","unstructured":"Poria S (2017) A review of affective computing: from unimodal analysis to multimodal fusion. Inform Fus 37:98\u2013125","journal-title":"Inform Fus"},{"key":"12796_CR31","doi-asserted-by":"publisher","first-page":"4477","DOI":"10.1016\/j.eswa.2010.09.119","volume":"38","author":"N Puviarasan","year":"2011","unstructured":"Puviarasan N, Palanivel S (2011) Lip reading of hearing-impaired persons using hmm. Expert Syst Appl 38:4477\u20134481","journal-title":"Expert Syst Appl"},{"key":"12796_CR32","doi-asserted-by":"publisher","first-page":"171","DOI":"10.1007\/s40998-018-0142-9","volume":"43","author":"F Rahdari","year":"2019","unstructured":"Rahdari F, Rashedi E, Eftekhari M (2019) A multimodal emotion recognition system using facial landmark analysis. Iran J Sci Technol Trans Electr Eng 43:171\u2013189","journal-title":"Iran J Sci Technol Trans Electr Eng"},{"issue":"4","key":"12796_CR33","doi-asserted-by":"publisher","first-page":"678","DOI":"10.1037\/0022-3514.92.4.678","volume":"92","author":"GI Roisman","year":"2007","unstructured":"Roisman GI, Holland A, Fortuna K, Fraley RC, Clausell E, Clarke A (2007) The adult attachment interview and self-reports of attachment style: an empirical rapprochement. J Person Soc Psychol 92(4):678","journal-title":"J Person Soc Psychol"},{"key":"12796_CR34","doi-asserted-by":"crossref","unstructured":"Shah M, Jain R (1997) Continuous automatic speech recognition by lipreading in motion-based recognition. Dordrecht, pp 321\u2013343","DOI":"10.1007\/978-94-015-8935-2_14"},{"key":"12796_CR35","doi-asserted-by":"publisher","first-page":"102447","DOI":"10.1016\/j.jnca.2019.102447","volume":"149","author":"JN Shoumy","year":"2020","unstructured":"Shoumy JN (2020) Multimodal big data affective analytics: a comprehensive survey using text, audio, visual and physiological signals. J Netw Comput Appl 149:102447","journal-title":"J Netw Comput Appl"},{"key":"12796_CR36","doi-asserted-by":"publisher","unstructured":"Szegedy C, Liu W, Jia Y, Sermanet P, Reed S, Anguelov D, Erhan D, Vanhoucke V, Rabinovich A (2015) Going deeper with convolutions. In: 2015 IEEE conference on computer vision and pattern recognition (CVPR), pp 1\u20139. https:\/\/doi.org\/10.1109\/CVPR.2015.7298594","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"12796_CR37","doi-asserted-by":"crossref","unstructured":"Ullah W, Ullah A, Haq IU, Muhammad K, Sajjad M, Baik SW (2020) Cnn features with bi-directional lstm for real-time anomaly detection in surveillance networks. Multimed Tools Appl, 1\u201317","DOI":"10.1007\/s11042-020-09406-3"},{"key":"12796_CR38","doi-asserted-by":"publisher","first-page":"2523","DOI":"10.1007\/s00034-018-0975-5","volume":"38","author":"F Vakhshiteh","year":"2019","unstructured":"Vakhshiteh F, Almasganj F (2019) Exploration of properly combined audiovisual representation with the entropy measure in audiovisual speech recognition. Circ Syst Signal Process 38:2523\u20132543","journal-title":"Circ Syst Signal Process"},{"key":"12796_CR39","doi-asserted-by":"crossref","unstructured":"Vidal A, Salman A, Lin W-C, Busso C (2020) Msp-face corpus: a natural audiovisual emotional database. In: Proceedings of the 2020 international conference on multimodal interaction, pp 397\u2013405","DOI":"10.1145\/3382507.3418872"},{"key":"12796_CR40","doi-asserted-by":"crossref","unstructured":"Wand M, Koutnik J, Schmidhuber J (2016) Lipreading with long shortterm memory. In: Proc IEEE Int Conf Acoust., Speech Signal Process (ICASSP), pp 6115\u20136119","DOI":"10.1109\/ICASSP.2016.7472852"},{"key":"12796_CR41","doi-asserted-by":"crossref","unstructured":"Wang W (2011) Machine audition: principles, algorithms, and systems. IGI Global","DOI":"10.4018\/978-1-61520-919-4"},{"issue":"5","key":"12796_CR42","doi-asserted-by":"publisher","first-page":"936","DOI":"10.1109\/TMM.2008.927665","volume":"10","author":"Y Wang","year":"2008","unstructured":"Wang Y, Guan L (2008) Recognizing human emotional state from audiovisual signals. IEEE Trans Multimed 10(5):936\u2013946","journal-title":"IEEE Trans Multimed"},{"key":"12796_CR43","doi-asserted-by":"publisher","first-page":"4669","DOI":"10.1109\/TIP.2017.2696744","volume":"26","author":"SC Wong","year":"2017","unstructured":"Wong SC, Stamatescu V, Gatt A, Kearney D, Lee I, McDonnell MD (2017) Track everything: limiting prior knowledge in online multi-object recognition. IEEE Trans Image Process 26:4669\u20134683","journal-title":"IEEE Trans Image Process"},{"key":"12796_CR44","doi-asserted-by":"crossref","unstructured":"Yang S, Zhang Y, Feng D, Yang M, Wang C, Xiao J, Long K, Shan S, Chen X (2019) Lrw-1000: a naturally-distributed large-scale benchmark for lip reading in the wild. In: 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pp 1\u20138","DOI":"10.1109\/FG.2019.8756582"},{"issue":"3","key":"12796_CR45","doi-asserted-by":"publisher","first-page":"300","DOI":"10.1109\/TAFFC.2016.2553038","volume":"8","author":"S Zhalehpour","year":"2017","unstructured":"Zhalehpour S, Onder O, Akhtar Z, Erdem CE (2017) Baum-1: a spontaneous audio-visual face database of affective and mental states. IEEE Trans Affect Comput 8(3):300\u2013313","journal-title":"IEEE Trans Affect Comput"},{"key":"12796_CR46","doi-asserted-by":"publisher","first-page":"32297","DOI":"10.1109\/ACCESS.2019.2901521","volume":"7","author":"S Zhang","year":"2019","unstructured":"Zhang S, Pan X, Cui Y, Zhao X, Liu L (2019) Learning affective video features for facial expression recognition via hybrid deep learning. IEEE Access 7:32297\u201332304","journal-title":"IEEE Access"},{"issue":"10","key":"12796_CR47","doi-asserted-by":"publisher","first-page":"3030","DOI":"10.1109\/TCSVT.2017.2719043","volume":"28","author":"S Zhang","year":"2017","unstructured":"Zhang S, Zhang S, Huang T, Gao W, Tian Q (2017) Learning affective features with a hybrid deep model for audio\u2013visual emotion recognition. IEEE Trans Circuits Syst Video Technol 28(10):3030\u20133043","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"12796_CR48","unstructured":"Zhao G, Pietik\u00e4inen M, Hadid A (2007) Continuous automatic speech recognition by lipreading in motion-based recognition. In: Proc ACM Int Multimedia Conf Exhib, pp 57\u201366"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-022-12796-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-022-12796-1\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-022-12796-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,25]],"date-time":"2023-01-25T08:22:33Z","timestamp":1674634953000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-022-12796-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,4,21]]},"references-count":48,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2023,2]]}},"alternative-id":["12796"],"URL":"https:\/\/doi.org\/10.1007\/s11042-022-12796-1","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"value":"1380-7501","type":"print"},{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,4,21]]},"assertion":[{"value":"28 February 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 January 2022","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 February 2022","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 April 2022","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The dataset used human volunteers for the experiment. The data collection got ethical clearance from Institutional Ethics Clearance Committee Central University of Kerala (CUK\/IHEC\/2017-015). Informed written consent was obtained prior to any experiment or recording from all participants.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval and consent to participate"}}]}}