{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T21:04:06Z","timestamp":1774991046621,"version":"3.50.1"},"reference-count":40,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2025,1,28]],"date-time":"2025-01-28T00:00:00Z","timestamp":1738022400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,28]],"date-time":"2025-01-28T00:00:00Z","timestamp":1738022400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Robust Lipreading Theory and Methods in Open-Scene for Public Safety","award":["U24A20332"],"award-info":[{"award-number":["U24A20332"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62272143"],"award-info":[{"award-number":["62272143"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"University Synergy Innovation Program of Anhui Province","award":["GXXT-2022-054"],"award-info":[{"award-number":["GXXT-2022-054"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1007\/s00530-024-01627-y","type":"journal-article","created":{"date-parts":[[2025,1,28]],"date-time":"2025-01-28T16:42:17Z","timestamp":1738082537000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["WPELip: enhance lip reading with word-prior information"],"prefix":"10.1007","volume":"31","author":[{"given":"Feng","family":"Xue","sequence":"first","affiliation":[]},{"given":"Peng","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yu","family":"Li","sequence":"additional","affiliation":[]},{"given":"Shujie","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,28]]},"reference":[{"issue":"5","key":"1627_CR1","doi-asserted-by":"publisher","first-page":"656","DOI":"10.1097\/AUD.0b013e31812f7185","volume":"28","author":"N Tye-Murray","year":"2007","unstructured":"Tye-Murray, N., Sommers, M.S., Spehar, B.: Audiovisual integration and lipreading abilities of older adults with normal and impaired hearing. Ear Hear. 28(5), 656\u2013668 (2007)","journal-title":"Ear Hear."},{"key":"1627_CR2","doi-asserted-by":"crossref","unstructured":"Rekik, A., Ben-Hamadou, A., Mahdi, W.: Human machine interaction via visual speech spotting. In: Advanced Concepts for Intelligent Vision Systems: 16th International Conference, ACIVS 2015, Catania, Italy, October 26-29, 2015. Proceedings 16, Springer, pp. 566\u2013574 (2015)","DOI":"10.1007\/978-3-319-25903-1_49"},{"issue":"7","key":"1627_CR3","doi-asserted-by":"publisher","first-page":"971","DOI":"10.1109\/TPAMI.2002.1017623","volume":"24","author":"T Ojala","year":"2002","unstructured":"Ojala, T., Pietikainen, M., Maenpaa, T.: Multiresolution gray-scale and rotation invariant texture classification with local binary patterns. 
IEEE Transactions on pattern analysis and machine intelligence 24(7), 971\u2013987 (2002)","journal-title":"IEEE Transactions on pattern analysis and machine intelligence"},{"issue":"6","key":"1627_CR4","doi-asserted-by":"publisher","first-page":"915","DOI":"10.1109\/TPAMI.2007.1110","volume":"29","author":"G Zhao","year":"2007","unstructured":"Zhao, G., Pietikainen, M.: Dynamic texture recognition using local binary patterns with an application to facial expressions. IEEE Trans. Patt. Anal. Mach. Intell. 29(6), 915\u2013928 (2007)","journal-title":"IEEE Trans. Patt. Anal. Mach. Intell."},{"key":"1627_CR5","doi-asserted-by":"crossref","unstructured":"Alizadeh, S., Boostani, R., Asadpour, V.: Lip feature extraction and reduction for hmm-based visual speech recognition systems. In: 2008 9th International Conference on Signal Processing, IEEE, pp. 561\u2013564 (2008)","DOI":"10.1109\/ICOSP.2008.4697195"},{"key":"1627_CR6","doi-asserted-by":"crossref","unstructured":"Chen, J., Tiddeman, B., Zhao, G.: Real-time lip contour extraction and tracking using an improved active contour model. In: Advances in visual computing: 4th International Symposium, ISVC 2008, Las Vegas, NV, USA, Dec 1\u20133, 2008. Proceedings, Part II 4, Springer, pp. 236\u2013245 (2008)","DOI":"10.1007\/978-3-540-89646-3_23"},{"key":"1627_CR7","doi-asserted-by":"crossref","unstructured":"Lan, Y., Theobald, B.-J., Harvey, R.: View independent computer lip-reading. In: 2012 IEEE International Conference on Multimedia and Expo, pp. 432\u2013437 (2012)","DOI":"10.1109\/ICME.2012.192"},{"issue":"1","key":"1627_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s00138-012-0445-1","volume":"24","author":"S Stillittano","year":"2013","unstructured":"Stillittano, S., Girondel, V., Caplier, A.: Lip contour segmentation and tracking compliant with lip-reading application constraints. Mach. Vis. Appl. 24(1), 1\u201318 (2013)","journal-title":"Mach. Vis. Appl."},{"key":"1627_CR9","unstructured":"Assael, Y.M., Shillingford, B., Whiteson, S., De\u00a0Freitas, N.: Lipnet: End-to-end sentence-level lipreading. arXiv preprint arXiv:1611.01599 (2016)"},{"key":"1627_CR10","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., Paluri, M.: Learning spatiotemporal features with 3D convolutional networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4489\u20134497 (2015)","DOI":"10.1109\/ICCV.2015.510"},{"key":"1627_CR11","doi-asserted-by":"crossref","unstructured":"Cho, K., Van\u00a0Merri\u00ebnboer, B., Gulcehre, C., Bahdanau, D., Bougares, F., Schwenk, H., Bengio, Y.: Learning phrase representations using rnn encoder-decoder for statistical machine translation. arXiv preprint arXiv:1406.1078 (2014)","DOI":"10.3115\/v1\/D14-1179"},{"issue":"5","key":"1627_CR12","doi-asserted-by":"publisher","first-page":"2421","DOI":"10.1121\/1.2229005","volume":"120","author":"M Cooke","year":"2006","unstructured":"Cooke, M., Barker, J., Cunningham, S., Shao, X.: An audio-visual corpus for speech perception and automatic speech recognition. J. Acoust. Soc. Am. 120(5), 2421\u20132424 (2006)","journal-title":"J. Acoust. Soc. Am."},{"key":"1627_CR13","doi-asserted-by":"crossref","unstructured":"Xu, K., Li, D., Cassimatis, N., Wang, X.: Lcanet: End-to-end lipreading with cascaded attention-ctc. In: 2018 13th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2018), pp. 
548\u2013555 (2018)","DOI":"10.1109\/FG.2018.00088"},{"key":"1627_CR14","unstructured":"Srivastava, R.K., Greff, K., Schmidhuber, J.: Highway networks. arXiv preprint arXiv:1505.00387 (2015)"},{"issue":"1s","key":"1627_CR15","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3524620","volume":"19","author":"F Xue","year":"2023","unstructured":"Xue, F., Yang, T., Liu, K., Hong, Z., Cao, M., Guo, D., Hong, R.: Lcsnet: end-to-end lipreading with channel-aware feature selection. ACM Trans. Multimed. Comput. Commun. Appl. 19(1s), 1\u201321 (2023)","journal-title":"ACM Trans. Multimed. Comput. Commun. Appl."},{"key":"1627_CR16","unstructured":"Weng, X., Kitani, K.: Learning spatio-temporal features with two-stream deep 3d cnns for lipreading. arXiv preprint arXiv:1905.02540 (2019)"},{"key":"1627_CR17","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Xu, R., Wang, X., Hou, P., Tang, H., Song, M.: Hearing lips: improving lip reading by distilling speech recognizers. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 6917\u20136924 (2020)","DOI":"10.1609\/aaai.v34i04.6174"},{"key":"1627_CR18","doi-asserted-by":"crossref","unstructured":"Xue, F., Li, Y., Liu, D., Xie, Y., Wu, L., Hong, R.: Lipformer: learning to lipread unseen speakers based on visual-landmark transformers. In: IEEE Transactions on Circuits and Systems for Video Technology (2023)","DOI":"10.1109\/TCSVT.2023.3282224"},{"key":"1627_CR19","doi-asserted-by":"crossref","unstructured":"Noda, K., Yamaguchi, Y., Nakadai, K., Okuno, H.G., Ogata, T., et al.: Lipreading using convolutional neural network. In: Interspeech, vol. 1, p. 3 (2014)","DOI":"10.21437\/Interspeech.2014-293"},{"key":"1627_CR20","doi-asserted-by":"publisher","first-page":"722","DOI":"10.1007\/s10489-014-0629-7","volume":"42","author":"K Noda","year":"2015","unstructured":"Noda, K., Yamaguchi, Y., Nakadai, K., Okuno, H.G., Ogata, T.: Audio-visual speech recognition using deep learning. Appl. intell. 42, 722\u2013737 (2015)","journal-title":"Appl. intell."},{"key":"1627_CR21","doi-asserted-by":"crossref","unstructured":"Graves, A., Fern\u00e1ndez, S., Gomez, F., Schmidhuber, J.: Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks. In: Proceedings of the 23rd International Conference on Machine Learning, pp. 369\u2013376 (2006)","DOI":"10.1145\/1143844.1143891"},{"key":"1627_CR22","doi-asserted-by":"crossref","unstructured":"Son\u00a0Chung, J., Senior, A., Vinyals, O., Zisserman, A.: Lip reading sentences in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6447\u20136456 (2017)","DOI":"10.1109\/CVPR.2017.367"},{"key":"1627_CR23","unstructured":"Chung, J., Zisserman, A.: Lip reading in profile. In: Ritish Machine Vision Conference, 2017 (2017). British Machine Vision Association and Society for Pattern Recognition"},{"issue":"8","key":"1627_CR24","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Comput 9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput"},{"key":"1627_CR25","unstructured":"Sutskever, I., Vinyals, O., Le, Q.V.: Sequence to sequence learning with neural networks. 
Advances in neural information processing systems 27 (2014)"},{"key":"1627_CR26","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., et al.: Imagenet large scale visual recognition challenge. Int. J. Comput. Vis. 115, 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vis."},{"key":"1627_CR27","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 (2015)"},{"key":"1627_CR28","doi-asserted-by":"crossref","unstructured":"Huang, Y., Liang, X., Fang, C.: Callip: lipreading using contrastive and attribute learning. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 2492\u20132500 (2021)","DOI":"10.1145\/3474085.3475420"},{"issue":"11","key":"1627_CR29","doi-asserted-by":"publisher","first-page":"930","DOI":"10.1038\/s42256-022-00550-z","volume":"4","author":"P Ma","year":"2022","unstructured":"Ma, P., Petridis, S., Pantic, M.: Visual speech recognition for multiple languages in the wild. Nat Mach Intell 4(11), 930\u2013939 (2022)","journal-title":"Nat Mach Intell"},{"key":"1627_CR30","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? a new model and the kinetics dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6299\u20136308 (2017)","DOI":"10.1109\/CVPR.2017.502"},{"issue":"1","key":"1627_CR31","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s00530-023-01226-3","volume":"30","author":"Y Li","year":"2024","unstructured":"Li, Y., Xue, F., Wu, L., Xie, Y., Li, S.: Generalizing sentence-level lipreading to unseen speakers: a two-stream end-to-end approach. Multimed. Syst. 30(1), 1\u201310 (2024)","journal-title":"Multimed. Syst."},{"key":"1627_CR32","doi-asserted-by":"crossref","unstructured":"Yan, S., Xiong, Y., Lin, D.: Spatial temporal graph convolutional networks for skeleton-based action recognition. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32 (2018)","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"1627_CR33","doi-asserted-by":"crossref","unstructured":"Wang, Y., Jiang, Y., Li, J., Ni, B., Dai, W., Li, C., Xiong, H., Li, T.: Contrastive regression for domain adaptation on gaze estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19376\u201319385 (2022)","DOI":"10.1109\/CVPR52688.2022.01877"},{"key":"1627_CR34","doi-asserted-by":"crossref","unstructured":"Huang, X., Wang, Y., Liu, Y., Ni, B., Zhang, W., Liu, J., Li, T.: Audioear: single-view ear reconstruction for personalized spatial audio. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, pp. 944\u2013952 (2023)","DOI":"10.1609\/aaai.v37i1.25174"},{"key":"1627_CR35","doi-asserted-by":"crossref","unstructured":"Zhang, X., Gong, H., Dai, X., Yang, F., Liu, N., Liu, M.: Understanding pictograph with facial features: end-to-end sentence-level lip reading of chinese. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 33, pp. 9211\u20139218 (2019)","DOI":"10.1609\/aaai.v33i01.33019211"},{"key":"1627_CR36","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Xu, R., Song, M.: A cascade sequence-to-sequence model for chinese mandarin lip reading. 
In: Proceedings of the 1st ACM International Conference on Multimedia in Asia, pp. 1\u20136 (2019)","DOI":"10.1145\/3338533.3366579"},{"key":"1627_CR37","first-page":"1755","volume":"10","author":"DE King","year":"2009","unstructured":"King, D.E.: Dlib-ml: a machine learning toolkit. J. Mach. Learn. Res. 10, 1755\u20131758 (2009)","journal-title":"J. Mach. Learn. Res."},{"key":"1627_CR38","unstructured":"Diederik, P.K.: Adam: a method for stochastic optimization (2014)"},{"key":"1627_CR39","unstructured":"Bengio, S., Vinyals, O., Jaitly, N., Shazeer, N.: Scheduled sampling for sequence prediction with recurrent neural networks. Adv. Neural Inf. Process. Syst. 28 (2015)"},{"key":"1627_CR40","unstructured":"Maaten, L., Hinton, G.: Visualizing data using t-sne. J. Mach Learn Res 9(11) (2008)"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01627-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01627-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01627-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,21]],"date-time":"2025-04-21T19:34:20Z","timestamp":1745264060000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01627-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1,28]]},"references-count":40,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2025,4]]}},"alternative-id":["1627"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01627-y","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1,28]]},"assertion":[{"value":"1 September 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 December 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 January 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"84"}}