{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T12:02:23Z","timestamp":1774440143509,"version":"3.50.1"},"reference-count":42,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No. U24A20332"],"award-info":[{"award-number":["No. U24A20332"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No. U24A20332"],"award-info":[{"award-number":["No. U24A20332"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No. U24A20332"],"award-info":[{"award-number":["No. U24A20332"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No. U24A20332"],"award-info":[{"award-number":["No. U24A20332"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"the Seventh Special Support Plan for Innovation and Entrepreneurship in Anhui Province"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1007\/s00530-025-02101-z","type":"journal-article","created":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T09:15:04Z","timestamp":1765012504000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["D2M2Lip: dual-domain feature fusion and motion magnification for lip reading"],"prefix":"10.1007","volume":"32","author":[{"given":"Baochao","family":"Zhu","sequence":"first","affiliation":[]},{"given":"Shujie","family":"Li","sequence":"additional","affiliation":[]},{"given":"Jinrui","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Feng","family":"Xue","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,12,6]]},"reference":[{"key":"2101_CR1","doi-asserted-by":"crossref","unstructured":"Ma, P., Wang, Y., Petridis, S., Shen, J., Pantic, M.: Training strategies for improved lip-reading. In: ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 
8472\u20138476 (2022)","DOI":"10.1109\/ICASSP43922.2022.9746706"},{"key":"2101_CR2","doi-asserted-by":"publisher","first-page":"6462","DOI":"10.1109\/TMM.2024.3352388","volume":"26","author":"JH Yeo","year":"2024","unstructured":"Yeo, J.H., Kim, M., Choi, J., Kim, D.H., Ro, Y.M.: Akvsr: audio knowledge empowered visual speech recognition by compressing audio knowledge of a pretrained model. IEEE Trans. Multimed. 26, 6462\u20136474 (2024)","journal-title":"IEEE Trans. Multimed."},{"key":"2101_CR3","doi-asserted-by":"publisher","first-page":"1122","DOI":"10.1109\/TIP.2024.3359045","volume":"33","author":"P Song","year":"2024","unstructured":"Song, P., Guo, D., Yang, X., Tang, S., Wang, M.: Emotional video captioning with vision-based emotion interpretation network. IEEE Trans. Image Process. 33, 1122\u20131135 (2024)","journal-title":"IEEE Trans. Image Process."},{"key":"2101_CR4","doi-asserted-by":"crossref","unstructured":"Xu, B., Lu, C., Guo, Y., Wang, J.: Discriminative multi-modality speech recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14433\u201314442 (2020)","DOI":"10.1109\/CVPR42600.2020.01444"},{"key":"2101_CR5","doi-asserted-by":"publisher","first-page":"2199","DOI":"10.1109\/TMM.2021.3065578","volume":"23","author":"C Bai","year":"2021","unstructured":"Bai, C., Li, H., Zhang, J., Huang, L., Zhang, L.: Unsupervised adversarial instance-level image retrieval. IEEE Trans. Multimed. 23, 2199\u20132207 (2021)","journal-title":"IEEE Trans. Multimed."},{"key":"2101_CR6","doi-asserted-by":"crossref","unstructured":"Cheng, X., Jin, T., Huang, R., Li, L., Lin, W., Wang, Z., Wang, Y., Liu, H., Yin, A., Zhao, Z.: Mixspeech: Cross-modality self-learning with audio-visual stream mixup for visual speech translation and recognition. In: 2023 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 15689\u201315699. IEEE, Paris, France (2023)","DOI":"10.1109\/ICCV51070.2023.01442"},{"key":"2101_CR7","doi-asserted-by":"crossref","unstructured":"Xu, K., Li, D., Cassimatis, N., Wang, X.: Lcanet: End-to-end lipreading with cascaded attention-ctc. In: 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pp. 548\u2013555. IEEE, Xi\u2019an (2018)","DOI":"10.1109\/FG.2018.00088"},{"key":"2101_CR8","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Xu, R., Wang, X., Hou, P., Tang, H., Song, M.: Hearing lips: Improving lip reading by distilling speech recognizers. Proceedings of the AAAI Conference on Artificial Intelligence 34(04), 6917\u20136924 (2020)","DOI":"10.1609\/aaai.v34i04.6174"},{"issue":"1","key":"2101_CR9","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1007\/s00530-023-01226-3","volume":"30","author":"Y Li","year":"2024","unstructured":"Li, Y., Xue, F., Wu, L., Xie, Y., Li, S.: Generalizing sentence-level lipreading to unseen speakers: a two-stream end-to-end approach. Multimed. Syst. 30(1), 42 (2024)","journal-title":"Multimed. Syst."},{"issue":"2","key":"2101_CR10","doi-asserted-by":"publisher","first-page":"84","DOI":"10.1007\/s00530-024-01627-y","volume":"31","author":"F Xue","year":"2025","unstructured":"Xue, F., Li, P., Li, Y., Li, S.: Wpelip: enhance lip reading with word-prior information. Multimed. Syst. 31(2), 84 (2025)","journal-title":"Multimed. 
Syst."},{"key":"2101_CR11","first-page":"1","volume":"62","author":"Z Yao","year":"2024","unstructured":"Yao, Z., Fan, G., Fan, J., Gan, M., Philip Chen, C.L.: Spatial-frequency dual-domain feature fusion network for low-light remote sensing image enhancement. IEEE Trans. Geosci. Remote Sens. 62, 1\u201316 (2024)","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"2101_CR12","doi-asserted-by":"crossref","unstructured":"Chen, Y., Fan, H., Xu, B., Yan, Z., Kalantidis, Y., Rohrbach, M., Shuicheng, Y., Feng, J.: Drop an octave: Reducing spatial redundancy in convolutional neural networks with octave convolution. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 3434\u20133443. IEEE, Seoul, Korea (South) (2019)","DOI":"10.1109\/ICCV.2019.00353"},{"issue":"4","key":"2101_CR13","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2185520.2185561","volume":"31","author":"H-Y Wu","year":"2012","unstructured":"Wu, H.-Y., Rubinstein, M., Shih, E., Guttag, J., Durand, F., Freeman, W.: Eulerian video magnification for revealing subtle changes in the world. ACM Trans. Gr. (TOG) 31(4), 1\u20138 (2012)","journal-title":"ACM Trans. Gr. (TOG)"},{"key":"2101_CR14","doi-asserted-by":"publisher","first-page":"23495","DOI":"10.52202\/068431-1707","volume":"35","author":"C Si","year":"2022","unstructured":"Si, C., Yu, W., Zhou, P., Zhou, Y., Wang, X., Yan, S.: Inception transformer. Adv. Neural. Inf. Process. Syst. 35, 23495\u201323509 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"2101_CR15","doi-asserted-by":"crossref","unstructured":"Yun, G., Yoo, J., Kim, K., Lee, J., Kim, D.H.: Spanet: Frequency-balancing token mixer using spectral pooling aggregation modulation. In: 2023 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 6090\u20136101. IEEE, Paris, France (2023)","DOI":"10.1109\/ICCV51070.2023.00562"},{"key":"2101_CR16","doi-asserted-by":"crossref","unstructured":"Wang, F., Guo, D., Li, K., Zhong, Z., Wang, M.: Frequency decoupling for motion magnification via multi-level isomorphic architecture. In: 2024 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 18984\u201318994. IEEE, Seattle, WA, USA (2024)","DOI":"10.1109\/CVPR52733.2024.01796"},{"key":"2101_CR17","doi-asserted-by":"crossref","unstructured":"Potamianos, G., Neti, C.: Improved roi and within frame discriminant features for lipreading. In: Proceedings 2001 International Conference on Image Processing (Cat. No.01CH37205), vol. 3, pp. 250\u20132533 (2001)","DOI":"10.1109\/ICIP.2001.958098"},{"key":"2101_CR18","unstructured":"Lucey, P., Potamianos, G., Sridharan, S.: Patch-based analysis of visual speech from multiple views. In: Proceedings of the International Conference on Auditory-Visual Speech Processing 2008, pp. 69\u201374 (2008). AVISA"},{"key":"2101_CR19","unstructured":"Lan, Y., Theobald, B.-J., Harvey, R.W., Ong, E.-J., Bowden, R.: Improving visual features for lip-reading. In: AVSP, pp. 7\u20133 (2010)"},{"issue":"9","key":"2101_CR20","doi-asserted-by":"publisher","first-page":"1306","DOI":"10.1109\/JPROC.2003.817150","volume":"91","author":"G Potamianos","year":"2003","unstructured":"Potamianos, G., Neti, C., Gravier, G., Garg, A., Senior, A.W.: Recent advances in the automatic recognition of audiovisual speech. Proc. IEEE 91(9), 1306\u20131326 (2003)","journal-title":"Proc. 
IEEE"},{"key":"2101_CR21","doi-asserted-by":"crossref","unstructured":"Noda, K., Yamaguchi, Y., Nakadai, K., Okuno, H.G., Ogata, T., et al.: Lipreading using convolutional neural network. In: Interspeech, vol. 1, p. 3 (2014)","DOI":"10.21437\/Interspeech.2014-293"},{"key":"2101_CR22","unstructured":"Assael, Y.M., Shillingford, B., Whiteson, S., Freitas, N.: LipNet: End-to-End Sentence-level Lipreading. arXiv (2016)"},{"key":"2101_CR23","doi-asserted-by":"publisher","first-page":"113","DOI":"10.1016\/j.patrec.2019.04.012","volume":"125","author":"Y Miao","year":"2019","unstructured":"Miao, Y., Han, J., Gao, Y., Zhang, B.: St-cnn: spatial-temporal convolutional neural network for crowd counting in videos. Pattern Recogn. Lett. 125, 113\u2013118 (2019)","journal-title":"Pattern Recogn. Lett."},{"key":"2101_CR24","doi-asserted-by":"crossref","unstructured":"Chung, J.S., Senior, A., Vinyals, O., Zisserman, A.: Lip reading sentences in the wild. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3444\u20133453. IEEE, Honolulu, HI, USA (2017)","DOI":"10.1109\/CVPR.2017.367"},{"key":"2101_CR25","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Xu, R., Song, M.: A cascade sequence-to-sequence model for Chinese mandarin lip reading. In: Proceedings of the ACM Multimedia Asia, pp. 1\u20136. ACM, Beijing China (2019)","DOI":"10.1145\/3338533.3366579"},{"issue":"01","key":"2101_CR26","first-page":"9211","volume":"33","author":"X Zhang","year":"2019","unstructured":"Zhang, X., Gong, H., Dai, X., Yang, F., Liu, N., Liu, M.: Understanding pictograph with facial features: End-to-end sentence-level lip reading of Chinese. Proc. AAAI Conf. Artif. Intell. 33(01), 9211\u20139218 (2019)","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"2101_CR27","doi-asserted-by":"crossref","unstructured":"Prajwal, K.R., Afouras, T., Zisserman, A.: Sub-word level lip reading with visual attention. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5152\u20135162. IEEE, New Orleans, LA, USA (2022)","DOI":"10.1109\/CVPR52688.2022.00510"},{"issue":"1s","key":"2101_CR28","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3524620","volume":"19","author":"F Xue","year":"2023","unstructured":"Xue, F., Yang, T., Liu, K., Hong, Z., Cao, M., Guo, D., Hong, R.: Lcsnet: end-to-end lipreading with channel-aware feature selection. ACM Trans. Multimed. Comput. Commun. Appl. 19(1s), 1\u201321 (2023)","journal-title":"ACM Trans. Multimed. Comput. Commun. Appl."},{"key":"2101_CR29","unstructured":"Weng, X., Kitani, K.: Learning Spatio-Temporal Features with Two-Stream Deep 3D CNNs for Lipreading. arXiv (2019)"},{"key":"2101_CR30","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? a new model and the kinetics dataset. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4724\u20134733. IEEE, Honolulu, HI (2017)","DOI":"10.1109\/CVPR.2017.502"},{"issue":"9","key":"2101_CR31","doi-asserted-by":"publisher","first-page":"4507","DOI":"10.1109\/TCSVT.2023.3282224","volume":"33","author":"F Xue","year":"2023","unstructured":"Xue, F., Li, Y., Liu, D., Xie, Y., Wu, L., Hong, R.: Lipformer: learning to lipread unseen speakers based on visual-landmark transformers. IEEE Trans. Circuits Syst. Video Technol. 33(9), 4507\u20134517 (2023)","journal-title":"IEEE Trans. Circuits Syst. 
Video Technol."},{"key":"2101_CR32","doi-asserted-by":"crossref","unstructured":"Li, Y., Xue, F., Guo, D., Tang, S., Li, P., Li, S., Hong, R.: Cflip: Generalizing lipreading to unseen speakers by learning common features. IEEE Trans. Comput. Soc. Syst. 1\u201316 (2025)","DOI":"10.1109\/TCSS.2025.3586845"},{"issue":"10","key":"2101_CR33","doi-asserted-by":"publisher","first-page":"3","DOI":"10.23915\/distill.00003","volume":"1","author":"A Odena","year":"2016","unstructured":"Odena, A., Dumoulin, V., Olah, C.: Deconvolution and checkerboard artifacts. Distill 1(10), 3 (2016)","journal-title":"Distill"},{"issue":"4","key":"2101_CR34","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2461912.2461966","volume":"32","author":"N Wadhwa","year":"2013","unstructured":"Wadhwa, N., Rubinstein, M., Durand, F., Freeman, W.T.: Phase-based video motion processing. ACM Trans. Gr. (ToG) 32(4), 1\u201310 (2013)","journal-title":"ACM Trans. Gr. (ToG)"},{"issue":"5","key":"2101_CR35","doi-asserted-by":"publisher","first-page":"2421","DOI":"10.1121\/1.2229005","volume":"120","author":"M Cooke","year":"2006","unstructured":"Cooke, M., Barker, J., Cunningham, S., Shao, X.: An audio-visual corpus for speech perception and automatic speech recognition. J. Acoust. Soc. Am. 120(5), 2421\u20132424 (2006)","journal-title":"J. Acoust. Soc. Am."},{"key":"2101_CR36","first-page":"1755","volume":"10","author":"DE King","year":"2009","unstructured":"King, D.E.: Dlib-ml: a machine learning toolkit. J Mach. Learn. Res. 10, 1755\u20131758 (2009)","journal-title":"J Mach. Learn. Res."},{"key":"2101_CR37","unstructured":"Kingma, D.P., Ba, J.: Adam: A Method for Stochastic Optimization. arXiv (2017)"},{"key":"2101_CR38","doi-asserted-by":"crossref","unstructured":"Huang, Y., Liang, X., Fang, C.: Callip: Lipreading using contrastive and attribute learning. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 2492\u20132500. ACM, Virtual Event China (2021)","DOI":"10.1145\/3474085.3475420"},{"key":"2101_CR39","doi-asserted-by":"crossref","unstructured":"Wu, L., Zhang, X., Zhang, Y., Zheng, C., Liu, T., Xie, L., Yan, Y., Yin, E.: Landmark-guided cross-speaker lip reading with mutual information regularization. arXiv preprint arXiv:2403.16071 (2024)","DOI":"10.63317\/28em5tq9jc2n"},{"key":"2101_CR40","doi-asserted-by":"crossref","unstructured":"Dorkenwald, M., Buchler, U., Ommer, B.: Unsupervised magnification of posture deviations across subjects. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8256\u20138266 (2020)","DOI":"10.1109\/CVPR42600.2020.00828"},{"key":"2101_CR41","doi-asserted-by":"crossref","unstructured":"Oh, T.-H., Jaroensri, R., Kim, C., Elgharib, M., Durand, F., Freeman, W.T., Matusik, W.: Learning-based video motion magnification. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 633\u2013648 (2018)","DOI":"10.1007\/978-3-030-01225-0_39"},{"key":"2101_CR42","doi-asserted-by":"crossref","unstructured":"Takeda, S., Akagi, Y., Okami, K., Isogai, M., Kimata, H.: Video magnification in the wild using fractional anisotropy in temporal distribution. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
1614\u20131622 (2019)","DOI":"10.1109\/CVPR.2019.00171"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-02101-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-025-02101-z","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-02101-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T08:43:09Z","timestamp":1774428189000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-025-02101-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":42,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,2]]}},"alternative-id":["2101"],"URL":"https:\/\/doi.org\/10.1007\/s00530-025-02101-z","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12,6]]},"assertion":[{"value":"15 September 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 November 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 December 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"40"}}