{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,27]],"date-time":"2026-03-27T17:01:06Z","timestamp":1774630866064,"version":"3.50.1"},"reference-count":76,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2025,3,19]],"date-time":"2025-03-19T00:00:00Z","timestamp":1742342400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,3,19]],"date-time":"2025-03-19T00:00:00Z","timestamp":1742342400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62302324"],"award-info":[{"award-number":["62302324"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62072319"],"award-info":[{"award-number":["62072319"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62272329"],"award-info":[{"award-number":["62272329"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100012542","name":"Sichuan Province Science and Technology Support Program","doi-asserted-by":"publisher","award":["2023YFQ0022"],"award-info":[{"award-number":["2023YFQ0022"]}],"id":[{"id":"10.13039\/100012542","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1007\/s11263-025-02399-2","type":"journal-article","created":{"date-parts":[[2025,3,19]],"date-time":"2025-03-19T20:58:11Z","timestamp":1742417891000},"page":"4749-4769","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["LR-ASD: Lightweight and Robust Network for Active Speaker Detection"],"prefix":"10.1007","volume":"133","author":[{"given":"Junhua","family":"Liao","sequence":"first","affiliation":[]},{"given":"Haihan","family":"Duan","sequence":"additional","affiliation":[]},{"given":"Kanghui","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Wanbing","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Yanbing","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Liangyin","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9677-7142","authenticated-orcid":false,"given":"Yanru","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,3,19]]},"reference":[{"key":"2399_CR1","doi-asserted-by":"crossref","unstructured":"Alc\u00e1zar, J. L., Caba, F., Mai, L., Perazzi, F., Lee, J. -Y., Arbel\u00e1ez, P., & Ghanem, B. (2020). Active speakers in context. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 12465\u201312474).","DOI":"10.1109\/CVPR42600.2020.01248"},{"key":"2399_CR2","doi-asserted-by":"crossref","unstructured":"Alc\u00e1zar, J. L., Caba, F., Thabet, A. 
K., & Ghanem, B. (2021). Maas: Multi-modal assignation for active speaker detection. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 265\u2013274).","DOI":"10.1109\/ICCV48922.2021.00033"},{"key":"2399_CR3","unstructured":"Afouras, T., Chung, J. S., & Zisserman, A. (2018). Lrs3-ted: A large-scale dataset for visual speech recognition. arXiv preprint arXiv:1809.00496."},{"key":"2399_CR4","doi-asserted-by":"crossref","unstructured":"Alc\u00e1zar, J. L., Cordes, M., Zhao, C., & Ghanem, B. (2022). End-to-end active speaker detection. In Computer Vision\u2013ECCV 2022: 17th European Conference, Part XXXVII (pp. 126\u2013143). Springer.","DOI":"10.1007\/978-3-031-19836-6_8"},{"key":"2399_CR5","doi-asserted-by":"crossref","unstructured":"Afouras, T., Owens, A., Chung, J. S., & Zisserman, A. (2020). Self-supervised learning of audio-visual objects from video. In Computer Vision\u2013ECCV 2020: 16th European conference, Part XVIII 16 (pp. 208\u2013224). Springer.","DOI":"10.1007\/978-3-030-58523-5_13"},{"key":"2399_CR6","doi-asserted-by":"crossref","unstructured":"Arandjelovic, R., & Zisserman, A. (2018). Objects that sound. In Proceedings of the European conference on computer vision (ECCV) (pp. 435\u2013451).","DOI":"10.1007\/978-3-030-01246-5_27"},{"issue":"5","key":"2399_CR7","doi-asserted-by":"publisher","first-page":"1761","DOI":"10.1109\/TPAMI.2019.2953020","volume":"43","author":"Y Ban","year":"2021","unstructured":"Ban, Y., Alameda-Pineda, X., Girin, L., & Horaud, R. (2021). Variational Bayesian inference for audio-visual tracking of multiple speakers. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(5), 1761\u20131776.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2399_CR8","doi-asserted-by":"publisher","first-page":"2071","DOI":"10.1109\/TMM.2020.3007350","volume":"23","author":"C Beyan","year":"2020","unstructured":"Beyan, C., Shahid, M., & Murino, V. (2020). Realvad: A real-world dataset and a method for voice activity detection by body motion analysis. IEEE Transactions on Multimedia, 23, 2071\u20132085.","journal-title":"IEEE Transactions on Multimedia"},{"key":"2399_CR9","doi-asserted-by":"crossref","unstructured":"Cutler, R., & Davis, L. (2000). Look who\u2019s talking: Speaker detection using video and audio correlation. In 2000 IEEE international conference on multimedia and expo (vol. 3, pp. 1589\u20131592). IEEE.","DOI":"10.1109\/ICME.2000.871073"},{"key":"2399_CR10","unstructured":"Chung, J., Gulcehre, C., Cho, K., & Bengio, Y. (2014). Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555."},{"key":"2399_CR11","doi-asserted-by":"crossref","unstructured":"Chollet, F. (2017). Xception: Deep learning with depthwise separable convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1251\u20131258).","DOI":"10.1109\/CVPR.2017.195"},{"key":"2399_CR12","unstructured":"Chung, J. S. (2019). Naver at activitynet challenge 2019\u2013task b active speaker detection (ava). arXiv preprint arXiv:1906.10555."},{"key":"2399_CR13","doi-asserted-by":"crossref","unstructured":"Chung, J. S., Nagrani, A., & Zisserman, A. (2018). Voxceleb2: Deep speaker recognition. In INTERSPEECH.","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"2399_CR14","doi-asserted-by":"crossref","unstructured":"Chakravarty, P., & Tuytelaars, T.: Cross-modal supervision for learning active speaker detection in video. 
In Computer Vision\u2013ECCV 2016: 14th European conference, Part V 14 (pp. 285\u2013301). Springer.","DOI":"10.1007\/978-3-319-46454-1_18"},{"key":"2399_CR15","volume-title":"Oxford guide to plain English","author":"M Cutts","year":"2020","unstructured":"Cutts, M. (2020). Oxford guide to plain English. Oxford University Press."},{"key":"2399_CR16","doi-asserted-by":"crossref","unstructured":"Chung, J. S., & Zisserman, A. (2017). Out of time: Automated lip sync in the wild. In Computer Vision\u2013ACCV 2016 Workshops: ACCV 2016 International Workshops, Taipei, Taiwan, November 20-24, 2016, Revised Selected Papers, Part II 13 (pp. 251\u2013263). Springer.","DOI":"10.1007\/978-3-319-54427-4_19"},{"key":"2399_CR17","doi-asserted-by":"crossref","unstructured":"Chakravarty, P., Zegers, J., Tuytelaars, T., & Van\u00a0hamme, H. (2016). Active speaker detection with audio-visual co-training. In Proceedings of the 18th ACM international conference on multimodal interaction (pp. 312\u2013316).","DOI":"10.1145\/2993148.2993172"},{"key":"2399_CR18","doi-asserted-by":"crossref","unstructured":"Datta, G., Etchart, T., Yadav, V., Hedau, V., Natarajan, P., & Chang, S. -F. (2022). Asd-transformer: Efficient active speaker detection using self and multimodal transformers. In ICASSP 2022-2022 IEEE international conference on acoustics, speech and signal processing (ICASSP) (pp. 4568\u20134572). IEEE.","DOI":"10.1109\/ICASSP43922.2022.9746991"},{"issue":"9","key":"2399_CR19","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3648681","volume":"20","author":"H Duan","year":"2024","unstructured":"Duan, H., Liao, J., Lin, L., El Saddik, A., & Cai, W. (2024). Meetor: A human-centered automatic video editing system for meeting recordings. ACM Transactions on Multimedia Computing, Communications and Applications, 20(9), 1\u201323.","journal-title":"ACM Transactions on Multimedia Computing, Communications and Applications"},{"key":"2399_CR20","doi-asserted-by":"crossref","unstructured":"Duan, H., Liao, J., Lin, L., & Cai, W. (2022). Flad: A human-centered video content flaw detection system for meeting recordings. In Proceedings of the 32nd workshop on network and operating systems support for digital audio and video (pp. 43\u201349).","DOI":"10.1145\/3534088.3534349"},{"issue":"4","key":"2399_CR21","doi-asserted-by":"publisher","first-page":"357","DOI":"10.1109\/TASSP.1980.1163420","volume":"28","author":"S Davis","year":"1980","unstructured":"Davis, S., & Mermelstein, P. (1980). Comparison of parametric representations for monosyllabic word recognition in continuously spoken sentences. IEEE Transactions on Acoustics, Speech, and Signal Processing, 28(4), 357\u2013366.","journal-title":"IEEE Transactions on Acoustics, Speech, and Signal Processing"},{"key":"2399_CR22","unstructured":"Donley, J., Tourbabin, V., Lee, J. -S., Broyles, M., Jiang, H., Shen, J., Pantic, M., Ithapu, V. K., & Mehra, R. (2021). Easycom: An augmented reality dataset to support algorithms for easy communication in noisy environments. arXiv preprint arXiv:2107.04174."},{"key":"2399_CR23","doi-asserted-by":"crossref","unstructured":"Ding, X., Zhang, X., Ma, N., Han, J., Ding, G., Sun, J. (2021). Repvgg: Making vgg-style convnets great again. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 13733\u201313742).","DOI":"10.1109\/CVPR46437.2021.01352"},{"key":"2399_CR24","unstructured":"Howard, A. G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., Andreetto, M., & Adam, H. (2017). 
Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861."},{"key":"2399_CR25","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., & Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 770\u2013778)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"10","key":"2399_CR26","doi-asserted-by":"publisher","first-page":"1577","DOI":"10.1109\/TASLP.2019.2921890","volume":"27","author":"A Jati","year":"2019","unstructured":"Jati, A., & Georgiou, P. (2019). Neural predictive coding using convolutional neural networks toward unsupervised learning of speaker characteristics. IEEE\/ACM Transactions on Audio, Speech, and Language Processing, 27(10), 1577\u20131589.","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"key":"2399_CR27","doi-asserted-by":"crossref","unstructured":"Jiang, Y., Tao, R., Pan, Z., & Li, H. (2023). Target active speaker detection with audio-visual cues. arXiv preprint arXiv:2305.12831.","DOI":"10.21437\/Interspeech.2023-574"},{"issue":"12","key":"2399_CR28","doi-asserted-by":"publisher","first-page":"1931","DOI":"10.1109\/TASLP.2014.2354236","volume":"22","author":"M Krawczyk","year":"2014","unstructured":"Krawczyk, M., & Gerkmann, T. (2014). Stft phase reconstruction in voiced speech for an improved single-channel speech enhancement. IEEE\/ACM Transactions on Audio, Speech, and Language Processing, 22(12), 1931\u20131940.","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"key":"2399_CR29","doi-asserted-by":"crossref","unstructured":"K\u00f6p\u00fckl\u00fc, O., Taseska, M., & Rigoll, G. (2021). How to design a three-stage architecture for audio-visual active speaker detection in the wild. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 1193\u20131203).","DOI":"10.1109\/ICCV48922.2021.00123"},{"key":"2399_CR30","doi-asserted-by":"publisher","first-page":"2885","DOI":"10.1007\/s11263-024-02000-2","volume":"132","author":"Z-S Liu","year":"2024","unstructured":"Liu, Z.-S., Courant, R., & Kalogeiton, V. (2024). Funnynet-W: Multimodal learning of funny moments in videos in the wild. International Journal of Computer Vision, 132, 2885\u20132906.","journal-title":"International Journal of Computer Vision"},{"key":"2399_CR31","doi-asserted-by":"crossref","unstructured":"Liao, J., Duan, H., Feng, K., Zhao, W., Yang, Y., & Chen, L. (2023). A light weight model for active speaker detection. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR) (pp. 22932\u201322941).","DOI":"10.1109\/CVPR52729.2023.02196"},{"key":"2399_CR32","doi-asserted-by":"crossref","unstructured":"Liao, J., Duan, H., Li, X., Xu, H., Yang, Y., Cai, W., Chen, Y., & Chen, L. (2020) Occlusion detection for automatic video editing. In Proceedings of the 28th ACM international conference on multimedia (pp. 2255\u20132263) (2020)","DOI":"10.1145\/3394171.3413725"},{"key":"2399_CR33","doi-asserted-by":"crossref","unstructured":"Liao, J., Duan, H., Zhao, W., Yang, Y., & Chen, L. (2022). A light weight model for video shot occlusion detection. In ICASSP 2022-2022 IEEE international conference on acoustics, speech and signal processing (ICASSP) (pp. 3154\u20133158). 
IEEE.","DOI":"10.1109\/ICASSP43922.2022.9746742"},{"issue":"3","key":"2399_CR34","doi-asserted-by":"publisher","first-page":"1627","DOI":"10.1109\/TCSVT.2023.3295243","volume":"34","author":"J Liao","year":"2024","unstructured":"Liao, J., Duan, H., Zhao, W., Feng, K., Yang, Y., & Chen, L. (2024). A video shot occlusion detection algorithm based on the abnormal fluctuation of depth information. IEEE Transactions on Circuits and Systems for Video Technology, 34(3), 1627\u20131640.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2399_CR35","doi-asserted-by":"crossref","unstructured":"Li, H., Niu, H., Zhu, Z., & Zhao, F. (2023). Intensity-aware loss for dynamic facial expression recognition in the wild. In Proceedings of the AAAI conference on artificial intelligence, (vol. 37, pp. 67\u201375).","DOI":"10.1609\/aaai.v37i1.25077"},{"issue":"2","key":"2399_CR36","doi-asserted-by":"publisher","first-page":"198","DOI":"10.1109\/34.982900","volume":"24","author":"I Matthews","year":"2002","unstructured":"Matthews, I., Cootes, T. F., Bangham, J. A., Cox, S., & Harvey, R. (2002). Extraction of visual features for lipreading. IEEE Transactions on Pattern Analysis and Machine Intelligence, 24(2), 198\u2013213.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2399_CR37","unstructured":"Moattar, M. H., & Homayounpour, M. M.: A simple but efficient real-time voice activity detection algorithm. In 2009 17th European signal processing conference (pp. 2549\u20132553). IEEE."},{"key":"2399_CR38","doi-asserted-by":"crossref","unstructured":"Min, K., Roy, S., Tripathi, S., Guha, T., & Majumdar, S. (2022). Learning long-term spatial-temporal graphs for active speaker detection. In Computer Vision\u2013ECCV 2022: 17th European Conference, Part XXXV (pp. 371\u2013387). Springer.","DOI":"10.1007\/978-3-031-19833-5_22"},{"key":"2399_CR39","doi-asserted-by":"publisher","first-page":"1368","DOI":"10.1109\/TASLP.2021.3066303","volume":"29","author":"D Michelsanti","year":"2021","unstructured":"Michelsanti, D., Tan, Z.-H., Zhang, S.-X., Xu, Y., Yu, M., Yu, D., & Jensen, J. (2021). An overview of deep-learning-based audio-visual speech enhancement and separation. IEEE\/ACM Transactions on Audio, Speech, and Language Processing, 29, 1368\u20131396.","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"key":"2399_CR40","unstructured":"Ngiam, J., Khosla, A., Kim, M., Nam, J., Lee, H., & Ng, A. Y. (2011). Multimodal deep learning. In Proceedings of the 28th international conference on machine learning (ICML-11) (pp. 689\u2013696)."},{"key":"2399_CR41","doi-asserted-by":"crossref","unstructured":"Owens, A., & Efros, A. A. (2018). Audio-visual scene analysis with self-supervised multisensory features. In Proceedings of the European conference on computer vision (ECCV). (pp. 631\u2013648).","DOI":"10.1007\/978-3-030-01231-1_39"},{"issue":"2","key":"2399_CR42","doi-asserted-by":"publisher","first-page":"206","DOI":"10.1109\/JSTSP.2019.2908700","volume":"13","author":"H Purwins","year":"2019","unstructured":"Purwins, H., Li, B., Virtanen, T., Schl\u00fcter, J., Chang, S.-Y., & Sainath, T. (2019). Deep learning for audio signal processing. 
IEEE Journal of Selected Topics in Signal Processing, 13(2), 206\u2013219.","journal-title":"IEEE Journal of Selected Topics in Signal Processing"},{"key":"2399_CR43","doi-asserted-by":"publisher","first-page":"2618","DOI":"10.1007\/s11263-024-01998-9","volume":"132","author":"M Planamente","year":"2024","unstructured":"Planamente, M., Plizzari, C., Peirone, S. A., Caputo, B., & Bottino, A. (2024). Relative norm alignment for tackling domain shift in deep multi-modal classification. International Journal of Computer Vision, 132, 2618\u20132638.","journal-title":"International Journal of Computer Vision"},{"key":"2399_CR44","doi-asserted-by":"publisher","first-page":"942","DOI":"10.1109\/TMM.2021.3061800","volume":"24","author":"X Qian","year":"2021","unstructured":"Qian, X., Brutti, A., Lanz, O., Omologo, M., & Cavallaro, A. (2021). Audio-visual tracking of concurrent speakers. IEEE Transactions on Multimedia, 24, 942\u2013954.","journal-title":"IEEE Transactions on Multimedia"},{"issue":"6","key":"2399_CR45","doi-asserted-by":"publisher","first-page":"2003","DOI":"10.1007\/s11263-023-01950-3","volume":"132","author":"M Qiao","year":"2024","unstructured":"Qiao, M., Liu, Y., Xu, M., Deng, X., Li, B., Hu, W., & Borji, A. (2024). Joint learning of audio-visual saliency prediction and sound source localization on multi-face videos. International Journal of Computer Vision, 132(6), 2003\u20132025.","journal-title":"International Journal of Computer Vision"},{"key":"2399_CR46","doi-asserted-by":"crossref","unstructured":"Ravanelli, M., & Bengio, Y. (2018). Speaker recognition from raw waveform with sincnet. In 2018 IEEE spoken language technology workshop (SLT) (pp. 1021\u20131028). IEEE.","DOI":"10.1109\/SLT.2018.8639585"},{"key":"2399_CR47","doi-asserted-by":"crossref","unstructured":"Roth, J., Chaudhuri, S., Klejch, O., Marvin, R., Gallagher, A., Kaver, L., Ramaswamy, S., Stopczynski, A., Schmid, C., & Xi, Z. (2020). Ava active speaker: An audio-visual dataset for active speaker detection. In ICASSP 2020-2020 IEEE international conference on acoustics, speech and signal processing (ICASSP) (pp. 4492\u20134496). IEEE.","DOI":"10.1109\/ICASSP40776.2020.9053900"},{"key":"2399_CR48","doi-asserted-by":"crossref","unstructured":"Radosavovic, I., Kosaraju, R. P., Girshick, R., He, K., & Doll\u00e1r, P. (2020). Designing network design spaces. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 10428\u201310436).","DOI":"10.1109\/CVPR42600.2020.01044"},{"key":"2399_CR49","doi-asserted-by":"crossref","unstructured":"Ravanelli, M., Parcollet, T., & Bengio, Y. (2019). The Pytorch-Kaldi speech recognition toolkit. In ICASSP 2019-2019 IEEE international conference on acoustics, speech and signal processing (ICASSP) (pp. 6465\u20136469). IEEE","DOI":"10.1109\/ICASSP.2019.8683713"},{"issue":"3\u20134","key":"2399_CR50","doi-asserted-by":"publisher","first-page":"271","DOI":"10.1016\/j.specom.2003.10.002","volume":"42","author":"J Ram\u0131rez","year":"2004","unstructured":"Ram\u0131rez, J., Segura, J. C., Ben\u0131tez, C., De La Torre, A., & Rubio, A. (2004). Efficient voice activity detection algorithms using long-term speech information. Speech Communication, 42(3\u20134), 271\u2013287.","journal-title":"Speech Communication"},{"key":"2399_CR51","doi-asserted-by":"crossref","unstructured":"Rao, A., Wang, J., Xu, L., Jiang, X., Huang, Q., Zhou, B., & Lin, D. (2020). A unified framework for shot type classification based on subject centric lens. 
In Computer Vision\u2013ECCV 2020: 16th European Conference, Part XI 16 (pp. 17\u201334). Springer.","DOI":"10.1007\/978-3-030-58621-8_2"},{"key":"2399_CR52","doi-asserted-by":"crossref","unstructured":"Shahid, M., Beyan, C., & Murino, V. (2019). Comparisons of visual activity primitives for voice activity detection. In Image analysis and processing\u2013ICIAP 2019: 20th international conference, Trento, Italy, September 9\u201313, 2019, Proceedings, Part I 20 (pp. 48\u201359). Springer.","DOI":"10.1007\/978-3-030-30642-7_5"},{"key":"2399_CR53","doi-asserted-by":"crossref","unstructured":"Shahid, M., Beyan, C., & Murino, V. (2019). Voice activity detection by upper body motion analysis and unsupervised domain adaptation. In Proceedings of the IEEE\/CVF international conference on computer vision workshops (pp. 0\u20130).","DOI":"10.1109\/ICCVW.2019.00159"},{"key":"2399_CR54","doi-asserted-by":"crossref","unstructured":"Shahid, M., Beyan, C., & Murino, V. (2021). S-vvad: Visual voice activity detection by motion segmentation. In: Proceedings of the IEEE\/CVF winter conference on applications of computer vision pp. 2332\u20132341 (2021)","DOI":"10.1109\/WACV48630.2021.00238"},{"key":"2399_CR55","unstructured":"Slaney, M., & Covell, M. (2000). Facesync: A linear operator for measuring synchronization of video facial images and audio tracks. Advances in Neural Information Processing Systems 13."},{"key":"2399_CR56","doi-asserted-by":"crossref","unstructured":"Son\u00a0Chung, J., Senior, A., Vinyals, O., & Zisserman, A. (2017). Lip reading sentences in the wild. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 6447\u20136456).","DOI":"10.1109\/CVPR.2017.367"},{"key":"2399_CR57","doi-asserted-by":"crossref","unstructured":"Saenko, K., Livescu, K., Siracusa, M., Wilson, K., Glass, J., & Darrell, T. (2005). Visual speech recognition with loosely synchronized feature streams. In Tenth IEEE international conference on computer vision (ICCV\u201905) volume 1 (vol. 2, pp. 1424\u20131431). IEEE.","DOI":"10.1109\/ICCV.2005.251"},{"key":"2399_CR58","unstructured":"Sharma, R., & Narayanan, S. (2022). Unsupervised active speaker detection in media content using cross-modal information. arXiv preprint arXiv:2209.11896."},{"issue":"1","key":"2399_CR59","doi-asserted-by":"publisher","first-page":"90","DOI":"10.1093\/applin\/11.1.90","volume":"11","author":"S Tauroza","year":"1990","unstructured":"Tauroza, S., & Allison, D. (1990). Speech rates in British English. Applied Linguistics, 11(1), 90\u2013105.","journal-title":"Applied Linguistics"},{"key":"2399_CR60","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., & Paluri, M. (2015). Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision (pp. 4489\u20134497).","DOI":"10.1109\/ICCV.2015.510"},{"key":"2399_CR61","doi-asserted-by":"crossref","unstructured":"Truong, T.-D., Duong, C.N., Pham, H.A., Raj, B., Le, N., & Luu, K. (2021). The right to talk: An audio-visual transformer approach. In: Proceedings of the IEEE\/CVF international conference on computer vision (pp. 1105\u20131114).","DOI":"10.1109\/ICCV48922.2021.00114"},{"key":"2399_CR62","doi-asserted-by":"crossref","unstructured":"Tesema, F.B., Lin, Z., Zhu, S., Song, W., Gu, J., Wu, H.: End-to-end audiovisual feature fusion for active speaker detection. In: Fourteenth International Conference on Digital Image Processing (ICDIP 2022), vol. 12342, pp. 
681\u2013688 (2022). SPIE","DOI":"10.1117\/12.2643881"},{"key":"2399_CR63","doi-asserted-by":"crossref","unstructured":"Tao, R., Pan, Z., Das, R. K., Qian, X., Shou, M. Z., & Li, H. (2021). Is someone speaking? exploring long-term temporal features for audio-visual active speaker detection. In Proceedings of the 29th ACM international conference on multimedia (pp. 3927\u20133935).","DOI":"10.1145\/3474085.3475587"},{"key":"2399_CR64","doi-asserted-by":"crossref","unstructured":"Tran, D., Wang, H., Torresani, L., Ray, J., LeCun, Y., & Paluri, M. (2018). A closer look at spatiotemporal convolutions for action recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 6450\u20136459).","DOI":"10.1109\/CVPR.2018.00675"},{"key":"2399_CR65","doi-asserted-by":"crossref","unstructured":"Vasu, P. K. A., Gabriel, J., Zhu, J., Tuzel, O., & Ranjan, A. (2023). Mobileone: An improved one millisecond mobile backbone. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 7907\u20137917).","DOI":"10.1109\/CVPR52729.2023.00764"},{"key":"2399_CR66","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, \u0141., & Polosukhin, I. (2017). Attention is all you need. Advances in Neural Information Processing Systems 30"},{"key":"2399_CR67","doi-asserted-by":"crossref","unstructured":"Wang, X., Cheng, F., & Bertasius, G. (2024). Loconet: Long-short context network for active speaker detection. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 18462\u201318472).","DOI":"10.1109\/CVPR52733.2024.01747"},{"key":"2399_CR68","doi-asserted-by":"crossref","unstructured":"Wang, Q., Downey, C., Wan, L., Mansfield, P. A., & Moreno, I. L. (2018). Speaker diarization with lstm. In 2018 IEEE international conference on acoustics, speech and signal processing (ICASSP) (pp. 5239\u20135243). IEEE.","DOI":"10.1109\/ICASSP.2018.8462628"},{"key":"2399_CR69","doi-asserted-by":"crossref","unstructured":"Wang, H., Li, B., Wu, S., Shen, S., Liu, F., Ding, S., & Zhou, A. (2023). Rethinking the learning paradigm for dynamic facial expression recognition. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 17958\u201317968).","DOI":"10.1109\/CVPR52729.2023.01722"},{"key":"2399_CR70","doi-asserted-by":"crossref","unstructured":"Wang, Y., Sun, Y., Huang, Y., Liu, Z., Gao, S., Zhang, W., Ge, W., & Zhang, W. (2022). Ferv39k: A large-scale multi-scene dataset for facial expression recognition in videos. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 20922\u201320931).","DOI":"10.1109\/CVPR52688.2022.02025"},{"key":"2399_CR71","doi-asserted-by":"crossref","unstructured":"Wuerkaixi, A., Zhang, Y., Duan, Z., & Zhang, C. (2022). Rethinking audio-visual synchronization for active speaker detection. In 2022 IEEE 32nd international workshop on machine learning for signal processing (MLSP) (pp. 01\u201306). IEEE.","DOI":"10.1109\/MLSP55214.2022.9943352"},{"key":"2399_CR72","doi-asserted-by":"crossref","unstructured":"Xiong, J., Zhou, Y., Zhang, P., Xie, L., Huang, W., & Zha, Y. (2022) Look &listen: Multi-modal correlation learning for active speaker detection and speech enhancement. IEEE Transactions on Multimedia (pp. 1\u201314).","DOI":"10.1109\/TMM.2022.3199109"},{"key":"2399_CR73","unstructured":"Zhang, Y., Liang, S., Yang, S., Liu, X., Wu, Z., & Shan, S. (2021a). 
    "container-title": ["International Journal of Computer Vision"],
    "original-title": [],
    "language": "en",
    "link": [
      {"URL": "https://link.springer.com/content/pdf/10.1007/s11263-025-02399-2.pdf", "content-type": "application/pdf", "content-version": "vor", "intended-application": "text-mining"},
      {"URL": "https://link.springer.com/article/10.1007/s11263-025-02399-2/fulltext.html", "content-type": "text/html", "content-version": "vor", "intended-application": "text-mining"},
      {"URL": "https://link.springer.com/content/pdf/10.1007/s11263-025-02399-2.pdf", "content-type": "application/pdf", "content-version": "vor", "intended-application": "similarity-checking"}
    ],
    "deposited": {
      "date-parts": [[2025, 6, 7]],
      "date-time": "2025-06-07T05:59:32Z",
      "timestamp": 1749275972000
    },
    "score": 1,
    "resource": {"primary": {"URL": "https://link.springer.com/10.1007/s11263-025-02399-2"}},
    "subtitle": [],
    "short-title": [],
    "issued": {"date-parts": [[2025, 3, 19]]},
    "references-count": 76,
    "journal-issue": {"issue": "7", "published-print": {"date-parts": [[2025, 7]]}},
    "alternative-id": ["2399"],
    "URL": "https://doi.org/10.1007/s11263-025-02399-2",
    "relation": {},
    "ISSN": ["0920-5691", "1573-1405"],
    "issn-type": [
      {"value": "0920-5691", "type": "print"},
      {"value": "1573-1405", "type": "electronic"}
    ],
    "subject": [],
    "published": {"date-parts": [[2025, 3, 19]]},
    "assertion": [
      {"value": "22 June 2024", "order": 1, "name": "received", "label": "Received", "group": {"name": "ArticleHistory", "label": "Article History"}},
      {"value": "17 February 2025", "order": 2, "name": "accepted", "label": "Accepted", "group": {"name": "ArticleHistory", "label": "Article History"}},
      {"value": "19 March 2025", "order": 3, "name": "first_online", "label": "First Online", "group": {"name": "ArticleHistory", "label": "Article History"}}
    ]
  }
}
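For readers consuming this record programmatically, the sketch below shows one way to fetch the same work from the public Crossref REST API (https://api.crossref.org/works/{doi}) and print a one-line citation. It is a minimal illustration, not part of the record: the DOI and field names are taken from the JSON above, while the script name and mailto address in the User-Agent header are placeholders you should replace with your own.

# Minimal sketch: fetch a Crossref work record and print a one-line citation.
# Uses only the Python standard library; assumes network access.
import json
import urllib.request

DOI = "10.1007/s11263-025-02399-2"

# Crossref etiquette recommends identifying yourself in the User-Agent;
# the name and mailto address here are placeholders.
req = urllib.request.Request(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "metadata-example/0.1 (mailto:you@example.org)"},
)
with urllib.request.urlopen(req) as resp:
    # Unwrap the {"status": ..., "message-type": ..., "message": ...} envelope.
    work = json.load(resp)["message"]

# Field names mirror the keys in the record above; "title" and
# "container-title" are list-valued and hold a single string here.
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in work.get("author", [])
)
year = work["issued"]["date-parts"][0][0]  # e.g. [[2025, 3, 19]] -> 2025
print(
    f"{authors} ({year}). {work['title'][0]}. "
    f"{work['container-title'][0]}, {work['volume']}({work['issue']}), "
    f"{work['page']}. https://doi.org/{work['DOI']}"
)

Run against this record, the script would print the article's seven authors followed by "(2025). LR-ASD: Lightweight and Robust Network for Active Speaker Detection. International Journal of Computer Vision, 133(7), 4749-4769." and the DOI URL.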