{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,28]],"date-time":"2026-01-28T20:22:26Z","timestamp":1769631746989,"version":"3.49.0"},"reference-count":34,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,9,26]],"date-time":"2022-09-26T00:00:00Z","timestamp":1664150400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,9,26]],"date-time":"2022-09-26T00:00:00Z","timestamp":1664150400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation (NSF)","doi-asserted-by":"publisher","award":["1741472,DGE-1922591"],"award-info":[{"award-number":["1741472,DGE-1922591"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,9,26]]},"DOI":"10.1109\/mmsp55362.2022.9948860","type":"proceedings-article","created":{"date-parts":[[2022,11,22]],"date-time":"2022-11-22T21:39:16Z","timestamp":1669153156000},"page":"1-6","source":"Crossref","is-referenced-by-count":7,"title":["DyViSE: Dynamic Vision-Guided Speaker Embedding for Audio-Visual Speaker Diarization"],"prefix":"10.1109","author":[{"given":"Abudukelimu","family":"Wuerkaixi","sequence":"first","affiliation":[{"name":"Institute for Artificial Intelligence, Tsinghua University (THUAI),State Key Lab of Intelligent Technologies and Systems, Beijing National Research Center for Information Science and Technology (BNRist),Department of Automation,Beijing,P.R.China"}]},{"given":"Kunda","family":"Yan","sequence":"additional","affiliation":[{"name":"Institute for Artificial Intelligence, Tsinghua University (THUAI),State Key Lab of Intelligent Technologies and Systems, Beijing National Research Center for Information Science and Technology (BNRist),Department of Automation,Beijing,P.R.China"}]},{"given":"You","family":"Zhang","sequence":"additional","affiliation":[{"name":"Institute for Artificial Intelligence, Tsinghua University (THUAI),State Key Lab of Intelligent Technologies and Systems, Beijing National Research Center for Information Science and Technology (BNRist),Department of Automation,Beijing,P.R.China"}]},{"given":"Zhiyao","family":"Duan","sequence":"additional","affiliation":[{"name":"University of Rochester,Department of Electrical and Computer Engineering,Rochester,NY,USA"}]},{"given":"Changshui","family":"Zhang","sequence":"additional","affiliation":[{"name":"Institute for Artificial Intelligence, Tsinghua University (THUAI),State Key Lab of Intelligent Technologies and Systems, Beijing National Research Center for Information Science and Technology (BNRist),Department of Automation,Beijing,P.R.China"}]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3057230"},{"key":"ref32","article-title":"NIST RT'05S evaluation: Pre-processing techniques and speaker di-arization on multiple microphone meetings","author":"istrate","year":"2005","journal-title":"Int Workshop Mach Learn Multimodal Interact"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003959"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-012-1080-6"},{"key":"ref10","article-title":"AVA-AVD: Audio-visual speaker diarization in the wild","volume":"abs 2111 14448","author":"xu","year":"2021","journal-title":"CoRR"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01524"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2928140"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683892"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2899"},{"key":"ref15","article-title":"Using active speaker faces for diarization in TV shows","volume":"abs 2203 15961","author":"sharma","year":"2022","journal-title":"CoRR"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2012.2233724"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054376"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-014-2274-x"},{"key":"ref19","article-title":"Learn an effective lip reading model without pains","volume":"abs 2011 7557","author":"feng","year":"2020","journal-title":"CoRR"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/MLSP55214.2022.9943352"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-448"},{"key":"ref27","article-title":"WavLM: Large-scale self-supervised pre-training for full stack speech processing","volume":"abs 2110 13900","author":"chen","year":"2021","journal-title":"CoRR"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1893"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2648793"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00482"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-015-3181-5"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2337"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-3116"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2021.101317"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178882"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383556"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/s41870-022-00907-y"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-2516"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054180"},{"key":"ref24","first-page":"192","article-title":"S3fd: Single shot scale-invariant face detector","author":"zhang","year":"0","journal-title":"Proc ICCV"},{"key":"ref23","article-title":"Royalflush speaker diarization system for ICASSP 2022 multi-channel multi-party meeting transcription chal-lenge","volume":"abs 2202 4814","author":"tian","year":"0","journal-title":"CoRR"},{"key":"ref26","first-page":"1021","article-title":"How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230, 000 3d facial landmarks)","author":"bulat","year":"0","journal-title":"Proc ICCV"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475587"}],"event":{"name":"2022 IEEE 24th International Workshop on Multimedia Signal Processing (MMSP)","location":"Shanghai, China","start":{"date-parts":[[2022,9,26]]},"end":{"date-parts":[[2022,9,28]]}},"container-title":["2022 IEEE 24th International Workshop on Multimedia Signal Processing (MMSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9948698\/9948704\/09948860.pdf?arnumber=9948860","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,12]],"date-time":"2022-12-12T19:54:22Z","timestamp":1670874862000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9948860\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,9,26]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/mmsp55362.2022.9948860","relation":{},"subject":[],"published":{"date-parts":[[2022,9,26]]}}}