{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,22]],"date-time":"2026-01-22T16:58:20Z","timestamp":1769101100719,"version":"3.49.0"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,4]]},"DOI":"10.1109\/icassp49357.2023.10095796","type":"proceedings-article","created":{"date-parts":[[2023,5,5]],"date-time":"2023-05-05T17:28:30Z","timestamp":1683307710000},"page":"1-5","source":"Crossref","is-referenced-by-count":21,"title":["CN-CVS: A Mandarin Audio-Visual Dataset for Large Vocabulary Continuous Visual to Speech Synthesis"],"prefix":"10.1109","author":[{"given":"Chen","family":"Chen","sequence":"first","affiliation":[{"name":"Tsinghua University,Center for Speech and Language Technologies, BNRist,China"}]},{"given":"Dong","family":"Wang","sequence":"additional","affiliation":[{"name":"Tsinghua University,Center for Speech and Language Technologies, BNRist,China"}]},{"given":"Thomas Fang","family":"Zheng","sequence":"additional","affiliation":[{"name":"Tsinghua University,Center for Speech and Language Technologies, BNRist,China"}]}],"member":"263","reference":[{"key":"ref13","first-page":"2758","article-title":"Lip to Speech Synthesis with Visual Context Attentional GAN","volume":"34","author":"kim","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01381"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2022.104389"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.23919\/EUSIPCO54536.2021.9616266"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1445"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2017.61"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747187"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746401"},{"key":"ref17","article-title":"SVTS: Scalable Video-to-Speech Synthesis","author":"mira","year":"2022"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2022.3162495"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2015.2407694"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1121\/1.2229005"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2018.07.002"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-54184-6_6"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-54427-4_19"},{"key":"ref25","article-title":"Advances and Challenges in Deep Lip Reading","author":"oghbaie","year":"2021"},{"key":"ref20","article-title":"LipSound2: Self-Supervised Pre-Training for Lip-to-Speech Reconstruction and Lip Reading","author":"qu","year":"2021"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3338533.3366579"},{"key":"ref21","doi-asserted-by":"crossref","DOI":"10.1109\/FG.2019.8756582","article-title":"LRW-1000: A Naturally-Distributed Large-Scale Benchmark for Lip Reading in the Wild","author":"yang","year":"2019"},{"key":"ref8","article-title":"Large-Scale Visual Speech Recognition","author":"shillingford","year":"2018"},{"key":"ref7","article-title":"LipNet: End-to-End Sentence-level Lipreading","author":"assael","year":"2016"},{"key":"ref9","article-title":"Sub-word Level Lip Reading With Visual Attention","author":"prajwal","year":"2021"},{"key":"ref4","first-page":"1","article-title":"Deep Audio-visual Speech Recognition","author":"afouras","year":"2019","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.367"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-99"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9004036"}],"event":{"name":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Rhodes Island, Greece","start":{"date-parts":[[2023,6,4]]},"end":{"date-parts":[[2023,6,10]]}},"container-title":["ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10094559\/10094560\/10095796.pdf?arnumber=10095796","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,23]],"date-time":"2023-10-23T17:59:31Z","timestamp":1698083971000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10095796\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,4]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/icassp49357.2023.10095796","relation":{},"subject":[],"published":{"date-parts":[[2023,6,4]]}}}