{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,28]],"date-time":"2026-02-28T17:58:49Z","timestamp":1772301529517,"version":"3.50.1"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747187","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"7308-7311","source":"Crossref","is-referenced-by-count":13,"title":["VSEGAN: Visual Speech Enhancement Generative Adversarial Network"],"prefix":"10.1109","author":[{"given":"Xinmeng","family":"Xu","sequence":"first","affiliation":[{"name":"Vivo AI Lab,P. R. China"}]},{"given":"Yang","family":"Wang","sequence":"additional","affiliation":[{"name":"Vivo AI Lab,P. R. China"}]},{"given":"Dongxiang","family":"Xu","sequence":"additional","affiliation":[{"name":"Vivo AI Lab,P. R. China"}]},{"given":"Yiyuan","family":"Peng","sequence":"additional","affiliation":[{"name":"Vivo AI Lab,P. R. China"}]},{"given":"Cong","family":"Zhang","sequence":"additional","affiliation":[{"name":"Vivo AI Lab,P. R. China"}]},{"given":"Jie","family":"Jia","sequence":"additional","affiliation":[{"name":"Vivo AI Lab,P. R. China"}]},{"given":"Binbin","family":"Chen","sequence":"additional","affiliation":[{"name":"Vivo AI Lab,P. R. 
China"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"crossref","first-page":"3642","DOI":"10.21437\/Interspeech.2017-1428","article-title":"SEGAN: Speech enhancement generative adversarial network","author":"pascual","year":"2017","journal-title":"in INTERSPEECH 2017"},{"key":"ref11","first-page":"2672","article-title":"Conditional generative adversarial nets","author":"mirza","year":"2014","journal-title":"Computer Science"},{"key":"ref12","article-title":"Unsupervised representation learning with deep convolutional generative adversarial networks","author":"radford","year":"2016"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.304"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683799"},{"key":"ref15","first-page":"1125","article-title":"Image-to-image translation with conditional adversarial networks","author":"isola","year":"2017","journal-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462614"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1121\/1.2229005"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2015.2407694"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1955"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TASSP.1978.1163086"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/89.397090"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1121\/1.1907309"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/0167-6393(91)90027-Q"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPA.2016.7820732"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2005.1416331"},{"key":"ref2","article-title":"Spectral subtraction based on minimum 
statistics","volume":"6","author":"martin","year":"1994","journal-title":"Power"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1201\/b14529"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462527"},{"key":"ref20","doi-asserted-by":"crossref","DOI":"10.1145\/3197517.3201357","article-title":"Looking to listen at the cocktail party: A speaker-independent audio-visual model for speech separation","author":"ephrat","year":"2018","journal-title":"ACM Transactions on Graphics"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054528"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053033"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Singapore, Singapore","start":{"date-parts":[[2022,5,23]]},"end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747187.pdf?arnumber=9747187","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,15]],"date-time":"2022-08-15T20:09:20Z","timestamp":1660594160000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747187\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747187","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}