{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:24:55Z","timestamp":1775067895058,"version":"3.50.1"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9053841","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T20:21:13Z","timestamp":1586463673000},"page":"6319-6323","source":"Crossref","is-referenced-by-count":218,"title":["Lipreading Using Temporal Convolutional Networks"],"prefix":"10.1109","author":[{"given":"Brais","family":"Martinez","sequence":"first","affiliation":[]},{"given":"Pingchuan","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Stavros","family":"Petridis","sequence":"additional","affiliation":[]},{"given":"Maja","family":"Pantic","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-85","article-title":"Combining residual networks with LSTMs for lipreading","author":"stafylakis","year":"2017","journal-title":"InterSpeech"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1669"},{"key":"ref12","article-title":"Deep audio-visual speech recognition","author":"afouras","year":"2018","journal-title":"IEEE Transactions of Pattern Analysis and Machine Intelligence"},{"key":"ref13","article-title":"Lip reading sentences in the wild","author":"chung","year":"2016","journal-title":"Computer Vision and Pattern Recognition"},{"key":"ref14","first-page":"513","article-title":"Audio-visual speech recognition with a hybrid ctc\/attention architecture","author":"petridis","year":"2018","journal-title":"IEEE Spoken Language Technology Workshop"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461326"},{"key":"ref16","article-title":"Learning spatio-temporal features with two-stream deep 3D CNNs for lipreading","author":"weng","year":"2019","journal-title":"British Machine Vision Conference"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2019.8756582"},{"key":"ref19","author":"bai","year":"2018","journal-title":"An empirical evaluation of generic convolutional and recurrent networks for sequence modeling"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-014-0629-7"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2014.06.004"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICIS.2016.7550888"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472088"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472852"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952625"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/6046.865479"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2003.817150"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.5244\/C.31.161"},{"key":"ref20","article-title":"SGDR: Stochastic gradient descent with warm restarts","author":"loshchilov","year":"2017","journal-title":"Int&#x2019;l Conference on Learning Representations"},{"key":"ref22","author":"oord","year":"2016","journal-title":"WaveNet A Generative Model for Raw Audio"},{"key":"ref21","article-title":"Lip reading in the wild","author":"c","year":"2016","journal-title":"Asian Conf on Computer Vision"},{"key":"ref24","article-title":"Multi-grained spatio-temporal modeling for lip-reading","author":"wang","year":"2019","journal-title":"British Machine Vision Conference"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Barcelona, Spain","start":{"date-parts":[[2020,5,4]]},"end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09053841.pdf?arnumber=9053841","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:25:22Z","timestamp":1656375922000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9053841\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9053841","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}