{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T18:31:58Z","timestamp":1774722718381,"version":"3.50.1"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100008845","name":"Xinjiang University","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100008845","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,6,6]]},"DOI":"10.1109\/icassp39728.2021.9414659","type":"proceedings-article","created":{"date-parts":[[2021,5,13]],"date-time":"2021-05-13T19:53:45Z","timestamp":1620935625000},"page":"7988-7992","source":"Crossref","is-referenced-by-count":18,"title":["How to Use Time Information Effectively? Combining with Time Shift Module for Lipreading"],"prefix":"10.1109","author":[{"given":"Mingfeng","family":"Hao","sequence":"first","affiliation":[]},{"given":"Mutallip","family":"Mamut","sequence":"additional","affiliation":[]},{"given":"Nurbiya","family":"Yadikar","sequence":"additional","affiliation":[]},{"given":"Alimjan","family":"Aysa","sequence":"additional","affiliation":[]},{"given":"Kurban","family":"Ubul","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952625"},{"key":"ref11","first-page":"277","article-title":"Concatenated frame image based cnn for visual speech recognition","author":"saitoh","year":"2016","journal-title":"Asian Conference on Computer Vision"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/IC3.2018.8530509"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2927166"},{"key":"ref14","article-title":"Lipnet: End-to-end sentence-level lipreading","author":"assael","year":"2016","journal-title":"arXiv preprint arXiv 1611 01599"},{"key":"ref15","article-title":"Multi-Grained Spatio-temporal Modeling for Lip-reading","author":"wang","year":"2019","journal-title":"presented at the Brit Mach Vis Conf"},{"key":"ref16","article-title":"Learning Spatio-Temporal Features with Two-Stream Deep 3D CNNs for Lipreading","author":"weng","year":"2019","journal-title":"presented at the Brit Mach Vis Conf"},{"key":"ref17","first-page":"836","article-title":"Deformation Flow Based Two-Stream Network for Lip Reading","author":"xiao","year":"2020","journal-title":"2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)(FG)"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-85"},{"key":"ref19","first-page":"69","article-title":"Pseudo-Convolutional Policy Gradient for Sequence-to-Sequence Lip-Reading","author":"luo","year":"2020","journal-title":"2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)(FG)"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00718"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073640"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2019.8756582"},{"key":"ref5","first-page":"87","article-title":"Lip reading in the wild","author":"chung","year":"2016","journal-title":"Asian Conference on Computer Vision"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.367"},{"key":"ref7","article-title":"Lipreading using convolutional neural network","author":"noda","year":"2014","journal-title":"Fifteenth Annual Conference of the International Speech Communication Association"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-25903-1_49"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1097\/AUD.0b013e31812f7185"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICIS.2016.7550888"},{"key":"ref20","first-page":"843","article-title":"Mutual Information Maximization for Effective Lip Reading","author":"zhao","year":"2020","journal-title":"2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)(FG)"},{"key":"ref22","first-page":"851","article-title":"Can We Read Speech Beyond the Lips? Rethinking RoI Selection for Deep Visual Speech Recognition","author":"zhang","year":"2020","journal-title":"2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020)(FG)"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053841"}],"event":{"name":"ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Toronto, ON, Canada","start":{"date-parts":[[2021,6,6]]},"end":{"date-parts":[[2021,6,11]]}},"container-title":["ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9413349\/9413350\/09414659.pdf?arnumber=9414659","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T15:40:58Z","timestamp":1652197258000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9414659\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6,6]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/icassp39728.2021.9414659","relation":{},"subject":[],"published":{"date-parts":[[2021,6,6]]}}}