{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,28]],"date-time":"2025-06-28T06:23:22Z","timestamp":1751091802615,"version":"3.28.0"},"reference-count":32,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2015,12]]},"DOI":"10.1109\/apsipa.2015.7415335","type":"proceedings-article","created":{"date-parts":[[2016,3,28]],"date-time":"2016-03-28T19:11:46Z","timestamp":1459192306000},"page":"575-582","source":"Crossref","is-referenced-by-count":38,"title":["Audio-visual speech recognition using deep bottleneck features and high-performance lipreading"],"prefix":"10.1109","author":[{"given":"Satoshi","family":"Tamura","sequence":"first","affiliation":[]},{"given":"Hiroshi","family":"Ninomiya","sequence":"additional","affiliation":[]},{"given":"Norihide","family":"Kitaoka","sequence":"additional","affiliation":[]},{"given":"Shin","family":"Osuga","sequence":"additional","affiliation":[]},{"given":"Yurie","family":"Iribe","sequence":"additional","affiliation":[]},{"given":"Kazuya","family":"Takeda","sequence":"additional","affiliation":[]},{"given":"Satoru","family":"Hayamizu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref32","first-page":"81","article-title":"Real-time audio-visual voice activity detection for speech recognition in noisy environments","author":"ishi","year":"2010","journal-title":"Proc AVSP2010"},{"key":"ref31","first-page":"151","article-title":"Voice activity detection based on fusion of audio and visual information","author":"takeuchi","year":"2009","journal-title":"Proc AVSP2009"},{"year":"0","key":"ref30"},{"key":"ref10","first-page":"102","article-title":"Comparing visual features for lipreading","author":"lan","year":"2009","journal-title":"Proc AVSP 2009"},{"key":"ref11","first-page":"619","article-title":"Data collection and evaluation of AURORA-2 Japanese corpus","author":"nakamura","year":"2003","journal-title":"Proc ASRU2003"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1994.389567"},{"key":"ref13","first-page":"746","article-title":"Stream confidence estimation for audio-visual speech recognition","volume":"3","author":"potamianos","year":"2008","journal-title":"Proc ICSLP2000"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1002\/scj.4690220607"},{"key":"ref15","first-page":"187","article-title":"Bimodal speech recognition using lip movement measured by optical-flow analysis","author":"iwano","year":"2001","journal-title":"Proc HSC2001"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/MMSP.2010.5662075"},{"key":"ref17","first-page":"227","article-title":"Efficient face model for lip reading","author":"saitoh","year":"2013","journal-title":"Proc AVSP2013"},{"key":"ref18","first-page":"1527","article-title":"A new EM estimation of dynamic stream weights for coupled-HMM-based audio-visual ASR","author":"abdelaziz","year":"2014","journal-title":"Proc ICASSP2014"},{"key":"ref19","doi-asserted-by":"crossref","first-page":"1145","DOI":"10.1109\/TASL.2011.2172427","article-title":"On dynamic stream weighting for audio-visual speech recognition","volume":"20","author":"estellers","year":"2011","journal-title":"IEEE Transactions on Audio Speech and Language Processing"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1250\/ast.3.75"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638284"},{"key":"ref27","first-page":"85","article-title":"CENSREC-1-AV: An audio-visual corpus for noisy bimodal speech recognition","author":"tamura","year":"2010","journal-title":"Proc AVSP2010"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"437","DOI":"10.21437\/Interspeech.2011-169","article-title":"Conversational speech transcription using context-dependent deep neural networks","author":"seide","year":"2011","journal-title":"Proc Interspeech 2011"},{"key":"ref6","article-title":"Multimodal deep learning","author":"ngiam","year":"2011","journal-title":"Proc ICML2011"},{"key":"ref29","first-page":"221","article-title":"Improvement of lipreading performance using discriminative feature and speaker adaptation","author":"seko","year":"2013","journal-title":"Proc AVSP2013"},{"key":"ref5","article-title":"Investigation of robustness of deep bottleneck features for speakers of a variety of ages in speech recognition","author":"hayashi","year":"2014","journal-title":"Proc Forum Acusticum 2014"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-014-0629-7"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6639140"},{"key":"ref2","doi-asserted-by":"crossref","first-page":"237","DOI":"10.21437\/Interspeech.2011-91","article-title":"Improved bottleneck features using pretrained deep neural networks","author":"yu","year":"2011","journal-title":"Proc Interspeech 2011"},{"key":"ref9","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2015-204","article-title":"Integration of deep bottleneck features for audiovisual speech recognition","author":"ninomiya","year":"2015","journal-title":"Proceedings of Interspeech 2015"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2011.2109382"},{"key":"ref20","first-page":"153","article-title":"Greedy layer-wise training of deep networks","author":"bengio","year":"2007","journal-title":"Proc NIPS'06"},{"key":"ref22","article-title":"GIF-SP: GA-based informative feature for noisy speech recognition","author":"tamura","year":"2012","journal-title":"Proc APSIPA ASC 2012"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1162\/neco.2006.18.7.1527"},{"key":"ref24","first-page":"13","article-title":"Statistical face shape model separating inter-individual variation from intra-individual variation","volume":"113","author":"kojima","year":"2013","journal-title":"IEICE technical report (IBISML)"},{"key":"ref23","article-title":"GIF-LR: GA-based informative feature for lipreading","author":"ukai","year":"2012","journal-title":"Proc APSIPA ASC 2012"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2008.4587808"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.5244\/C.20.95"}],"event":{"name":"2015 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA)","start":{"date-parts":[[2015,12,16]]},"location":"Hong Kong","end":{"date-parts":[[2015,12,19]]}},"container-title":["2015 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7406495\/7415286\/07415335.pdf?arnumber=7415335","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,16]],"date-time":"2022-06-16T12:20:51Z","timestamp":1655382051000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/7415335\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2015,12]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/apsipa.2015.7415335","relation":{},"subject":[],"published":{"date-parts":[[2015,12]]}}}