{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,8]],"date-time":"2025-09-08T06:24:14Z","timestamp":1757312654703,"version":"3.37.3"},"reference-count":57,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2019,2,8]],"date-time":"2019-02-08T00:00:00Z","timestamp":1549584000000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Machine Vision and Applications"],"published-print":{"date-parts":[[2019,3]]},"DOI":"10.1007\/s00138-019-01006-y","type":"journal-article","created":{"date-parts":[[2019,2,8]],"date-time":"2019-02-08T13:29:59Z","timestamp":1549632599000},"page":"217-229","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Spotting words in silent speech videos: a retrieval-based approach"],"prefix":"10.1007","volume":"30","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0350-2474","authenticated-orcid":false,"given":"Abhishek","family":"Jha","sequence":"first","affiliation":[]},{"given":"Vinay P.","family":"Namboodiri","sequence":"additional","affiliation":[]},{"given":"C. V.","family":"Jawahar","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2019,2,8]]},"reference":[{"key":"1006_CR1","doi-asserted-by":"crossref","unstructured":"Arandjelovi\u0107, R., Zisserman, A.: Three things everyone should know to improve object retrieval. In: CVPR (2012)","DOI":"10.1109\/CVPR.2012.6248018"},{"key":"1006_CR2","unstructured":"Assael, Y.M., Shillingford, B., Whiteson, S., de\u00a0Freitas, N.: Lipnet: Sentence-level lipreading. 
arXiv preprint arXiv:1611.01599 (2016)"},{"key":"1006_CR3","unstructured":"Bahdanau, D., Cho, K., Bengio, Y.: Neural machine translation by jointly learning to align and translate (2014). arXiv preprint arXiv:1409.0473"},{"key":"1006_CR4","unstructured":"Basu, S., Oliver, N., Pentland, A.: 3d modeling and tracking of human lip motions. In: ICCV (1998)"},{"key":"1006_CR5","volume-title":"Connectionist Speech Recognition: A Hybrid Approach","author":"HA Bourlard","year":"2012","unstructured":"Bourlard, H.A., Morgan, N.: Connectionist Speech Recognition: A Hybrid Approach, vol. 247. Springer, Berlin (2012)"},{"key":"1006_CR6","unstructured":"Bradski, G.: The opencv library. Dr. Dobb\u2019s J.: Softw. Tools Prof. Progr. 25(11), 120, 122\u2013125 (2000)"},{"key":"1006_CR7","unstructured":"Brooke\u00a0N.M, S.S.: Pca image coding schemes and visual speech intelligibility. In: Proceedings of the Institute of Acoustics, vol.\u00a016 (1994)"},{"key":"1006_CR8","doi-asserted-by":"crossref","unstructured":"Chan, W., Jaitly, N., Le, Q., Vinyals, O.: Listen, attend and spell: a neural network for large vocabulary conversational speech recognition. In: ICASSP, pp. 4960\u20134964 (2016)","DOI":"10.1109\/ICASSP.2016.7472621"},{"key":"1006_CR9","doi-asserted-by":"crossref","unstructured":"Chatfield, K., Simonyan, K., Vedaldi, A., Zisserman, A.: Return of the devil in the details: delving deep into convolutional nets. In: BMVC (2014)","DOI":"10.5244\/C.28.6"},{"key":"1006_CR10","unstructured":"Cho, K., Van\u00a0Merri\u00ebnboer, B., Gulcehre, C., Bahdanau, D., Bougares, F., Schwenk, H., Bengio, Y.: Learning phrase representations using rnn encoder-decoder for statistical machine translation (2014). arXiv preprint arXiv:1406.1078"},{"key":"1006_CR11","unstructured":"Chollet, F., et al.: Keras. https:\/\/keras.io (2015)"},{"key":"1006_CR12","unstructured":"Chorowski, J., Jaitly, N.: Towards better decoding and language model integration in sequence to sequence models (2016). 
arXiv preprint arXiv:1612.02695"},{"key":"1006_CR13","doi-asserted-by":"crossref","unstructured":"Chung, J.S., Senior, A., Vinyals, O., Zisserman, A.: Lip reading sentences in the wild. In: CVPR (2016)","DOI":"10.1109\/CVPR.2017.367"},{"key":"1006_CR14","unstructured":"Chung, J.S., Zisserman, A.: Lip reading in the wild. In: ACCV (2016)"},{"key":"1006_CR15","unstructured":"Chung, J.S., Zisserman, A.: Out of time: automated lip sync in the wild. In: ACCV (2016)"},{"issue":"5","key":"1006_CR16","doi-asserted-by":"publisher","first-page":"2421","DOI":"10.1121\/1.2229005","volume":"120","author":"M Cooke","year":"2006","unstructured":"Cooke, M., Barker, J., Cunningham, S., Shao, X.: An audio-visual corpus for speech perception and automatic speech recognition. J. Acoust. Soc. Am. 120(5), 2421\u20132424 (2006)","journal-title":"J. Acoust. Soc. Am."},{"key":"1006_CR17","doi-asserted-by":"crossref","unstructured":"Doetsch, P., Kozielski, M., Ney, H.: Fast and robust training of recurrent neural networks for offline handwriting recognition. In: ICFHR (2014)","DOI":"10.1109\/ICFHR.2014.54"},{"key":"1006_CR18","doi-asserted-by":"crossref","unstructured":"Fergus, R., Perona, P., Zisserman, A.: A visual category filter for google images. In: ECCV (2004)","DOI":"10.1007\/978-3-540-24670-1_19"},{"key":"1006_CR19","doi-asserted-by":"crossref","unstructured":"Fern\u00e1ndez, S., Graves, A., Schmidhuber, J.: An application of recurrent neural networks to discriminative keyword spotting. In: ICANN (2007)","DOI":"10.1007\/978-3-540-74695-9_23"},{"key":"1006_CR20","doi-asserted-by":"crossref","unstructured":"Fischer, A., Keller, A., Frinken, V., Bunke, H.: HMM-based word spotting in handwritten documents using subword models. 
In: ICPR (2010)","DOI":"10.1109\/ICPR.2010.834"},{"issue":"6","key":"1006_CR21","doi-asserted-by":"publisher","first-page":"381","DOI":"10.1145\/358669.358692","volume":"24","author":"MA Fischler","year":"1981","unstructured":"Fischler, M.A., Bolles, R.C.: Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Commun. ACM 24(6), 381\u2013395 (1981)","journal-title":"Commun. ACM"},{"issue":"2","key":"1006_CR22","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1109\/TPAMI.2011.113","volume":"34","author":"V Frinken","year":"2012","unstructured":"Frinken, V., Fischer, A., Manmatha, R., Bunke, H.: A novel word spotting method based on recurrent neural networks. IEEE TPAMI 34(2), 211\u2013224 (2012)","journal-title":"IEEE TPAMI"},{"key":"1006_CR23","doi-asserted-by":"publisher","first-page":"310","DOI":"10.1016\/j.patcog.2017.02.023","volume":"68","author":"AP Giotis","year":"2017","unstructured":"Giotis, A.P., Sfikas, G., Gatos, B., Nikou, C.: A survey of document image word spotting techniques. Pattern Recognit. 68, 310\u2013332 (2017)","journal-title":"Pattern Recognit."},{"key":"1006_CR24","doi-asserted-by":"crossref","unstructured":"Gish, H., Ng, K.: A segmental speech model with applications to word spotting. In: ICASSP, vol.\u00a02 (1993)","DOI":"10.1109\/ICASSP.1993.319337"},{"key":"1006_CR25","doi-asserted-by":"crossref","unstructured":"Graves, A., Fern\u00e1ndez, S., Gomez, F., Schmidhuber, J.: Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks. In: ICML (2006)","DOI":"10.1145\/1143844.1143891"},{"key":"1006_CR26","doi-asserted-by":"crossref","unstructured":"Graves, A., Fern\u00e1ndez, S., Schmidhuber, J.: Bidirectional LSTM networks for improved phoneme classification and recognition. 
In: ICANN (2005)","DOI":"10.1007\/11550907_126"},{"key":"1006_CR27","unstructured":"Graves, A., Jaitly, N.: Towards end-to-end speech recognition with recurrent neural networks. In: ICML, pp. 1764\u20131772 (2014)"},{"key":"1006_CR28","unstructured":"Hannun, A., Case, C., Casper, J., Catanzaro, B., Diamos, G., Elsen, E., Prenger, R., Satheesh, S., Sengupta, S., Coates, A., et\u00a0al.: Deep speech: scaling up end-to-end speech recognition (2014). arXiv preprint arXiv:1412.5567"},{"key":"1006_CR29","unstructured":"Hassanat, A.B.: Visual words for automatic lip-reading (2014). arXiv preprint arXiv:1409.6689"},{"key":"1006_CR30","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"1006_CR31","unstructured":"Hennecke, M.E.: Audio-visual speech recognition: preprocessing, learning and sensory integration. PhD thesis, Stanford Univ. (1997)"},{"key":"1006_CR32","unstructured":"Hinton, G., Deng, L., Yu, D., Dahl, G.E., Mohamed, A.r., Jaitly, N., Senior, A., Vanhoucke, V., Nguyen, P., Sainath, T.N., et\u00a0al.: Deep neural networks for acoustic modeling in speech recognition: the shared views of four research groups. IEEE Signal Process. Mag. 29(6), 82\u201397 (2012)"},{"issue":"3","key":"1006_CR33","doi-asserted-by":"publisher","first-page":"157","DOI":"10.1007\/BF02626995","volume":"5","author":"TK Ho","year":"1992","unstructured":"Ho, T.K., Hull, J.J., Srihari, S.N.: A computational model for recognition of multifont word images. Mach. Vis. Appl. 5(3), 157\u2013168 (1992)","journal-title":"Mach. Vis. Appl."},{"issue":"8","key":"1006_CR34","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Comput. 
9(8), 1735\u20131780 (1997)","journal-title":"Neural Comput."},{"key":"1006_CR35","doi-asserted-by":"crossref","unstructured":"Jha, A., Namboodiri, V., Jawahar, C.V.: Word spotting in silent lip videos. In: WACV (2018)","DOI":"10.1109\/WACV.2018.00023"},{"issue":"1","key":"1006_CR36","doi-asserted-by":"publisher","first-page":"221","DOI":"10.1109\/TPAMI.2012.59","volume":"35","author":"S Ji","year":"2013","unstructured":"Ji, S., Xu, W., Yang, M., Yu, K.: 3d convolutional neural networks for human action recognition. IEEE TPAMI 35(1), 221\u2013231 (2013)","journal-title":"IEEE TPAMI"},{"issue":"4","key":"1006_CR37","doi-asserted-by":"publisher","first-page":"317","DOI":"10.1016\/j.specom.2008.10.002","volume":"51","author":"J Keshet","year":"2009","unstructured":"Keshet, J., Grangier, D., Bengio, S.: Discriminative keyword spotting. Speech Commun. 51(4), 317\u2013329 (2009)","journal-title":"Speech Commun."},{"key":"1006_CR38","first-page":"1755","volume":"10","author":"DE King","year":"2009","unstructured":"King, D.E.: Dlib-ml: A machine learning toolkit. J. Mach. Learn. Res. 10, 1755\u20131758 (2009)","journal-title":"J. Mach. Learn. Res."},{"key":"1006_CR39","doi-asserted-by":"crossref","unstructured":"Krishnan, P., Jawahar, C.V.: Bringing semantics in word image retrieval. In: ICDAR (2013)","DOI":"10.1109\/ICDAR.2013.150"},{"issue":"5","key":"1006_CR40","first-page":"767","volume":"10","author":"JS Lee","year":"2008","unstructured":"Lee, J.S., Park, C.H.: Robust audio-visual speech recognition based on late integration. IEEE TMM 10(5), 767\u2013779 (2008)","journal-title":"IEEE TMM"},{"key":"1006_CR41","doi-asserted-by":"crossref","unstructured":"Liu, H., Fan, T., Wu, P.: Audio-visual keyword spotting based on adaptive decision fusion under noisy conditions for human-robot interaction. In: ICRA, pp. 
6644\u20136651 (2014)","DOI":"10.1109\/ICRA.2014.6907840"},{"key":"1006_CR42","doi-asserted-by":"crossref","unstructured":"Manmatha, R., Han, C., Riseman, E.M.: Word spotting: A new approach to indexing handwriting. In: CVPR (1996)","DOI":"10.1109\/CVPR.1996.517139"},{"issue":"1","key":"1006_CR43","doi-asserted-by":"publisher","first-page":"14","DOI":"10.1109\/TASL.2011.2109382","volume":"20","author":"AR Mohamed","year":"2012","unstructured":"Mohamed, A.R., Dahl, G.E., Hinton, G., et al.: Acoustic modeling using deep belief networks. IEEE Trans. Audio Speech Lang. Process. 20(1), 14\u201322 (2012)","journal-title":"IEEE Trans. Audio Speech Lang. Process."},{"key":"1006_CR44","doi-asserted-by":"crossref","unstructured":"Robinson, T., Hochberg, M., Renals, S.: The use of recurrent neural networks in continuous speech recognition. In: Automatic Speech and Speaker Recognition, pp. 233\u2013258. Springer, Berlin (1996)","DOI":"10.1007\/978-1-4613-1367-0_10"},{"key":"1006_CR45","unstructured":"Rohlicek, J.R., Russell, W., Roukos, S., Gish, H.: Continuous hidden Markov modeling for speaker-independent word spotting. In: ICASSP (1989)"},{"key":"1006_CR46","unstructured":"Stafylakis, T., Tzimiropoulos, G.: Combining residual networks with LSTMs for lipreading (2017). arXiv preprint arXiv:1703.04105"},{"key":"1006_CR47","unstructured":"Stafylakis, T., Tzimiropoulos, G.: Zero-shot keyword spotting for visual speech recognition in-the-wild (2018). arXiv preprint arXiv:1807.08469"},{"issue":"1","key":"1006_CR48","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s00138-012-0445-1","volume":"24","author":"S Stillittano","year":"2013","unstructured":"Stillittano, S., Girondel, V., Caplier, A.: Lip contour segmentation and tracking compliant with lip-reading application constraints. Mach. Vis. Appl. 24(1), 1\u201318 (2013)","journal-title":"Mach. Vis. 
Appl."},{"key":"1006_CR49","doi-asserted-by":"crossref","unstructured":"Sudholt, S., Fink, G.A.: Phocnet: A deep convolutional neural network for word spotting in handwritten documents. In: ICFHR (2016)","DOI":"10.1109\/ICFHR.2016.0060"},{"key":"1006_CR50","unstructured":"Sutskever, I., Vinyals, O., Le, Q.V.: Sequence to sequence learning with neural networks. In: NIPS (2014)"},{"key":"1006_CR51","doi-asserted-by":"crossref","unstructured":"Tsai, S.S., Chen, D., Takacs, G., Chandrasekhar, V., Vedantham, R., Grzeszczuk, R., Girod, B.: Fast geometric re-ranking for image-based retrieval. In: ICIP (2010)","DOI":"10.1109\/ICIP.2010.5648942"},{"key":"1006_CR52","doi-asserted-by":"crossref","unstructured":"Wand, M., Koutn\u00edk, J., Schmidhuber, J.: Lipreading with long short-term memory. In: ICASSP (2016)","DOI":"10.1109\/ICASSP.2016.7472852"},{"key":"1006_CR53","doi-asserted-by":"crossref","unstructured":"Wang, K., Belongie, S.: Word spotting in the wild. In: ECCV (2010)","DOI":"10.1007\/978-3-642-15549-9_43"},{"issue":"3","key":"1006_CR54","first-page":"326","volume":"18","author":"P Wu","year":"2016","unstructured":"Wu, P., Liu, H., Li, X., Fan, T., Zhang, X.: A novel lip descriptor for audio-visual keyword spotting based on adaptive decision fusion. IEEE TMM 18(3), 326\u2013338 (2016)","journal-title":"IEEE TMM"},{"key":"1006_CR55","unstructured":"Xiong, W., Droppo, J., Huang, X., Seide, F., Seltzer, M., Stolcke, A., Yu, D., Zweig, G.: Achieving human parity in conversational speech recognition (2016). arXiv preprint arXiv:1610.05256"},{"key":"1006_CR56","doi-asserted-by":"crossref","unstructured":"Zhang, X.Y., Yin, F., Zhang, Y.M., Liu, C.L., Bengio, Y.: Drawing and recognizing chinese characters with recurrent neural network. 
IEEE TPAMI 849\u2014862 (2017)","DOI":"10.1109\/TPAMI.2017.2695539"},{"issue":"9","key":"1006_CR57","doi-asserted-by":"publisher","first-page":"590","DOI":"10.1016\/j.imavis.2014.06.004","volume":"32","author":"Z Zhou","year":"2014","unstructured":"Zhou, Z., Zhao, G., Hong, X., Pietik\u00e4inen, M.: A review of recent advances in visual speech decoding. Image Vis. Comput. 32(9), 590\u2013605 (2014)","journal-title":"Image Vis. Comput."}],"container-title":["Machine Vision and Applications"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/article\/10.1007\/s00138-019-01006-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-019-01006-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-019-01006-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,11]],"date-time":"2022-09-11T16:24:17Z","timestamp":1662913457000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/s00138-019-01006-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,2,8]]},"references-count":57,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2019,3]]}},"alternative-id":["1006"],"URL":"https:\/\/doi.org\/10.1007\/s00138-019-01006-y","relation":{},"ISSN":["0932-8092","1432-1769"],"issn-type":[{"type":"print","value":"0932-8092"},{"type":"electronic","value":"1432-1769"}],"subject":[],"published":{"date-parts":[[2019,2,8]]},"assertion":[{"value":"24 October 2018","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 January 2019","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article 
History"}},{"value":"8 February 2019","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}