{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T04:41:00Z","timestamp":1750308060698,"version":"3.41.0"},"publisher-location":"Cham","reference-count":35,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783319585291"},{"type":"electronic","value":"9783319585307"}],"license":[{"start":{"date-parts":[[2017,1,1]],"date-time":"2017-01-01T00:00:00Z","timestamp":1483228800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2017,1,1]],"date-time":"2017-01-01T00:00:00Z","timestamp":1483228800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2017]]},"DOI":"10.1007\/978-3-319-58530-7_29","type":"book-chapter","created":{"date-parts":[[2017,5,13]],"date-time":"2017-05-13T15:32:20Z","timestamp":1494689540000},"page":"369-387","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Silent Speech Interaction for Ambient Assisted Living Scenarios"],"prefix":"10.1007","author":[{"given":"Ant\u00f3nio","family":"Teixeira","sequence":"first","affiliation":[]},{"given":"Nuno","family":"Vitor","sequence":"additional","affiliation":[]},{"given":"Jo\u00e3o","family":"Freitas","sequence":"additional","affiliation":[]},{"given":"Samuel","family":"Silva","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2017,5,14]]},"reference":[{"key":"29_CR1","unstructured":"Abreu, H.: Visual speech recognition for European Portuguese. Master thesis, Universidade do Minho (2014)"},{"key":"29_CR2","unstructured":"Bradski, G., Kaehler, A.: Learning OpenCV: computer vision with the OpenCV library. O\u2019Reilly Media, Inc. (2008)"},{"issue":"4","key":"29_CR3","doi-asserted-by":"publisher","first-page":"367","DOI":"10.1016\/j.specom.2010.01.001","volume":"52","author":"JS Brumberg","year":"2010","unstructured":"Brumberg, J.S., Nieto-Castanon, A., Kennedy, P.R., Guenther, F.H.: Brain-computer interfaces for speech communication. Speech Commun. 52(4), 367\u2013379 (2010). http:\/\/dx.doi.org\/10.1016\/j.specom.2010.01.001","journal-title":"Speech Commun."},{"key":"29_CR4","doi-asserted-by":"crossref","unstructured":"Dalka, P., Bratoszewski, P., Czyzewski, A.: Visual lip contour detection for the purpose of speech recognition. In: Proceedings of the International Signals and Electronic Systems (ICSES) Conference, pp. 1\u20134, September 2014","DOI":"10.1109\/ICSES.2014.6948716"},{"key":"29_CR5","unstructured":"De Smedt, K.: 11 computational models of incremental grammatical encoding. In: Computational Psycholinguistics: AI and Connectionist Models of Human Language Processing, pp. 279\u2013307 (1996)"},{"issue":"4","key":"29_CR6","doi-asserted-by":"publisher","first-page":"270","DOI":"10.1016\/j.specom.2009.08.002","volume":"52","author":"B Denby","year":"2010","unstructured":"Denby, B., Schultz, T., Honda, K., Hueber, T., Gilbert, J.M., Brumberg, J.S.: Silent speech interfaces. Speech Commun. 52(4), 270\u2013287 (2010)","journal-title":"Speech Commun."},{"key":"29_CR7","unstructured":"Freitas, J., Candeias, S., Dias, M.S., Lleida, E., Ortega, A., Teixeira, A., Orvalho, V.: The IRIS project: a liaison between industry and academia towards natural multimodal communication. In: Proceedings of the IberSPeech. Las Palmas, Spain (2014)"},{"key":"29_CR8","volume-title":"An Introduction to Silent Speech Interfaces","author":"J Freitas","year":"2016","unstructured":"Freitas, J., Teixeira, A., Sales Dias, M., Silva, S.: An Introduction to Silent Speech Interfaces. Springer, Heidelberg (2016)"},{"key":"29_CR9","doi-asserted-by":"crossref","unstructured":"Freitas, J., Teixeira, A., Bastos, C., Dias, M.: Towards a multimodal silent speech interface for European Portuguese. In: Speech Technologies, pp. 125\u2013149. InTech (2011)","DOI":"10.5772\/16935"},{"key":"29_CR10","doi-asserted-by":"crossref","unstructured":"Freitas, J., Teixeira, A., Dias, M.S.: Towards a silent speech interface for portuguese. In: Proceedings o the Biosignals, pp. 91\u2013100 (2012)","DOI":"10.5772\/16935"},{"key":"29_CR11","unstructured":"Freitas, J., Teixeira, A., Dias, M.S.: Multimodal silent speech interface based on video, depth, surface electromyography and ultrasonic doppler: data collection and first recognition results. In: International Workshop on Speech Production in Automatic Speech Recognition (2013)"},{"key":"29_CR12","unstructured":"Freitas, J., Teixeira, A.J., Dias, M.S.: Multimodal corpora for silent speech interaction. In: LREC, pp. 4507\u20134511 (2014)"},{"key":"29_CR13","doi-asserted-by":"crossref","unstructured":"Frisky, A.Z.K., Wang, C.Y., Santoso, A., Wang, J.C.: Lip-based visual speech recognition system. In: Proceedings of the International Security Technology (ICCST) Carnahan Conference, pp. 315\u2013319, September 2015","DOI":"10.1109\/CCST.2015.7389703"},{"key":"29_CR14","doi-asserted-by":"crossref","unstructured":"Galatas, G., Potamianos, G., Makedon, F.: Audio-visual speech recognition incorporating facial depth information captured by the kinect. In: 2012 Proceedings of the 20th European Signal Processing Conference (EUSIPCO), pp. 2714\u20132717. IEEE (2012)","DOI":"10.1145\/2413097.2413100"},{"key":"29_CR15","doi-asserted-by":"crossref","unstructured":"Gokturk, S.B., Yalcin, H., Bamji, C.: A time-of-flight depth sensor-system description, issues and solutions. In: Conference on Computer Vision and Pattern Recognition Workshopp, CVPRW 2004, pp. 35\u201335. IEEE (2004)","DOI":"10.1109\/CVPR.2004.291"},{"issue":"5","key":"29_CR16","doi-asserted-by":"publisher","first-page":"647","DOI":"10.1177\/0278364911434148","volume":"31","author":"P Henry","year":"2012","unstructured":"Henry, P., Krainin, M., Herbst, E., Ren, X., Fox, D.: Rgb-d mapping: using kinect-style depth cameras for dense 3D modeling of indoor environments. Int. J. Robot. Res. 31(5), 647\u2013663 (2012)","journal-title":"Int. J. Robot. Res."},{"key":"29_CR17","unstructured":"Lanaria, V.: VLC, the world\u2019s most popular media player, turns 15 years old: here\u2019s why you should download it now (2016)"},{"issue":"6","key":"29_CR18","doi-asserted-by":"publisher","first-page":"223","DOI":"10.1016\/S1364-6613(99)01319-4","volume":"3","author":"WJ Levelt","year":"1999","unstructured":"Levelt, W.J.: Models of word production. Trends Cogn. Sci. 3(6), 223\u2013232 (1999)","journal-title":"Trends Cogn. Sci."},{"key":"29_CR19","doi-asserted-by":"crossref","unstructured":"Matsumoto, M.: Silent speech decoder using adaptive collection. In: Proceedings of the Companion Publication of the 19th International Conference on Intelligent User Interfaces, IUI Companion 2014, ACM, New York, pp. 73\u201376 (2014). http:\/\/doi.acm.org\/10.1145\/2559184.2559190","DOI":"10.1145\/2559184.2559190"},{"key":"29_CR20","unstructured":"Microsoft: Face tracking (2016). https:\/\/msdn.microsoft.com\/pt-pt\/library\/dn782034.aspx"},{"key":"29_CR21","unstructured":"Microsoft: high detail face points (2016). https:\/\/msdn.microsoft.com\/en-us\/library\/microsoft.kinect.face.highdetailfacepoints"},{"key":"29_CR22","doi-asserted-by":"crossref","unstructured":"Oikonomidis, I., Kyriazis, N., Argyros, A.A.: Efficient model-based 3D tracking of hand articulations using kinect. In: BmVC, vol. 1, p. 3 (2011)","DOI":"10.5244\/C.25.101"},{"key":"29_CR23","unstructured":"Porbadnigk, A., Wester, M., p Calliess, J., Schultz, T.: Eeg-based speech recognition impact of temporal effects. In: 2nd International Conference on Bio-inspired Systems and Signal Processing (Biosignals 2009) (2009)"},{"key":"29_CR24","volume-title":"Fundamentals of speech recognition","author":"L Rabiner","year":"1993","unstructured":"Rabiner, L., Juang, B.H.: Fundamentals of speech recognition. Prentice hall, Englewood Cliffs (1993)"},{"key":"29_CR25","doi-asserted-by":"crossref","unstructured":"Rao, R.A., Mersereau, R.M.: Lip modeling for visual speech recognition. In: Proceedings of the Conference on Signals, Systems and Computers Record of the Twenty-Eighth Asilomar Conference vol. 1, pp. 587\u2013590, 1 October 1994","DOI":"10.1109\/ACSSC.1994.471520"},{"key":"29_CR26","unstructured":"Rodriguez, Y.L., Teixeira, A.: On the detection and classification of frames from European Portuguese oral and nasal vowels. In: Proceedings of the FALA 2010 (2010)"},{"key":"29_CR27","doi-asserted-by":"crossref","unstructured":"Saenko, K., Darrell, T., Glass, J.R.: Articulatory features for robust visual speech recognition. In: Proceedings of the 6th International Conference on Multimodal Interfaces, ICMI 2004, ACM, New York, pp. 152\u2013158 (2004). http:\/\/doi.acm.org\/10.1145\/1027933.1027960","DOI":"10.1145\/1027933.1027960"},{"key":"29_CR28","doi-asserted-by":"crossref","unstructured":"Sahni, H., Bedri, A., Reyes, G., Thukral, P., Guo, Z., Starner, T., Ghovanloo, M.: The tongue and ear interface: a wearable system for silent speech recognition. In: Proceedings of the 2014 ACM International Symposium on Wearable Computers, ISWC 2014, ACM, New York, pp. 47\u201354 (2014). http:\/\/doi.acm.org\/10.1145\/2634317.2634322","DOI":"10.1145\/2634317.2634322"},{"key":"29_CR29","unstructured":"Seikel, J.A., King, D.W., Drumright, D.G.: Anatomy and physiology for speech, language, and hearing. Delmar Learning, 4th edn. (2009)"},{"key":"29_CR30","first-page":"271","volume-title":"Multimodal Interaction with W3C Standards: Towards Natural User Interfaces to Everything","author":"A Teixeira","year":"2016","unstructured":"Teixeira, A., Almeida, N., Pereira, C., Silva, M., Vieira, D., Silva, S.: Applications of the multimodal interaction architecture in ambient assisted living. In: Dahl, D. (ed.) Multimodal Interaction with W3C Standards: Towards Natural User Interfaces to Everything, pp. 271\u2013291. Springer, New York (2016)"},{"key":"29_CR31","unstructured":"TeraRanger: Time-of-flight principle (2016). http:\/\/www.teraranger.com\/technology\/time-of-flight-principle\/"},{"key":"29_CR32","doi-asserted-by":"crossref","unstructured":"Wand, M., Koutn, J., et al.: Lipreading with long short-term memory. In: 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6115\u20136119. IEEE (2016)","DOI":"10.1109\/ICASSP.2016.7472852"},{"key":"29_CR33","doi-asserted-by":"crossref","unstructured":"Werda, S., Mahdi, W., Hamadou, A.B.: Lip localization and viseme classification for visual speech recognition. arXiv preprint arXiv:1301.4558 (2007)","DOI":"10.3233\/ICA-2008-15305"},{"key":"29_CR34","volume-title":"Data Mining - Practical Machine Learning Tools and Techniques","author":"IH Witten","year":"2011","unstructured":"Witten, I.H., Frank, E., Hall, M.A.: Data Mining - Practical Machine Learning Tools and Techniques, 3rd edn. Morgan Kaufmann, San Francisco (2011)","edition":"3"},{"key":"29_CR35","doi-asserted-by":"crossref","unstructured":"Yargic, A., Dogan, M.: A lip reading application on MS Kinect camera. In: 2013 IEEE International Symposium on Innovations in Intelligent Systems and Applications (INISTA), pp. 1\u20135. IEEE (2013)","DOI":"10.1109\/INISTA.2013.6577656"}],"container-title":["Lecture Notes in Computer Science","Human Aspects of IT for the Aged Population. Aging, Design and User Experience"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-319-58530-7_29","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T15:30:46Z","timestamp":1750260646000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-319-58530-7_29"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017]]},"ISBN":["9783319585291","9783319585307"],"references-count":35,"URL":"https:\/\/doi.org\/10.1007\/978-3-319-58530-7_29","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2017]]},"assertion":[{"value":"14 May 2017","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ITAP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Human Aspects of IT for the Aged Population","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vancouver","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Canada","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2017","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9 July 2017","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 July 2017","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"3","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"itap2017","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"This content has been made available to all.","name":"free","label":"Free to read"}]}}