{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,23]],"date-time":"2024-10-23T06:35:17Z","timestamp":1729665317799,"version":"3.28.0"},"reference-count":14,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2014,7]]},"DOI":"10.1109\/icmew.2014.6890554","type":"proceedings-article","created":{"date-parts":[[2014,9,10]],"date-time":"2014-09-10T15:50:12Z","timestamp":1410364212000},"page":"1-6","source":"Crossref","is-referenced-by-count":2,"title":["Realtime speech-driven facial animation using Gaussian Mixture Models"],"prefix":"10.1109","author":[{"family":"Changwei Luo","sequence":"first","affiliation":[]},{"family":"Jun Yu","sequence":"additional","affiliation":[]},{"family":"Xian Li","sequence":"additional","affiliation":[]},{"family":"Zengfu Wang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"13","doi-asserted-by":"publisher","DOI":"10.1137\/0916069"},{"key":"14","doi-asserted-by":"crossref","first-page":"145","DOI":"10.1109\/TSMCC.2009.2035631","article-title":"A review of active appearance models","volume":"40","author":"gao","year":"2010","journal-title":"IEEE Trans Syst Man Cybern Part C"},{"key":"11","doi-asserted-by":"publisher","DOI":"10.1016\/S0923-5965(99)00055-7"},{"key":"12","doi-asserted-by":"publisher","DOI":"10.1145\/1964921.1964972"},{"key":"3","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.2002.1021892"},{"key":"2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2012.6288925"},{"key":"1","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2004.1334656"},{"key":"10","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2008-590","article-title":"LIPS2008: Visual speech synthesis challenge","author":"theobald","year":"2008","journal-title":"Proceedings of Interspeech"},{"key":"7","doi-asserted-by":"crossref","DOI":"10.1145\/566654.566594","article-title":"Trainable video realistic speech animation","author":"ezzat","year":"2002","journal-title":"Proceedings ACM SIGGRAPH"},{"key":"6","doi-asserted-by":"publisher","DOI":"10.1109\/89.661472"},{"key":"5","doi-asserted-by":"publisher","DOI":"10.1016\/j.specom.2007.09.001"},{"key":"4","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2009-643","article-title":"Direct, modular and hybrid audio to visual speech conversion methods-a comparative study","author":"takacs","year":"2009","journal-title":"Proc INTERSPEECH"},{"key":"9","article-title":"3d shape regression for real-time facial animation","author":"cao","year":"2013","journal-title":"Proceedings SIGGRAPH 2013"},{"key":"8","article-title":"A data-driven approach for facial expression synthesis in video","author":"li","year":"2012","journal-title":"CVPR"}],"event":{"name":"2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)","start":{"date-parts":[[2014,7,14]]},"location":"Chengdu, China","end":{"date-parts":[[2014,7,18]]}},"container-title":["2014 IEEE International Conference on Multimedia and Expo Workshops (ICMEW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6882610\/6890528\/06890554.pdf?arnumber=6890554","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,16]],"date-time":"2022-04-16T20:21:08Z","timestamp":1650140468000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/6890554\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2014,7]]},"references-count":14,"URL":"https:\/\/doi.org\/10.1109\/icmew.2014.6890554","relation":{},"subject":[],"published":{"date-parts":[[2014,7]]}}}