{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:38:40Z","timestamp":1740101920910,"version":"3.37.3"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,10,26]],"date-time":"2022-10-26T00:00:00Z","timestamp":1666742400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,10,26]],"date-time":"2022-10-26T00:00:00Z","timestamp":1666742400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001475","name":"Nanyang Technological University","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001475","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001459","name":"Singapore Ministry of Education","doi-asserted-by":"publisher","award":["Tier 1 RG97\/20,Tier 1 RG24\/20,Tier 2 MOE2019-T2-1-176"],"award-info":[{"award-number":["Tier 1 RG97\/20,Tier 1 RG24\/20,Tier 2 MOE2019-T2-1-176"]}],"id":[{"id":"10.13039\/501100001459","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,10,26]]},"DOI":"10.1109\/wf-iot54382.2022.10152230","type":"proceedings-article","created":{"date-parts":[[2023,6,23]],"date-time":"2023-06-23T23:49:29Z","timestamp":1687564169000},"page":"1-6","source":"Crossref","is-referenced-by-count":1,"title":["Facial Landmark Predictions with Applications to Metaverse"],"prefix":"10.1109","author":[{"given":"Qiao","family":"Han","sequence":"first","affiliation":[{"name":"Nanyang Technological University Singapore,Computer Science and Engineering,Singapore"}]},{"given":"Jun","family":"Zhao","sequence":"additional","affiliation":[{"name":"Nanyang Technological University Singapore,Computer Science and Engineering,Singapore"}]},{"given":"Kwok-Yan","family":"Lam","sequence":"additional","affiliation":[{"name":"Nanyang Technological University Singapore,Computer Science and Engineering,Singapore"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3313831.3376606"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2015.08.011"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2013.434"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2018.00019"},{"key":"ref6","article-title":"A new language independent, photo-realistic talking head driven by voice only","author":"Zhang","year":"2013","journal-title":"Microsoft. ISCA - International Speech Communication Association"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178899"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073658"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00009"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-93764-9_35"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2947741"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953092"},{"article-title":"Photo-realistic expressive text to talking head synthesis","volume-title":"Proceedings of the Annual Conference of the International Speech Communication Association","author":"Wan","key":"ref13"},{"key":"ref14","first-page":"3307","article-title":"Text driven 3D photo-realistic talking head","volume-title":"Proceedings of the Annual Conference of the International Speech Communication Association","author":"Wang"},{"issue":"8","key":"ref15","doi-asserted-by":"crossref","first-page":"2325","DOI":"10.1016\/j.patcog.2006.12.001","article-title":"A coupled hmm approach to video-realistic speech animation","volume":"40","author":"Xie","year":"2007","journal-title":"Pattern Recognition"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3197517.3201292"},{"key":"ref17","article-title":"LRS3-TED: a large-scale dataset for visual speech recognition","author":"Afouras","year":"2018","journal-title":"arXiv preprint"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.169"},{"volume-title":"The LJ speech dataset","year":"2017","author":"Ito","key":"ref19"},{"key":"ref20","article-title":"Maximizing mutual information for Tacotron","author":"Liu","year":"2019","journal-title":"arXiv preprint"},{"key":"ref21","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv preprint"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1117\/12.2520589"},{"volume-title":"Animating face using disentangled audio representations","year":"2019","author":"Mittal","key":"ref23"}],"event":{"name":"2022 IEEE 8th World Forum on Internet of Things (WF-IoT)","start":{"date-parts":[[2022,10,26]]},"location":"Yokohama, Japan","end":{"date-parts":[[2022,11,11]]}},"container-title":["2022 IEEE 8th World Forum on Internet of Things (WF-IoT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10151825\/10152024\/10152230.pdf?arnumber=10152230","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T10:24:03Z","timestamp":1709288643000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10152230\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,26]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/wf-iot54382.2022.10152230","relation":{},"subject":[],"published":{"date-parts":[[2022,10,26]]}}}