{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,11]],"date-time":"2025-10-11T01:43:12Z","timestamp":1760146992690,"version":"build-2065373602"},"reference-count":40,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Consumer Electron."],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1109\/tce.2025.3565518","type":"journal-article","created":{"date-parts":[[2025,4,29]],"date-time":"2025-04-29T13:31:34Z","timestamp":1745933494000},"page":"5404-5413","source":"Crossref","is-referenced-by-count":0,"title":["Audio-Driven Talking Face Generation With Segmented Static Facial References for Customized Health Device Interactions"],"prefix":"10.1109","volume":"71","author":[{"given":"Zige","family":"Wang","sequence":"first","affiliation":[{"name":"Shaanxi University of Chinese Medicine, Xianyang, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-8280-0134","authenticated-orcid":false,"given":"Yashuai","family":"Wang","sequence":"additional","affiliation":[{"name":"Lab of ASGO, Northwestern Polytechnical University, Xi&#x2019;an, China"}]},{"given":"Tianyu","family":"Liu","sequence":"additional","affiliation":[{"name":"Lab of ASGO, Northwestern Polytechnical University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9690-7026","authenticated-orcid":false,"given":"Peng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Lab of ASGO, Northwestern Polytechnical University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8234-0823","authenticated-orcid":false,"given":"Lei","family":"Xie","sequence":"additional","affiliation":[{"name":"Lab of ASGO, Northwestern Polytechnical University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-5095-5206","authenticated-orcid":false,"given":"Yangming","family":"Guo","sequence":"additional","affiliation":[{"name":"Northwestern Polytechnical University, Xi&#x2019;an, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/tce.2024.3439577"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCE.2023.3321331"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TCE.2012.6170068"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TCE.2023.3315415"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TCE.2023.3342635"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TCE.2024.3406963"},{"key":"ref7","article-title":"Diff2Lip: Audio conditioned diffusion models for lip-synchronization","author":"Mukhopadhyay","year":"2023","journal-title":"arXiv:2308.09716"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00151"},{"key":"ref9","article-title":"DreamTalk: When expressive talking 
head generation meets diffusion probabilistic models","author":"Ma","year":"2023","journal-title":"arXiv:2312.09767"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33019299"},{"key":"ref11","article-title":"You said that","author":"Chung","year":"2017","journal-title":"arXiv:1705.02966"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/bf01420984"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1023\/a:1007977618277"},{"volume-title":"Out of Time: Automated Lip Sync in the Wild","year":"2017","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.3156\/jsoft.29.5_177_2"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3351066"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.109865"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_32"},{"key":"ref19","doi-asserted-by":"crossref","DOI":"10.1016\/j.specom.2023.103028","article-title":"LPIPS-AttnWav2Lip: Generic audio-driven lip synchronization for talking head generation in the wild","volume":"157","author":"Chen","year":"2024","journal-title":"Speech Commun."},{"key":"ref20","article-title":"GeneFace++: Generalized and stable real-time audio-driven 3D talking face generation","author":"Ye","year":"2023","journal-title":"arXiv:2305.00787"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ROMAN.1992.253860"},{"issue":"4","key":"ref22","doi-asserted-by":"crossref","first-page":"310","DOI":"10.36548\/jsws.2023.4.003","article-title":"Recent advancements of embedded system in HMI","volume":"5","author":"Darney","year":"2023","journal-title":"IRO J. Sustain. Wireless Syst."},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-62110-9_47"},{"key":"ref24","article-title":"Deep learning for fatigue estimation on the basis of multimodal human-machine interactions","author":"Gordienko","year":"2017","journal-title":"arXiv:1801.06048"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.30630\/joiv.6.3.949"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/s11063-023-11433-8"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-54184-6_6"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.367"},{"key":"ref29","first-page":"1","article-title":"Lip reading in profile","volume-title":"Proc. Brit. Mach. Vis. Conf.","author":"Chung"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2018-1929"},{"key":"ref31","first-page":"1","article-title":"Out of time: Automated lip sync in the wild","volume-title":"Proc. Workshop Multi-View Lip-Reading, ACCV","author":"Chung"},{"key":"ref32","article-title":"Audio-visual segmentation","author":"Zhou","year":"2023","journal-title":"arXiv:2207.05042"},{"issue":"11","key":"ref33","first-page":"1075","article-title":"Artificial intelligence-supported clear aligner orthodontic technology","volume":"59","author":"Xie","year":"2024","journal-title":"Chin. J. 
Stomatol."},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.17925\/HI.2020.14.1.9"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.2196\/mhealth.3789"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TCE.2021.3130228"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TCE.2021.3129316"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/mce.2019.2962163"},{"key":"ref39","article-title":"The value creation potential of digital humans","author":"Zirar","year":"2023","journal-title":"arXiv:2311.09226"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.62951\/icistech.v4i2.99"}],"container-title":["IEEE Transactions on Consumer Electronics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/30\/11128999\/10980001.pdf?arnumber=10980001","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T17:36:43Z","timestamp":1760117803000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10980001\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5]]},"references-count":40,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tce.2025.3565518","relation":{},"ISSN":["0098-3063","1558-4127"],"issn-type":[{"type":"print","value":"0098-3063"},{"type":"electronic","value":"1558-4127"}],"subject":[],"published":{"date-parts":[[2025,5]]}}}