{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T13:27:55Z","timestamp":1777642075461,"version":"3.51.4"},"reference-count":40,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,10,1]],"date-time":"2026-10-01T00:00:00Z","timestamp":1790812800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,10,1]],"date-time":"2026-10-01T00:00:00Z","timestamp":1790812800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,3,2]],"date-time":"2026-03-02T00:00:00Z","timestamp":1772409600000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/100014013","name":"UK Research and Innovation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100014013","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100018693","name":"Horizon Europe","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100018693","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Pattern Recognition"],"published-print":{"date-parts":[[2026,10]]},"DOI":"10.1016\/j.patcog.2026.113415","type":"journal-article","created":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T16:24:14Z","timestamp":1772555054000},"page":"113415","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["HMamba-3DFT: A hierarchical mamba framework for emotion-driven semantic 3D facial tracking"],"prefix":"10.1016","volume":"178","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-3918-648X","authenticated-orcid":false,"given":"Haodong","family":"Jin","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4249-2264","authenticated-orcid":false,"given":"Muwei","family":"Jian","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7402-6682","authenticated-orcid":false,"given":"Derui","family":"Ding","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7655-9228","authenticated-orcid":false,"given":"Hui","family":"Yu","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.patcog.2026.113415_bib0001","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2024.110927","article-title":"Distilling interaction knowledge for semi-supervised egocentric action recognition","volume":"157","author":"Wang","year":"2025","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.patcog.2026.113415_bib0002","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2024.111104","article-title":"Spatio-temporal interactive reasoning model for multi-group activity recognition","volume":"159","author":"Huang","year":"2025","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.patcog.2026.113415_bib0003","article-title":"AniFaceDiff: animating stylized avatars via parametric conditioned diffusion models","volume":"160","author":"Chen","year":"2025","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.patcog.2026.113415_bib0004","series-title":"IEEE Transactions on Visualization and Computer Graphics","article-title":"GaussianHand: real-time 3D Gaussian rendering for hand avatar animation","author":"Zhao","year":"2025"},{"issue":"3","key":"10.1016\/j.patcog.2026.113415_bib0005","first-page":"3200","article-title":"Human action recognition from various data modalities: a review","volume":"45","author":"Sun","year":"2023","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"10","key":"10.1016\/j.patcog.2026.113415_bib0006","doi-asserted-by":"crossref","first-page":"2684","DOI":"10.1109\/TPAMI.2019.2916873","article-title":"NTU RGB+D 120: a large-scale benchmark for 3D human activity understanding","volume":"42","author":"Liu","year":"2020","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"2","key":"10.1016\/j.patcog.2026.113415_bib0007","doi-asserted-by":"crossref","first-page":"721","DOI":"10.1109\/TIP.2017.2766780","article-title":"Blind stereoscopic video quality assessment: from depth perception to overall experience","volume":"27","author":"Chen","year":"2018","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.patcog.2026.113415_bib0008","doi-asserted-by":"crossref","first-page":"3786","DOI":"10.1109\/TCSVT.2022.3229079","article-title":"DeepStream: video streaming enhancements using compressed deep neural networks","volume":"35","author":"Amirpour","year":"2025","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"2","key":"10.1016\/j.patcog.2026.113415_bib0009","doi-asserted-by":"crossref","first-page":"606","DOI":"10.1109\/TAFFC.2023.3286351","article-title":"MGEED: a multimodal genuine emotion and expression detection database","volume":"15","author":"Wang","year":"2024","journal-title":"IEEE Trans. Affect. Comput."},{"key":"10.1016\/j.patcog.2026.113415_bib0010","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2024.110951","article-title":"Poster++ : a simpler and stronger facial expression recognition network","volume":"157","author":"Mao","year":"2025","journal-title":"Pattern Recognit."},{"issue":"8","key":"10.1016\/j.patcog.2026.113415_bib0011","doi-asserted-by":"crossref","first-page":"6185","DOI":"10.1109\/TPAMI.2025.3557245","article-title":"Revisiting one-stage deep uncalibrated photometric stereo via Fourier embedding","volume":"47","author":"Ju","year":"2025","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.patcog.2026.113415_bib0012","series-title":"Proc. 26th Annu. Conf","first-page":"187","article-title":"A morphable model for the synthesis of 3D faces","author":"Blanz","year":"1999"},{"issue":"3","key":"10.1016\/j.patcog.2026.113415_bib0013","doi-asserted-by":"crossref","first-page":"152","DOI":"10.1016\/j.cag.2011.12.002","article-title":"Perception-driven facial expression synthesis","volume":"36","author":"Yu","year":"2012","journal-title":"Computers & Graphics"},{"key":"10.1016\/j.patcog.2026.113415_bib0014","doi-asserted-by":"crossref","first-page":"3844","DOI":"10.1109\/TIP.2021.3065819","article-title":"Real-time 3D facial tracking via cascaded compositional learning","volume":"30","author":"Lou","year":"2021","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.patcog.2026.113415_bib0015","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"13059","article-title":"High-fidelity face tracking for ar\/vr via deep lighting adaptation","author":"Chen","year":"2021"},{"key":"10.1016\/j.patcog.2026.113415_bib0016","series-title":"Proceedings of the First Conference on Language Modeling (COLM)","first-page":"2024","article-title":"Mamba: linear-time sequence modeling with selective state spaces","author":"Gu","year":"2024"},{"key":"10.1016\/j.patcog.2026.113415_bib0017","first-page":"62429","article-title":"Vision mamba: efficient visual representation learning with bidirectional state space model","volume":"235","author":"Zhu","year":"2024","journal-title":"Proceedings of the 41st International Conference on Machine Learning (ICML)"},{"key":"10.1016\/j.patcog.2026.113415_bib0018","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"20311","article-title":"Emoca: emotion driven monocular face capture and animation","author":"Dan\u011b\u010dek","year":"2022"},{"issue":"4","key":"10.1016\/j.patcog.2026.113415_bib0019","doi-asserted-by":"crossref","first-page":"2512","DOI":"10.1109\/TCSVT.2023.3301930","article-title":"Estimating high-resolution surface normals via low-resolution photometric stereo images","volume":"34","author":"Ju","year":"2024","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"10.1016\/j.patcog.2026.113415_bib0020","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops","article-title":"Accurate 3d face reconstruction with weakly-supervised learning: from single image to image set","author":"Deng","year":"2019"},{"key":"10.1016\/j.patcog.2026.113415_bib0021","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"394","article-title":"A hierarchical representation network for accurate and detailed face reconstruction from in-the-wild images","author":"Lei","year":"2023"},{"key":"10.1016\/j.patcog.2026.113415_bib0022","series-title":"Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision","first-page":"6237","article-title":"AU-aware dynamic 3D face reconstruction from videos with transformer","author":"Kuang","year":"2024"},{"key":"10.1016\/j.patcog.2026.113415_bib0023","article-title":"LiteNeRFAvatar: a lightweight NeRF with local feature learning for dynamic human avatar","volume":"160","author":"Pan","year":"2025","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.patcog.2026.113415_bib0024","series-title":"European Conference on Computer Vision","first-page":"237","article-title":"Videomamba: state space model for efficient video understanding","author":"Li","year":"2025"},{"key":"10.1016\/j.patcog.2026.113415_bib0025","series-title":"Technical Report","article-title":"Vivim: A Video Vision Mamba for Ultrasound Video Segmentation","author":"Yang","year":"2025"},{"key":"10.1016\/j.patcog.2026.113415_bib0026","unstructured":"Z. Wang, J.-Q. Zheng, C. Ma, T. Guo, VMambaMorph: a multi-modality deformable image registration framework based on visual state space model with cross-scan module, arXiv: 2404.05105 edition, 2024."},{"key":"10.1016\/j.patcog.2026.113415_bib0027","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"1252","article-title":"MambaVO: deep visual odometry based on sequential matching refinement and training smoothing","author":"Wang","year":"2025"},{"issue":"6","key":"10.1016\/j.patcog.2026.113415_bib0028","doi-asserted-by":"crossref","first-page":"17","DOI":"10.1145\/3130800.3130813","article-title":"Learning a model of facial shape and expression from 4D scans","volume":"36","author":"Li","year":"2017","journal-title":"ACM Trans. Graph."},{"key":"10.1016\/j.patcog.2026.113415_bib0029","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"9033","article-title":"Accurate 3D face reconstruction with facial component tokens","author":"Zhang","year":"2023"},{"key":"10.1016\/j.patcog.2026.113415_bib0030","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"4156","article-title":"Decoupled multi-task learning with cyclical self-regulation for face parsing","author":"Zheng","year":"2022"},{"key":"10.1016\/j.patcog.2026.113415_bib0031","series-title":"VoxCeleb: a large-scale speaker identification dataset","first-page":"2616","author":"Nagrani","year":"2017"},{"key":"10.1016\/j.patcog.2026.113415_bib0032","series-title":"The 7th International Conference on Automatic Face and Gesture Recognition","first-page":"211","article-title":"A 3D facial expression database for facial behavior research","author":"Yin","year":"2006"},{"key":"10.1016\/j.patcog.2026.113415_bib0033","unstructured":"C. Wuu, N. Zheng, S. Ardisson, Multiface: a dataset for neural face rendering, arXiv preprint arXiv: 2207.11243, 2022."},{"key":"10.1016\/j.patcog.2026.113415_bib0034","series-title":"European Conference on Computer Vision","first-page":"250","article-title":"Towards metrical reconstruction of human faces","author":"Zielonka","year":"2022"},{"issue":"4","key":"10.1016\/j.patcog.2026.113415_bib0035","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3450626.3459936","article-title":"Learning an animatable detailed 3D face model from in-the-wild images","volume":"40","author":"Feng","year":"2021","journal-title":"ACM Trans. Graph. (ToG)"},{"key":"10.1016\/j.patcog.2026.113415_bib0036","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"3192","article-title":"Video swin transformer","author":"Liu","year":"2022"},{"key":"10.1016\/j.patcog.2026.113415_bib0037","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV)","first-page":"20687","article-title":"EmoTalk: speech-driven emotional disentanglement for 3D face animation","author":"Peng","year":"2023"},{"key":"10.1016\/j.patcog.2026.113415_bib0038","series-title":"Proceedings of the Thirty-Fourth International Joint Conference on Artificial Intelligence (IJCAI)","first-page":"1548","article-title":"GLDiTalker: speech-driven 3D facial animation with graph latent diffusion transformer","author":"Lin","year":"2025"},{"key":"10.1016\/j.patcog.2026.113415_bib0039","series-title":"Proceedings of the 31st ACM International Conference on Multimedia (MM)","first-page":"5292","article-title":"SelfTalk: a self-supervised commutative training diagram to comprehend 3D talking faces","author":"Peng","year":"2023"},{"issue":"4","key":"10.1016\/j.patcog.2026.113415_bib0040","doi-asserted-by":"crossref","first-page":"681","DOI":"10.1109\/TBIOM.2025.3570599","article-title":"3-D face de-identification with preserving multi-facial attributes: a benchmark","volume":"7","author":"Liu","year":"2025","journal-title":"IEEE Trans. Biometr. Behav. Ident. Sci."}],"container-title":["Pattern Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0031320326003808?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0031320326003808?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,29]],"date-time":"2026-04-29T06:22:08Z","timestamp":1777443728000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0031320326003808"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,10]]},"references-count":40,"alternative-id":["S0031320326003808"],"URL":"https:\/\/doi.org\/10.1016\/j.patcog.2026.113415","relation":{},"ISSN":["0031-3203"],"issn-type":[{"value":"0031-3203","type":"print"}],"subject":[],"published":{"date-parts":[[2026,10]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"HMamba-3DFT: A hierarchical mamba framework for emotion-driven semantic 3D facial tracking","name":"articletitle","label":"Article Title"},{"value":"Pattern Recognition","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.patcog.2026.113415","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 The Author(s). Published by Elsevier Ltd.","name":"copyright","label":"Copyright"}],"article-number":"113415"}}