{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,5]],"date-time":"2026-05-05T01:47:26Z","timestamp":1777945646891,"version":"3.51.4"},"reference-count":24,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Pattern Recognition Letters"],"published-print":{"date-parts":[[2026,6]]},"DOI":"10.1016\/j.patrec.2026.04.005","type":"journal-article","created":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T23:50:02Z","timestamp":1775260202000},"page":"72-78","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["RHRAF: Residual hypergraph refinement with adaptive fusion for open-set facial expression image retrieval"],"prefix":"10.1016","volume":"204","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-0865-5008","authenticated-orcid":false,"given":"Tao","family":"Su","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6954-6587","authenticated-orcid":false,"given":"Duanpo","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Xuyang","family":"Teng","sequence":"additional","affiliation":[]},{"given":"Yuhan","family":"Gao","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.patrec.2026.04.005_bib0001","series-title":"Proceedings of the IEEE\/CVF International Conference on Computer Vision","first-page":"3146","article-title":"Poster: a pyramid cross-fusion transformer network for facial expression recognition","author":"Zheng","year":"2023"},{"key":"10.1016\/j.patrec.2026.04.005_bib0002","doi-asserted-by":"crossref","first-page":"491","DOI":"10.1109\/LSP.2024.3521321","article-title":"ResEmoteNet: bridging accuracy and loss reduction in facial emotion recognition","volume":"32","author":"Roy","year":"2024","journal-title":"IEEE Signal Process. 
Lett."},{"key":"10.1016\/j.patrec.2026.04.005_bib0003","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2024.110951","article-title":"Poster++: a simpler and stronger facial expression recognition network","volume":"157","author":"Mao","year":"2025","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.patrec.2026.04.005_bib0004","doi-asserted-by":"crossref","first-page":"166","DOI":"10.1016\/j.patrec.2025.01.021","article-title":"Entire-detail motion dual-branch network for micro-expression recognition","volume":"189","author":"Ma","year":"2025","journal-title":"Pattern Recognit. Lett."},{"key":"10.1016\/j.patrec.2026.04.005_bib0005","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"646","article-title":"Open-set facial expression recognition","volume":"38","author":"Zhang","year":"2024"},{"key":"10.1016\/j.patrec.2026.04.005_bib0006","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"1563","article-title":"Towards open set deep networks","author":"Bendale","year":"2016"},{"key":"10.1016\/j.patrec.2026.04.005_bib0007","unstructured":"S. Vaze, K. Han, A. Vedaldi, A. Zisserman, Open-set recognition: a good closed-set classifier is all you need?, in: International Conference on Learning Representations, 2021."},{"issue":"11","key":"10.1016\/j.patrec.2026.04.005_bib0008","first-page":"8065","article-title":"Adversarial reciprocal points learning for open set recognition","volume":"44","author":"Chen","year":"2021","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"4","key":"10.1016\/j.patrec.2026.04.005_bib0009","doi-asserted-by":"crossref","first-page":"2206","DOI":"10.1109\/TPAMI.2023.3332768","article-title":"Hypergraph-based multi-modal representation for open-set 3D object retrieval","volume":"46","author":"Feng","year":"2023","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.patrec.2026.04.005_bib0010","doi-asserted-by":"crossref","first-page":"4627","DOI":"10.1109\/TMM.2025.3535298","article-title":"Hypergraph-based remaining prototype alignment for open-set cross-domain image retrieval","volume":"27","author":"Xu","year":"2025","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.patrec.2026.04.005_bib0011","series-title":"Proceedings of International Conference on Neural Networks (ICNN\u201997)","first-page":"1563","article-title":"Image retrieval system capable of learning the user\u2019s sensibility using neural networks","volume":"3","author":"Kageyama","year":"1997"},{"key":"10.1016\/j.patrec.2026.04.005_bib0012","doi-asserted-by":"crossref","first-page":"135","DOI":"10.1016\/j.patrec.2025.11.003","article-title":"Collaborative feature alignment with global-local fusion for fine-grained sketch-based image retrieval","volume":"199","author":"Zhang","year":"2025","journal-title":"Pattern Recognit. Lett."},{"key":"10.1016\/j.patrec.2026.04.005_bib0013","unstructured":"D. Dai, Y. Li, Y. Liu, M. Jia, Z. YuanHui, G. Wang, 15m multimodal facial image-text dataset, (2024). arXiv preprint arXiv: 2407.08515."},{"key":"10.1016\/j.patrec.2026.04.005_bib0014","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2025.103271","article-title":"HumanVLM: foundation for human-scene vision-language model","author":"Dai","year":"2025","journal-title":"Inf. 
Fusion"},{"key":"10.1016\/j.patrec.2026.04.005_bib0015","article-title":"Multivariate feedback-based image-text joint learning for sketch-less facial image retrieval","author":"Liu","year":"2025","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"3","key":"10.1016\/j.patrec.2026.04.005_bib0016","doi-asserted-by":"crossref","first-page":"3181","DOI":"10.1109\/TPAMI.2022.3182052","article-title":"HGNN+: general hypergraph neural networks","volume":"45","author":"Gao","year":"2022","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.patrec.2026.04.005_bib0017","unstructured":"M. Kamachi, M. Lyons, J. Gyoba, The Japanese female facial expression (Jaffe) database, URL http:\/\/www.kasrl.org\/jaffe.html 21 (1998) 32."},{"key":"10.1016\/j.patrec.2026.04.005_bib0018","series-title":"2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)","first-page":"2106","article-title":"Static facial expression analysis in tough conditions: data, evaluation protocol and benchmark","author":"Dhall","year":"2011"},{"key":"10.1016\/j.patrec.2026.04.005_bib0019","article-title":"Karolinska directed emotional faces","author":"Lundqvist","year":"1998","journal-title":"Cognit. Emotion"},{"key":"10.1016\/j.patrec.2026.04.005_bib0020","series-title":"Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval","first-page":"635","article-title":"Scalable deep multimodal learning for cross-modal retrieval","author":"Hu","year":"2019"},{"key":"10.1016\/j.patrec.2026.04.005_bib0021","series-title":"Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"3142","article-title":"Cross-modal center loss for 3D cross-modal retrieval","author":"Jing","year":"2021"},{"key":"10.1016\/j.patrec.2026.04.005_bib0022","doi-asserted-by":"crossref","first-page":"165","DOI":"10.1016\/j.neucom.2018.11.042","article-title":"Multi-modal semantic autoencoder for cross-modal retrieval","volume":"331","author":"Wu","year":"2019","journal-title":"Neurocomputing"},{"key":"10.1016\/j.patrec.2026.04.005_bib0023","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"1945","article-title":"Triplet-center loss for multi-view 3D object retrieval","author":"He","year":"2018"},{"key":"10.1016\/j.patrec.2026.04.005_bib0024","series-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","first-page":"2852","article-title":"Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild","author":"Li","year":"2017"}],"container-title":["Pattern Recognition 
Letters"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0167865526001261?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0167865526001261?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,5,2]],"date-time":"2026-05-02T12:40:31Z","timestamp":1777725631000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0167865526001261"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,6]]},"references-count":24,"alternative-id":["S0167865526001261"],"URL":"https:\/\/doi.org\/10.1016\/j.patrec.2026.04.005","relation":{},"ISSN":["0167-8655"],"issn-type":[{"value":"0167-8655","type":"print"}],"subject":[],"published":{"date-parts":[[2026,6]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"RHRAF: Residual hypergraph refinement with adaptive fusion for open-set facial expression image retrieval","name":"articletitle","label":"Article Title"},{"value":"Pattern Recognition Letters","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.patrec.2026.04.005","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}]}}