{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T12:00:31Z","timestamp":1773748831345,"version":"3.50.1"},"reference-count":27,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T00:00:00Z","timestamp":1773705600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T00:00:00Z","timestamp":1773705600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-026-21461-w","type":"journal-article","created":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T11:03:39Z","timestamp":1773745419000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Cross-database facial expression recognition based on Spatio-temporal Feature Point Attention Deep Transfer Network"],"prefix":"10.1007","volume":"85","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3658-9593","authenticated-orcid":false,"given":"Jingjie","family":"Yan","sequence":"first","affiliation":[]},{"given":"Yuebo","family":"Yue","sequence":"additional","affiliation":[]},{"given":"Kai","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Xiaoyang","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Ying","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Jingsheng","family":"Wei","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,3,17]]},"reference":[{"key":"21461_CR1","doi-asserted-by":"publisher","unstructured":"Kolakowska A, 
Landowska A, Szwoch M, Szwoch W, Wrobel MR (2013) Emotion recognition and its application in software engineering. In: 2013 6th international conference on human system interactions (HSI), IEEE pp 532\u2013539. https:\/\/doi.org\/10.1109\/HSI.2013.6577877","DOI":"10.1109\/HSI.2013.6577877"},{"issue":"21","key":"21461_CR2","doi-asserted-by":"publisher","first-page":"16002","DOI":"10.1109\/JIOT.2020.3038631","volume":"8","author":"T Zhang","year":"2021","unstructured":"Zhang T, Liu M, Yuan T, Al-Nabhan N (2021) Emotion-aware and intelligent internet of medical things toward emotion recognition during COVID-19 pandemic. IEEE Internet Things J 8(21):16002\u201316013. https:\/\/doi.org\/10.1109\/JIOT.2020.3038631","journal-title":"IEEE Internet Things J"},{"key":"21461_CR3","doi-asserted-by":"publisher","unstructured":"Cosentino S, Randria EIS, Lin J-Y, Pellegrini T, Sessa S, Takanishi A (2018) Group emotion recognition strategies for entertainment robots. In: 2018 IEEE\/RSJ international conference on intelligent robots and systems (IROS), IEEE, pp 813\u2013818. https:\/\/doi.org\/10.1109\/IROS.2018.8593503","DOI":"10.1109\/IROS.2018.8593503"},{"issue":"3","key":"21461_CR4","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3388790","volume":"53","author":"S Zepf","year":"2021","unstructured":"Zepf S, Hernandez J, Schmitt A, Minker W, Picard RW (2021) Driver emotion recognition for intelligent vehicles. ACM Comput Surv 53(3):1\u201330. https:\/\/doi.org\/10.1145\/3388790","journal-title":"ACM Comput Surv"},{"key":"21461_CR5","doi-asserted-by":"publisher","unstructured":"Li J, Hu R, Mukherjee M (2022) Discriminative region transfer network for cross-database micro-expression recognition. In: ICC 2022 - IEEE international conference on communications, IEEE, pp 5082\u20135087. 
https:\/\/doi.org\/10.1109\/ICC45855.2022.9838815","DOI":"10.1109\/ICC45855.2022.9838815"},{"key":"21461_CR6","doi-asserted-by":"publisher","unstructured":"Liu N et al (2018) Super wide regression network for unsupervised cross-database facial expression recognition. In 2018 IEEE international conference on acoustics, speech and signal processing (ICASSP), IEEE, pp 1897\u20131901. https:\/\/doi.org\/10.1109\/ICASSP.2018.8461322","DOI":"10.1109\/ICASSP.2018.8461322"},{"key":"21461_CR7","unstructured":"Ganin Y, Lempitsky V (2015) Unsupervised domain adaptation by backpropagation. In proceedings of the 32nd international conference on machine learning, vol 37. PMLR, pp 1180\u20131189"},{"issue":"2","key":"21461_CR8","doi-asserted-by":"publisher","first-page":"199","DOI":"10.1109\/TNN.2010.2091281","volume":"22","author":"SJ Pan","year":"2011","unstructured":"Pan SJ, Tsang IW, Kwok JT, Yang Q (2011) Domain adaptation via transfer component analysis. IEEE Trans Neural Netw 22(2):199\u2013210. https:\/\/doi.org\/10.1109\/TNN.2010.2091281","journal-title":"IEEE Trans Neural Netw"},{"issue":"4","key":"21461_CR9","doi-asserted-by":"publisher","first-page":"1713","DOI":"10.1109\/TNNLS.2020.2988928","volume":"32","author":"Y Zhu","year":"2021","unstructured":"Zhu Y et al (2021) Deep subdomain adaptation network for image classification. IEEE Trans Neural Netw Learn Syst 32(4):1713\u20131722. https:\/\/doi.org\/10.1109\/TNNLS.2020.2988928","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"21461_CR10","doi-asserted-by":"publisher","unstructured":"Yan K, Zheng W, Cui Z, Zong Y (2016) Cross-database facial expression recognition via unsupervised domain adaptive dictionary learning. In: International conference on neural information processing. Springer, Cham, pp 427\u2013434. 
https:\/\/doi.org\/10.1007\/978-3-319-46672-9_48","DOI":"10.1007\/978-3-319-46672-9_48"},{"issue":"1","key":"21461_CR11","doi-asserted-by":"publisher","first-page":"21","DOI":"10.1109\/TAFFC.2016.2563432","volume":"9","author":"W Zheng","year":"2018","unstructured":"Zheng W, Zong Y, Zhou X, Xin M (2018) Cross-domain color facial expression recognition using transductive transfer subspace learning. IEEE Trans Affect Comput 9(1):21\u201337. https:\/\/doi.org\/10.1109\/TAFFC.2016.2563432","journal-title":"IEEE Trans Affect Comput"},{"key":"21461_CR12","doi-asserted-by":"publisher","unstructured":"Zavarez MV, Berriel RF, Oliveira-Santos T (2017) Cross-database facial expression recognition based on fine-tuned deep convolutional network. In: 2017 30th SIBGRAPI conference on graphics, patterns and images (SIBGRAPI), IEEE, pp. 405\u2013412. https:\/\/doi.org\/10.1109\/SIBGRAPI.2017.60","DOI":"10.1109\/SIBGRAPI.2017.60"},{"key":"21461_CR13","doi-asserted-by":"publisher","unstructured":"Wang L, Su J, Zhang K (2019) Cross-database facial expression recognition with domain alignment and compact feature learning. In: International symposium on neural networks. Springer, Cham, pp 341\u2013350. https:\/\/doi.org\/10.1007\/978-3-030-22808-8_34","DOI":"10.1007\/978-3-030-22808-8_34"},{"key":"21461_CR14","doi-asserted-by":"crossref","unstructured":"Zou WH (2022) Learn-to-decompose: Cascaded decomposition network for cross-domain few-shot facial expression recognition. In: Computer vision -- ECCV 2022. Springer Nature Switzerland, pp 683\u2013700","DOI":"10.1007\/978-3-031-19800-7_40"},{"key":"21461_CR15","doi-asserted-by":"publisher","unstructured":"Xia W, Zheng W, Zong Y, Jiang X (2021) Motion attention deep transfer network for cross-database micro-expression recognition. In: Proc. of ICPR international workshops and challenges, pp 679\u2013693. 
https:\/\/doi.org\/10.1007\/978-3-030-68796-0_49","DOI":"10.1007\/978-3-030-68796-0_49"},{"key":"21461_CR16","doi-asserted-by":"publisher","unstructured":"Dollar P, Rabaud V, Cottrell G, Belongie S (2005) Behavior recognition via sparse spatio-temporal features. In: 2005 IEEE international workshop on visual surveillance and performance evaluation of tracking and surveillance, IEEE, pp 65\u201372. https:\/\/doi.org\/10.1109\/VSPETS.2005.1570899","DOI":"10.1109\/VSPETS.2005.1570899"},{"issue":"5","key":"21461_CR17","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0196391","volume":"13","author":"SR Livingstone","year":"2018","unstructured":"Livingstone SR, Russo FA (2018) The Ryerson audio-visual database of emotional speech and song (RAVDESS): a dynamic, multimodal set of facial and vocal expressions in North American English. PLoS One 13(5):e0196391. https:\/\/doi.org\/10.1371\/journal.pone.0196391","journal-title":"PLoS One"},{"key":"21461_CR18","doi-asserted-by":"publisher","unstructured":"Gunes H, Piccardi M (2006) A bimodal face and body gesture database for automatic analysis of human nonverbal affective behavior. In: 18th international conference on pattern recognition (ICPR\u201906), IEEE, pp 1148\u20131153. https:\/\/doi.org\/10.1109\/ICPR.2006.39","DOI":"10.1109\/ICPR.2006.39"},{"key":"21461_CR19","doi-asserted-by":"publisher","unstructured":"Martin O, Kotsia I, Macq B, Pitas I (2006) The eNTERFACE\u201905 audio-visual emotion database. In: 22nd international conference on data engineering workshops (ICDEW\u201906), IEEE, pp 8\u20138. https:\/\/doi.org\/10.1109\/ICDEW.2006.145","DOI":"10.1109\/ICDEW.2006.145"},{"key":"21461_CR20","doi-asserted-by":"publisher","first-page":"610","DOI":"10.1587\/transinf.E97.D.610","volume":"97-D","author":"J Yan","year":"2014","unstructured":"Yan J, Zheng W, Xin M, Yan J (2014) Integrating facial expression and body gesture in videos for emotion recognition. IEICE Trans. Inf. Syst. 
97-D:610\u2013613","journal-title":"IEICE Trans. Inf. Syst."},{"key":"21461_CR21","doi-asserted-by":"publisher","unstructured":"Shan C, Gong S, McOwan PW (2007) Beyond facial expressions: Learning human emotion from body gestures. In: Proceedings of the British machine vision conference 2007, British Machine Vision Association, p 43.1\u201343.10. https:\/\/doi.org\/10.5244\/C.21.43","DOI":"10.5244\/C.21.43"},{"key":"21461_CR22","doi-asserted-by":"crossref","unstructured":"Gabor D (1946) Theory of communication. Journal of the Institution of Electrical Engineers - Part I: General, pp 429\u2013457","DOI":"10.1049\/ji-3-2.1946.0074"},{"key":"21461_CR23","doi-asserted-by":"publisher","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: 2016 IEEE conference on computer vision and pattern recognition (CVPR), IEEE, pp 770\u2013778 https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"issue":"8","key":"21461_CR24","doi-asserted-by":"publisher","DOI":"10.3390\/electronics13081470","volume":"13","author":"J Yan","year":"2024","unstructured":"Yan J et al (2024) Multi-representation joint dynamic domain adaptation network for cross-database facial expression recognition. Electronics 13(8):1470. https:\/\/doi.org\/10.3390\/electronics13081470","journal-title":"Electronics"},{"key":"21461_CR25","doi-asserted-by":"publisher","unstructured":"Gong B, Shi Y, Sha F, Grauman K (2012) Geodesic flow kernel for unsupervised domain adaptation. In: 2012 IEEE conference on computer vision and pattern recognition, IEEE, pp 2066\u20132073 https:\/\/doi.org\/10.1109\/CVPR.2012.6247911","DOI":"10.1109\/CVPR.2012.6247911"},{"key":"21461_CR26","doi-asserted-by":"publisher","unstructured":"Fernando B, Habrard A, Sebban M, Tuytelaars T (2013) Unsupervised visual domain adaptation using subspace alignment. 
In: 2013 IEEE International Conference on Computer Vision, IEEE, pp 2960\u20132967 https:\/\/doi.org\/10.1109\/ICCV.2013.368","DOI":"10.1109\/ICCV.2013.368"},{"issue":"2","key":"21461_CR27","doi-asserted-by":"publisher","first-page":"2648","DOI":"10.1109\/TNNLS.2023.3347722","volume":"36","author":"Y Liu","year":"2025","unstructured":"Liu Y, Cheng D, Zhang D et al (2025) Capsule networks with residual pose routing. IEEE Trans Neural Netw Learn Syst 36(2):2648\u20132661. https:\/\/doi.org\/10.1109\/TNNLS.2023.3347722","journal-title":"IEEE Trans Neural Netw Learn Syst"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-026-21461-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-026-21461-w","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-026-21461-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T11:03:41Z","timestamp":1773745421000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-026-21461-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3,17]]},"references-count":27,"journal-issue":{"issue":"4","published-online":{"date-parts":[[2026,4]]}},"alternative-id":["21461"],"URL":"https:\/\/doi.org\/10.1007\/s11042-026-21461-w","relation":{},"ISSN":["1573-7721"],"issn-type":[{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3,17]]},"assertion":[{"value":"15 January 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 November 
2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 February 2026","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 March 2026","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Not applicable.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to publish"}},{"value":"The authors declare no conflict of interest.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"284"}}