{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,12]],"date-time":"2026-01-12T22:59:45Z","timestamp":1768258785677,"version":"3.49.0"},"reference-count":33,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2022,10,9]],"date-time":"2022-10-09T00:00:00Z","timestamp":1665273600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,10,9]],"date-time":"2022-10-09T00:00:00Z","timestamp":1665273600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100004826","name":"Natural Science Foundation of Beijing Municipality","doi-asserted-by":"publisher","award":["KZ201911417048"],"award-info":[{"award-number":["KZ201911417048"]}],"id":[{"id":"10.13039\/501100004826","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Multimed Info Retr"],"published-print":{"date-parts":[[2022,12]]},"DOI":"10.1007\/s13735-022-00250-9","type":"journal-article","created":{"date-parts":[[2022,10,9]],"date-time":"2022-10-09T05:02:11Z","timestamp":1665291731000},"page":"553-566","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":8,"title":["Your heart rate betrays you: multimodal learning with spatio-temporal fusion networks for micro-expression 
recognition"],"prefix":"10.1007","volume":"11","author":[{"given":"Ren","family":"Zhang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1805-9249","authenticated-orcid":false,"given":"Ning","family":"He","sequence":"additional","affiliation":[]},{"given":"Shengjie","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Ying","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Kang","family":"Yan","sequence":"additional","affiliation":[]},{"given":"Yuzhe","family":"He","sequence":"additional","affiliation":[]},{"given":"Ke","family":"Lu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,9]]},"reference":[{"key":"250_CR1","doi-asserted-by":"crossref","unstructured":"O\u201dSullivan M, Frank MG, Tiwana HJ (2009) Police lie detection accuracy: the effect of lie scenario. Law Hum Behav 33(6):542\u2013543","DOI":"10.1007\/s10979-009-9191-y"},{"issue":"7297","key":"250_CR2","doi-asserted-by":"publisher","first-page":"412","DOI":"10.1038\/465412a","volume":"465","author":"S Weinberger","year":"2010","unstructured":"Weinberger S (2010) Intent to deceive? Nature 465(7297):412\u2013415","journal-title":"Nature"},{"issue":"6","key":"250_CR3","doi-asserted-by":"publisher","first-page":"915","DOI":"10.1109\/TPAMI.2007.1110","volume":"29","author":"G Zhao","year":"2007","unstructured":"Zhao G, Pietikainen M (2007) Dynamic texture recognition using local binary patterns with an application to facial expressions. IEEE Trans Pattern Anal Mach Intell 29(6):915\u2013928","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"4","key":"250_CR4","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2185520.2185561","volume":"31","author":"H-Y Wu","year":"2012","unstructured":"Wu H-Y, Rubinstein M, Shih E, Guttag J, Durand F, Freeman W (2012) Eulerian video magnification for revealing subtle changes in the world. 
ACM Trans Graph (TOG) 31(4):1\u20138","journal-title":"ACM Trans Graph (TOG)"},{"key":"250_CR5","doi-asserted-by":"crossref","unstructured":"Liu S-Q, Lan X, Yuen PC (2018) Remote photoplethysmography correspondence feature for 3d mask face presentation attack detection. In: Proceedings of the European conference on computer vision (ECCV), pp 558\u2013573","DOI":"10.1007\/978-3-030-01270-0_34"},{"key":"250_CR6","doi-asserted-by":"crossref","unstructured":"Liu Y, Jourabloo A, Liu X (2018) Learning deep models for face anti-spoofing: binary or auxiliary supervision. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 389\u2013398","DOI":"10.1109\/CVPR.2018.00048"},{"issue":"26","key":"250_CR7","doi-asserted-by":"publisher","first-page":"21434","DOI":"10.1364\/OE.16.021434","volume":"16","author":"W Verkruysse","year":"2008","unstructured":"Verkruysse W, Svaasand LO, Nelson JS (2008) Remote plethysmographic imaging using ambient light. Opt Express 16(26):21434\u201321445","journal-title":"Opt Express"},{"key":"250_CR8","unstructured":"Rouast PV, Adam MP, Dorner V, Lux E (2016) Remote photoplethysmography: evaluation of contactless heart rate measurement in an information systems setting. In: Applied informatics and technology innovation conference, pp 1\u201317"},{"issue":"4","key":"250_CR9","doi-asserted-by":"publisher","first-page":"299","DOI":"10.1109\/TAFFC.2015.2485205","volume":"7","author":"Y-J Liu","year":"2015","unstructured":"Liu Y-J, Zhang J-K, Yan W-J, Wang S-J, Zhao G, Fu X (2015) A main directional mean optical flow feature for spontaneous micro-expression recognition. 
IEEE Trans Affect Comput 7(4):299\u2013310","journal-title":"IEEE Trans Affect Comput"},{"key":"250_CR10","doi-asserted-by":"publisher","first-page":"82","DOI":"10.1016\/j.image.2017.11.006","volume":"62","author":"S-T Liong","year":"2018","unstructured":"Liong S-T, See J, Wong KS, Phan RC-W (2018) Less is more: micro-expression recognition from video using apex frame. Signal Process Image Commun 62:82\u201392","journal-title":"Signal Process Image Commun"},{"key":"250_CR11","doi-asserted-by":"crossref","unstructured":"Khor H-Q, See J, Phan RCW, Lin W (2018) Enriched long-term recurrent convolutional network for facial micro-expression recognition. In: 2018 13th IEEE international conference on automatic face & gesture recognition (FG 2018), pp 667\u2013674. IEEE","DOI":"10.1109\/FG.2018.00105"},{"key":"250_CR12","doi-asserted-by":"crossref","unstructured":"Liu Y, Du H, Zheng L, Gedeon T (2019) A neural micro-expression recognizer. In: 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pp 1\u20134. IEEE","DOI":"10.1109\/FG.2019.8756583"},{"key":"250_CR13","unstructured":"Krishnamurthy G, Majumder N, Poria S, Cambria E (2018) A deep learning approach for multimodal deception detection. arXiv preprintarXiv:1803.00344"},{"issue":"8","key":"250_CR14","doi-asserted-by":"publisher","first-page":"1863","DOI":"10.3390\/s19081863","volume":"19","author":"N Samadiani","year":"2019","unstructured":"Samadiani N, Huang G, Cai B, Luo W, Chi C-H, Xiang Y, He J (2019) A review on automatic facial expression recognition systems assisted by multimodal sensor data. Sensors 19(8):1863","journal-title":"Sensors"},{"key":"250_CR15","doi-asserted-by":"crossref","unstructured":"Li X, Pfister T, Huang X, Zhao G, Pietik\u00e4inen M (2013) A spontaneous micro-expression database: inducement, collection and baseline. In: 2013 10th ieee international conference and workshops on automatic face and gesture recognition (FG), pp 1\u20136. 
IEEE","DOI":"10.1109\/FG.2013.6553717"},{"issue":"1","key":"250_CR16","doi-asserted-by":"publisher","first-page":"116","DOI":"10.1109\/TAFFC.2016.2573832","volume":"9","author":"AK Davison","year":"2016","unstructured":"Davison AK, Lansley C, Costen N, Tan K, Yap MH (2016) Samm: a spontaneous micro-facial movement dataset. IEEE Trans Affect Comput 9(1):116\u2013129","journal-title":"IEEE Trans Affect Comput"},{"issue":"10","key":"250_CR17","doi-asserted-by":"publisher","first-page":"119","DOI":"10.3390\/jimaging4100119","volume":"4","author":"AK Davison","year":"2018","unstructured":"Davison AK, Merghani W, Yap MH (2018) Objective classes for micro-facial expression recognition. J Imaging 4(10):119","journal-title":"J Imaging"},{"key":"250_CR18","doi-asserted-by":"publisher","first-page":"424","DOI":"10.1109\/TAFFC.2017.2654440","volume":"9","author":"F Qu","year":"2017","unstructured":"Qu F, Wang S-J, Yan W-J, Li H, Wu S, Fu X (2017) Cas(me)2): a database for spontaneous macro-expression and micro-expression spotting and recognition. IEEE Trans Affect Comput 9:424\u2013436","journal-title":"IEEE Trans Affect Comput"},{"key":"250_CR19","unstructured":"Simonyan K, Zisserman A (2014) Two-stream convolutional networks for action recognition in videos. arXiv preprint arXiv:1406.2199"},{"key":"250_CR20","doi-asserted-by":"publisher","first-page":"354","DOI":"10.1016\/j.neucom.2020.06.005","volume":"410","author":"C Wang","year":"2020","unstructured":"Wang C, Peng M, Bi T, Chen T (2020) Micro-attention for micro-expression recognition. Neurocomputing 410:354\u2013362","journal-title":"Neurocomputing"},{"key":"250_CR21","doi-asserted-by":"crossref","unstructured":"Zhang R, He N, Wu Y, He Y, Yan K (2021) To balance: balanced micro-expression recognition. 
Multimedia Systems","DOI":"10.1007\/s00530-021-00842-1"},{"key":"250_CR22","doi-asserted-by":"crossref","unstructured":"Lucey P, Cohn JF, Kanade T, Saragih J, Ambadar Z, Matthews I (2010) The extended Cohn\u2013Kanade dataset (CK+): a complete dataset for action unit and emotion-specified expression. In: 2010 IEEE computer society conference on computer vision and pattern recognition-workshops, pp 94\u2013101. IEEE","DOI":"10.1109\/CVPRW.2010.5543262"},{"issue":"3","key":"250_CR23","doi-asserted-by":"publisher","first-page":"626","DOI":"10.1109\/TMM.2019.2931351","volume":"22","author":"Z Xia","year":"2019","unstructured":"Xia Z, Hong X, Gao X, Feng X, Zhao G (2019) Spatiotemporal recurrent convolutional networks for recognizing spontaneous micro-expressions. IEEE Trans Multimedia 22(3):626\u2013640","journal-title":"IEEE Trans Multimedia"},{"key":"250_CR24","unstructured":"Liu C et\u00a0al (2009) Beyond pixels: exploring new representations and applications for motion analysis. Ph.D. thesis, Massachusetts Institute of Technology"},{"key":"250_CR25","first-page":"1755","volume":"10","author":"DE King","year":"2009","unstructured":"King DE (2009) Dlib-ml: a machine learning toolkit. J Mach Learn Res 10:1755\u20131758","journal-title":"J Mach Learn Res"},{"key":"250_CR26","doi-asserted-by":"crossref","unstructured":"Zhang C, Liu S, Xu X, Zhu C (2019) C3ae: exploring the limits of compact model for age estimation. In: 2019 IEEE\/CVF conference on computer vision and pattern recognition (CVPR)","DOI":"10.1109\/CVPR.2019.01287"},{"key":"250_CR27","doi-asserted-by":"crossref","unstructured":"Szegedy C, Liu W, Jia Y, Sermanet P, Reed S, Anguelov D, Erhan D, Vanhoucke V, Rabinovich A (2015) Going deeper with convolutions. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 1\u20139","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"250_CR28","unstructured":"Simonyan K, Zisserman A (2014) Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556"},{"key":"250_CR29","unstructured":"Krizhevsky A, Sutskever I, Hinton G (2012) Imagenet classification with deep convolutional neural networks. In: NIPS, pp 1097\u20131105"},{"key":"250_CR30","doi-asserted-by":"crossref","unstructured":"Liong S-T, Gan YS, See J, Khor H-Q, Huang Y-C (2019) Shallow triple stream three-dimensional CNN (ststnet) for micro-expression recognition. In: 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pp 1\u20135. IEEE","DOI":"10.1109\/FG.2019.8756567"},{"key":"250_CR31","doi-asserted-by":"publisher","first-page":"129","DOI":"10.1016\/j.image.2019.02.005","volume":"74","author":"YS Gan","year":"2019","unstructured":"Gan YS, Liong S-T, Yau W-C, Huang Y-C, Tan L-K (2019) Off-apexnet on micro-expression recognition system. Signal Process Image Commun 74:129\u2013139","journal-title":"Signal Process Image Commun"},{"key":"250_CR32","doi-asserted-by":"crossref","unstructured":"Zhou L, Mao Q, Xue L (2019) Dual-inception network for cross-database micro-expression recognition. In: 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pp 1\u20135. IEEE","DOI":"10.1109\/FG.2019.8756579"},{"key":"250_CR33","doi-asserted-by":"crossref","unstructured":"Van\u00a0Quang N, Chun J, Tokuyama T (2019) Capsulenet for micro-expression recognition. In: 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pp 1\u20137. 
IEEE","DOI":"10.1109\/FG.2019.8756544"}],"container-title":["International Journal of Multimedia Information Retrieval"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13735-022-00250-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s13735-022-00250-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13735-022-00250-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,17]],"date-time":"2022-12-17T14:24:16Z","timestamp":1671287056000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s13735-022-00250-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,9]]},"references-count":33,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2022,12]]}},"alternative-id":["250"],"URL":"https:\/\/doi.org\/10.1007\/s13735-022-00250-9","relation":{},"ISSN":["2192-6611","2192-662X"],"issn-type":[{"value":"2192-6611","type":"print"},{"value":"2192-662X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,10,9]]},"assertion":[{"value":"31 May 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 August 2022","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 August 2022","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 October 2022","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"No potential conflict of interest was reported by the authors.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"This article does not contain any studies with human participants or animals performed by any of the authors.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Human and animal rights"}}]}}