{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T04:45:18Z","timestamp":1761972318552,"version":"build-2065373602"},"reference-count":36,"publisher":"Institute of Electronics, Information and Communications Engineers (IEICE)","issue":"11","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEICE Trans. Fundamentals"],"published-print":{"date-parts":[[2025,11,1]]},"DOI":"10.1587\/transfun.2024eap1128","type":"journal-article","created":{"date-parts":[[2025,4,21]],"date-time":"2025-04-21T18:07:03Z","timestamp":1745258823000},"page":"1504-1513","source":"Crossref","is-referenced-by-count":0,"title":["MES-RANet: Two-Stage Relation-Aware Graph Convolutional Network for Macro and Micro-Expression Spotting in Long Videos"],"prefix":"10.1587","volume":"E108.A","author":[{"given":"Ruicong","family":"ZHI","sequence":"first","affiliation":[{"name":"School of Computer and Communication Engineering, University of Science and Technology Beijing"},{"name":"Beijing Key Laboratory of Knowledge Engineering for Materials Science"}]},{"given":"Jing","family":"HU","sequence":"additional","affiliation":[{"name":"School of Computer and Communication Engineering, University of Science and Technology Beijing"},{"name":"Beijing Key Laboratory of Knowledge Engineering for Materials Science"}]},{"given":"Jinming","family":"PING","sequence":"additional","affiliation":[{"name":"School of Computer and Communication Engineering, University of Science and Technology Beijing"},{"name":"Beijing Key Laboratory of Knowledge Engineering for Materials Science"}]},{"given":"Fei","family":"WAN","sequence":"additional","affiliation":[{"name":"School of Computer and Communication Engineering, University of Science and Technology Beijing"},{"name":"Beijing Key Laboratory of Knowledge Engineering for Materials Science"}]}],"member":"532","reference":[{"key":"1","doi-asserted-by":"publisher","unstructured":"[1] W.J. Yan, Q. Wu, J. Liang, Y.H. Chen, and X. Fu, \u201cHow fast are the leaked facial expressions: The duration of micro-expressions,\u201d J. Nonverbal Behav., vol.37, pp.217-230, 2013. 10.1007\/s10919-013-0159-8","DOI":"10.1007\/s10919-013-0159-8"},{"key":"2","doi-asserted-by":"publisher","unstructured":"[2] X. Ben, Y. Ren, J. Zhang, S.J. Wang, K. Kpalma, W. Meng, and Y.J. Liu, \u201cVideo-based facial micro-expression analysis: A survey of datasets, features and algorithms,\u201d IEEE Trans. Pattern Anal. Mach. Intell., vol.44, no.9, pp.5826-5846, 2021. 10.1109\/TPAMI.2021.3067464","DOI":"10.1109\/TPAMI.2021.3067464"},{"key":"3","doi-asserted-by":"crossref","unstructured":"[3] P. Ekman, \u201cLie catching and microexpressions,\u201d The Philosophy of Deception, Clancy Martin, ed., Chap.7, pp.118-136, Oxford University Press, 2009. 10.1093\/acprof:oso\/9780195327939.003.0008","DOI":"10.1093\/acprof:oso\/9780195327939.003.0008"},{"key":"4","doi-asserted-by":"publisher","unstructured":"[4] S. Porter and L. Ten Brinke, \u201cReading between the lies: Identifying concealed and falsified emotions in universal facial expressions,\u201d Psychol. Sci., vol.19, no.5, pp.508-514, 2008. 10.1111\/j.1467-9280.2008.02116.x","DOI":"10.1111\/j.1467-9280.2008.02116.x"},{"key":"5","doi-asserted-by":"crossref","unstructured":"[5] A. Moilanen, G. Zhao, and M. 
Pietik\u00e4inen, \u201cSpotting rapid facial movements from videos using appearance-based feature difference analysis,\u201d 2014 22nd International Conference on Pattern Recognition, pp.1722-1727, IEEE, 2014. 10.1109\/ICPR.2014.303","DOI":"10.1109\/ICPR.2014.303"},{"key":"6","doi-asserted-by":"crossref","unstructured":"[6] S. Yin, S. Wu, T. Xu, S. Liu, S. Zhao, and E. Chen, \u201cAU-aware graph convolutional network for macro- and micro-expression spotting,\u201d 2023 IEEE International Conference on Multimedia and Expo (ICME), pp.228-233, IEEE, 2023. 10.1109\/icme55011.2023.00047","DOI":"10.1109\/ICME55011.2023.00047"},{"key":"7","doi-asserted-by":"crossref","unstructured":"[7] L. Xue, T. Zhu, and J. Hao, \u201cA two-stage deep neural network for macro- and micro-expression spotting from long-term videos,\u201d 2021 14th International Symposium on Computational Intelligence and Design (ISCID), pp.282-286, IEEE, 2021. 10.1109\/iscid52796.2021.00072","DOI":"10.1109\/ISCID52796.2021.00072"},{"key":"8","doi-asserted-by":"publisher","unstructured":"[8] H.J. Kramer, L.A. Parra, K.H. Lara, P.D. Hastings, and K.H. Lagattuta, \u201cConsistency among social groups in judging emotions across time,\u201d Emotion, vol.22, no.5, pp.880-893, 2022. 10.1037\/emo0000836","DOI":"10.1037\/emo0000836"},{"key":"9","doi-asserted-by":"publisher","unstructured":"[9] P. Kuppens, Z. Oravecz, and F. Tuerlinckx, \u201cFeelings change: Accounting for individual differences in the temporal dynamics of affect,\u201d Journal of Personality and Social Psychology, vol.99, no.6, pp.1042-1060, 2010. 10.1037\/a0020962","DOI":"10.1037\/a0020962"},{"key":"10","doi-asserted-by":"publisher","unstructured":"[10] M. Houben, W. Van Den Noortgate, and P. Kuppens, \u201cThe relation between short-term emotion dynamics and psychological well-being: A meta-analysis,\u201d Psychological Bulletin, vol.141, no.4, pp.901-930, 2015. 10.1037\/a0038822","DOI":"10.1037\/a0038822"},{"key":"11","doi-asserted-by":"crossref","unstructured":"[11] S.T. Liong, J. See, K. Wong, A.C. Le Ngo, Y.H. Oh, and R. Phan, \u201cAutomatic apex frame spotting in micro-expression database,\u201d 2015 3rd IAPR Asian Conference on Pattern Recognition (ACPR), pp.665-669, IEEE, 2015. 10.1109\/acpr.2015.7486586","DOI":"10.1109\/ACPR.2015.7486586"},{"key":"12","doi-asserted-by":"crossref","unstructured":"[12] Y. Gan and S.T. Liong, \u201cBi-directional vectors from apex in CNN for micro-expression recognition,\u201d 2018 IEEE 3rd International Conference on Image, Vision and Computing (ICIVC), pp.168-172, IEEE, 2018. 10.1109\/icivc.2018.8492829","DOI":"10.1109\/ICIVC.2018.8492829"},{"key":"13","doi-asserted-by":"crossref","unstructured":"[13] L. Zhou, Q. Mao, and L. Xue, \u201cCross-database micro-expression recognition: A style aggregated and attention transfer approach,\u201d 2019 IEEE International Conference on Multimedia &amp; Expo Workshops (ICMEW), pp.102-107, IEEE, 2019. 10.1109\/icmew.2019.00025","DOI":"10.1109\/ICMEW.2019.00025"},{"key":"14","doi-asserted-by":"crossref","unstructured":"[14] Y. Li, X. Huang, and G. Zhao, \u201cCan micro-expression be recognized based on single apex frame?,\u201d 2018 25th IEEE International Conference on Image Processing (ICIP), pp.3094-3098, IEEE, 2018. 10.1109\/icip.2018.8451376","DOI":"10.1109\/ICIP.2018.8451376"},{"key":"15","doi-asserted-by":"crossref","unstructured":"[15] N. Van Quang, J. Chun, and T. 
Tokuyama, \u201cCapsuleNet for micro-expression recognition,\u201d 2019 14th IEEE International Conference on Automatic Face &amp; Gesture Recognition (FG 2019), pp.1-7, IEEE, 2019. 10.1109\/fg.2019.8756544","DOI":"10.1109\/FG.2019.8756544"},{"key":"16","doi-asserted-by":"crossref","unstructured":"[16] H. Pan, L. Xie, and Z. Wang, \u201cLocal bilinear convolutional neural network for spotting macro- and micro-expression intervals in long video sequences,\u201d 2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020), pp.749-753, IEEE, 2020. 10.1109\/fg47880.2020.00052","DOI":"10.1109\/FG47880.2020.00052"},{"key":"17","doi-asserted-by":"crossref","unstructured":"[17] Z. Tian, C. Shen, H. Chen, and T. He, \u201cFCOS: Fully convolutional one-stage object detection,\u201d Proc. IEEE\/CVF International Conference on Computer Vision, pp.9627-9636, 2019. 10.1109\/iccv.2019.00972","DOI":"10.1109\/ICCV.2019.00972"},{"key":"18","doi-asserted-by":"publisher","unstructured":"[18] J. Liu, X. Li, J. Zhang, G. Zhai, Y. Su, Y. Zhang, and B. Wang, \u201cDuration-aware and mode-aware micro-expression spotting for long video sequences,\u201d Signal Processing: Image Communication, vol.129, p.117192, 2024. 10.1016\/j.image.2024.117192","DOI":"10.1016\/j.image.2024.117192"},{"key":"19","doi-asserted-by":"crossref","unstructured":"[19] G.B. Liong, J. See, and L.K. Wong, \u201cShallow optical flow three-stream CNN for macro- and micro-expression spotting from long videos,\u201d 2021 IEEE International Conference on Image Processing (ICIP), pp.2643-2647, IEEE, 2021. 10.1109\/icip42928.2021.9506349","DOI":"10.1109\/ICIP42928.2021.9506349"},{"key":"20","doi-asserted-by":"crossref","unstructured":"[20] M. Bai and R. Goecke, \u201cCan expression sensitivity improve macro- and micro-expression spotting in long videos?,\u201d Proc. 2nd International Workshop on Multimodal and Responsible Affective Computing, pp.30-38, 2024. 10.1145\/3689092.3689396","DOI":"10.1145\/3689092.3689396"},{"key":"21","doi-asserted-by":"crossref","unstructured":"[21] W.W. Yu, J. Jiang, and Y.J. Li, \u201cLSSNet: A two-stream convolutional neural network for spotting macro- and micro-expression in long videos,\u201d Proc. 29th ACM International Conference on Multimedia, pp.4745-4749, 2021. 10.1145\/3474085.3479215","DOI":"10.1145\/3474085.3479215"},{"key":"22","doi-asserted-by":"crossref","unstructured":"[22] J. Carreira and A. Zisserman, \u201cQuo vadis, action recognition? A new model and the Kinetics dataset,\u201d Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp.6299-6308, 2017. 10.1109\/cvpr.2017.502","DOI":"10.1109\/CVPR.2017.502"},{"key":"23","doi-asserted-by":"crossref","unstructured":"[23] Z. Zhang, S. Zhao, X. Mao, S. Liu, H. Wang, T. Xu, and E. Chen, \u201cA multi-scale feature learning network with optical flow correction for micro- and macro-expression spotting,\u201d Proc. 32nd ACM International Conference on Multimedia, pp.11497-11502, 2024. 10.1145\/3664647.3689143","DOI":"10.1145\/3664647.3689143"},{"key":"24","doi-asserted-by":"publisher","unstructured":"[24] X. Guo, X. Zhang, L. Li, and Z. Xia, \u201cMicro-expression spotting with multi-scale local transformer in long videos,\u201d Pattern Recognition Letters, vol.168, pp.146-152, 2023. 10.1016\/j.patrec.2023.03.012","DOI":"10.1016\/j.patrec.2023.03.012"},{"key":"25","doi-asserted-by":"crossref","unstructured":"[25] X. He, X. Wu, J. Peng, Q. Li, X. Ma, and Y. 
He, \u201cBidirectional cross-scale feature fusion for long video micro-expression 3D spotting network,\u201d 2022 IEEE 21st International Conference on Cognitive Informatics &amp; Cognitive Computing (ICCI*CC), pp.110-115, IEEE, 2022. 10.1109\/ICCICC57084.2022.10101555","DOI":"10.1109\/ICCICC57084.2022.10101555"},{"key":"26","doi-asserted-by":"publisher","unstructured":"[26] S.J. Wang, Y. He, J. Li, and X. Fu, \u201cMESNet: A convolutional neural network for spotting multi-scale micro-expression intervals in long videos,\u201d IEEE Trans. Image Process., vol.30, pp.3956-3969, 2021. 10.1109\/tip.2021.3064258","DOI":"10.1109\/TIP.2021.3064258"},{"key":"27","doi-asserted-by":"crossref","unstructured":"[27] Y.W. Chao, S. Vijayanarasimhan, B. Seybold, D.A. Ross, J. Deng, and R. Sukthankar, \u201cRethinking the faster R-CNN architecture for temporal action localization,\u201d Proc. IEEE Conference on Computer Vision and Pattern Recognition, pp.1130-1139, 2018. 10.1109\/cvpr.2018.00124","DOI":"10.1109\/CVPR.2018.00124"},{"key":"28","doi-asserted-by":"crossref","unstructured":"[28] X. Dai, B. Singh, G. Zhang, L.S. Davis, and Y. Qiu Chen, \u201cTemporal context network for activity localization in videos,\u201d Proc. IEEE International Conference on Computer Vision, pp.5793-5802, 2017. 10.1109\/iccv.2017.610","DOI":"10.1109\/ICCV.2017.610"},{"key":"29","doi-asserted-by":"crossref","unstructured":"[29] Y. Zhao, Y. Xiong, L. Wang, Z. Wu, X. Tang, and D. Lin, \u201cTemporal action detection with structured segment networks,\u201d Proc. IEEE International Conference on Computer Vision (ICCV), pp.2914-2923, 2017. 10.1109\/iccv.2017.317","DOI":"10.1109\/ICCV.2017.317"},{"key":"30","doi-asserted-by":"publisher","unstructured":"[30] F. Qu, S.J. Wang, W.J. Yan, H. Li, S. Wu, and X. Fu, \u201cCAS(ME)<sup>2<\/sup>: A database for spontaneous macro-expression and micro-expression spotting and recognition,\u201d IEEE Trans. Affective Comput., vol.9, no.4, pp.424-436, 2017. 10.1109\/taffc.2017.2654440","DOI":"10.1109\/TAFFC.2017.2654440"},{"key":"31","doi-asserted-by":"crossref","unstructured":"[31] C.H. Yap, C. Kendrick, and M.H. Yap, \u201cSAMM long videos: A spontaneous facial micro- and macro-expressions dataset,\u201d 2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020), pp.771-776, IEEE, 2020. 10.1109\/fg47880.2020.00029","DOI":"10.1109\/FG47880.2020.00029"},{"key":"32","doi-asserted-by":"crossref","unstructured":"[33] Y. He, S.J. Wang, J. Li, and M.H. Yap, \u201cSpotting macro- and micro-expression intervals in long video sequences,\u201d 2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020), pp.742-748, IEEE, 2020. 10.1109\/fg47880.2020.00036","DOI":"10.1109\/FG47880.2020.00036"},{"key":"33","doi-asserted-by":"crossref","unstructured":"[34] L.W. Zhang, J. Li, S.J. Wang, X.H. Duan, W.J. Yan, H.Y. Xie, and S.C. Huang, \u201cSpatio-temporal fusion for macro- and micro-expression spotting in long video sequences,\u201d 2020 15th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2020), pp.734-741, IEEE, 2020. 10.1109\/fg47880.2020.00037","DOI":"10.1109\/FG47880.2020.00037"},{"key":"34","doi-asserted-by":"crossref","unstructured":"[35] C.H. Yap, M.H. Yap, A. Davison, C. Kendrick, J. Li, S.J. Wang, and R. Cunningham, \u201c3D-CNN for facial micro- and macro-expression spotting on long video sequences using temporal oriented reference frame,\u201d Proc. 
30th ACM International Conference on Multimedia, pp.7016-7020, 2022. 10.1145\/3503161.3551570","DOI":"10.1145\/3503161.3551570"},{"key":"35","doi-asserted-by":"publisher","unstructured":"[36] B. Yang, J. Wu, K. Ikeda, G. Hattori, M. Sugano, Y. Iwasawa, and Y. Matsuo, \u201cDeep learning pipeline for spotting macro- and micro-expressions in long video sequences based on action units and optical flow,\u201d Pattern Recognition Letters, vol.165, pp.63-74, 2023. 10.1016\/j.patrec.2022.12.001","DOI":"10.1016\/j.patrec.2022.12.001"},{"key":"36","doi-asserted-by":"crossref","unstructured":"[37] G.B. Liong, S.T. Liong, J. See, and C.S. Chan, \u201cMTSN: A multi-temporal stream network for spotting facial macro- and micro-expression with hard and soft pseudo-labels,\u201d Proc. 2nd Workshop on Facial Micro-Expression: Advanced Techniques for Multi-Modal Facial Expression Analysis, pp.3-10, 2022. 10.1145\/3552465.3555040","DOI":"10.1145\/3552465.3555040"}],"container-title":["IEICE Transactions on Fundamentals of Electronics, Communications and Computer Sciences"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transfun\/E108.A\/11\/E108.A_2024EAP1128\/_pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T03:39:25Z","timestamp":1761968365000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transfun\/E108.A\/11\/E108.A_2024EAP1128\/_article"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,1]]},"references-count":36,"journal-issue":{"issue":"11","published-print":{"date-parts":[[2025]]}},"URL":"https:\/\/doi.org\/10.1587\/transfun.2024eap1128","relation":{},"ISSN":["0916-8508","1745-1337"],"issn-type":[{"type":"print","value":"0916-8508"},{"type":"electronic","value":"1745-1337"}],"subject":[],"published":{"date-parts":[[2025,11,1]]},"article-number":"2024EAP1128"}}