{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T14:49:13Z","timestamp":1774968553324,"version":"3.50.1"},"reference-count":63,"publisher":"MDPI AG","issue":"8","license":[{"start":{"date-parts":[[2024,4,18]],"date-time":"2024-04-18T00:00:00Z","timestamp":1713398400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"National Natural Science Foundation of China","award":["62077022"],"award-info":[{"award-number":["62077022"]}]},{"name":"National Natural Science Foundation of China","award":["62277041"],"award-info":[{"award-number":["62277041"]}]},{"name":"National Natural Science Foundation of China","award":["62077020"],"award-info":[{"award-number":["62077020"]}]},{"name":"National Natural Science Foundation of China","award":["62173286"],"award-info":[{"award-number":["62173286"]}]},{"name":"National Natural Science Foundation of China","award":["62211530433"],"award-info":[{"award-number":["62211530433"]}]},{"name":"National Natural Science Foundation of China","award":["62177018"],"award-info":[{"award-number":["62177018"]}]},{"name":"National Natural Science Foundation of China","award":["CCNUTEIII 2021-21"],"award-info":[{"award-number":["CCNUTEIII 2021-21"]}]},{"name":"National Natural Science Foundation of China","award":["20232BAB212026"],"award-info":[{"award-number":["20232BAB212026"]}]},{"name":"National Natural Science Foundation of China","award":["2022CFB971"],"award-info":[{"award-number":["2022CFB971"]}]},{"name":"National Natural Science Foundation of China","award":["JXJG-23-27-6"],"award-info":[{"award-number":["JXJG-23-27-6"]}]},{"name":"Research Project of National Collaborative Innovation Experimental Base for Teacher Development of Central China Normal University","award":["62077022"],"award-info":[{"award-number":["62077022"]}]},{"name":"Research Project of National Collaborative 
Innovation Experimental Base for Teacher Development of Central China Normal University","award":["62277041"],"award-info":[{"award-number":["62277041"]}]},{"name":"Research Project of National Collaborative Innovation Experimental Base for Teacher Development of Central China Normal University","award":["62077020"],"award-info":[{"award-number":["62077020"]}]},{"name":"Research Project of National Collaborative Innovation Experimental Base for Teacher Development of Central China Normal University","award":["62173286"],"award-info":[{"award-number":["62173286"]}]},{"name":"Research Project of National Collaborative Innovation Experimental Base for Teacher Development of Central China Normal University","award":["62211530433"],"award-info":[{"award-number":["62211530433"]}]},{"name":"Research Project of National Collaborative Innovation Experimental Base for Teacher Development of Central China Normal University","award":["62177018"],"award-info":[{"award-number":["62177018"]}]},{"name":"Research Project of National Collaborative Innovation Experimental Base for Teacher Development of Central China Normal University","award":["CCNUTEIII 2021-21"],"award-info":[{"award-number":["CCNUTEIII 2021-21"]}]},{"name":"Research Project of National Collaborative Innovation Experimental Base for Teacher Development of Central China Normal University","award":["20232BAB212026"],"award-info":[{"award-number":["20232BAB212026"]}]},{"name":"Research Project of National Collaborative Innovation Experimental Base for Teacher Development of Central China Normal University","award":["2022CFB971"],"award-info":[{"award-number":["2022CFB971"]}]},{"name":"Research Project of National Collaborative Innovation Experimental Base for Teacher Development of Central China Normal University","award":["JXJG-23-27-6"],"award-info":[{"award-number":["JXJG-23-27-6"]}]},{"name":"Jiangxi Provincial Natural Science 
Foundation","award":["62077022"],"award-info":[{"award-number":["62077022"]}]},{"name":"Jiangxi Provincial Natural Science Foundation","award":["62277041"],"award-info":[{"award-number":["62277041"]}]},{"name":"Jiangxi Provincial Natural Science Foundation","award":["62077020"],"award-info":[{"award-number":["62077020"]}]},{"name":"Jiangxi Provincial Natural Science Foundation","award":["62173286"],"award-info":[{"award-number":["62173286"]}]},{"name":"Jiangxi Provincial Natural Science Foundation","award":["62211530433"],"award-info":[{"award-number":["62211530433"]}]},{"name":"Jiangxi Provincial Natural Science Foundation","award":["62177018"],"award-info":[{"award-number":["62177018"]}]},{"name":"Jiangxi Provincial Natural Science Foundation","award":["CCNUTEIII 2021-21"],"award-info":[{"award-number":["CCNUTEIII 2021-21"]}]},{"name":"Jiangxi Provincial Natural Science Foundation","award":["20232BAB212026"],"award-info":[{"award-number":["20232BAB212026"]}]},{"name":"Jiangxi Provincial Natural Science Foundation","award":["2022CFB971"],"award-info":[{"award-number":["2022CFB971"]}]},{"name":"Jiangxi Provincial Natural Science Foundation","award":["JXJG-23-27-6"],"award-info":[{"award-number":["JXJG-23-27-6"]}]},{"name":"National Natural Science Foundation of Hubei Province","award":["62077022"],"award-info":[{"award-number":["62077022"]}]},{"name":"National Natural Science Foundation of Hubei Province","award":["62277041"],"award-info":[{"award-number":["62277041"]}]},{"name":"National Natural Science Foundation of Hubei Province","award":["62077020"],"award-info":[{"award-number":["62077020"]}]},{"name":"National Natural Science Foundation of Hubei Province","award":["62173286"],"award-info":[{"award-number":["62173286"]}]},{"name":"National Natural Science Foundation of Hubei Province","award":["62211530433"],"award-info":[{"award-number":["62211530433"]}]},{"name":"National Natural Science Foundation of Hubei 
Province","award":["62177018"],"award-info":[{"award-number":["62177018"]}]},{"name":"National Natural Science Foundation of Hubei Province","award":["CCNUTEIII 2021-21"],"award-info":[{"award-number":["CCNUTEIII 2021-21"]}]},{"name":"National Natural Science Foundation of Hubei Province","award":["20232BAB212026"],"award-info":[{"award-number":["20232BAB212026"]}]},{"name":"National Natural Science Foundation of Hubei Province","award":["2022CFB971"],"award-info":[{"award-number":["2022CFB971"]}]},{"name":"National Natural Science Foundation of Hubei Province","award":["JXJG-23-27-6"],"award-info":[{"award-number":["JXJG-23-27-6"]}]},{"name":"university teaching reform research project of Jiangxi Province","award":["62077022"],"award-info":[{"award-number":["62077022"]}]},{"name":"university teaching reform research project of Jiangxi Province","award":["62277041"],"award-info":[{"award-number":["62277041"]}]},{"name":"university teaching reform research project of Jiangxi Province","award":["62077020"],"award-info":[{"award-number":["62077020"]}]},{"name":"university teaching reform research project of Jiangxi Province","award":["62173286"],"award-info":[{"award-number":["62173286"]}]},{"name":"university teaching reform research project of Jiangxi Province","award":["62211530433"],"award-info":[{"award-number":["62211530433"]}]},{"name":"university teaching reform research project of Jiangxi Province","award":["62177018"],"award-info":[{"award-number":["62177018"]}]},{"name":"university teaching reform research project of Jiangxi Province","award":["CCNUTEIII 2021-21"],"award-info":[{"award-number":["CCNUTEIII 2021-21"]}]},{"name":"university teaching reform research project of Jiangxi Province","award":["20232BAB212026"],"award-info":[{"award-number":["20232BAB212026"]}]},{"name":"university teaching reform research project of Jiangxi Province","award":["2022CFB971"],"award-info":[{"award-number":["2022CFB971"]}]},{"name":"university teaching reform research 
project of Jiangxi Province","award":["JXJG-23-27-6"],"award-info":[{"award-number":["JXJG-23-27-6"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Sensors"],"abstract":"<jats:p>Teaching gesture recognition is a technique used to recognize the hand movements of teachers in classroom teaching scenarios. This technology is widely used in education, including for classroom teaching evaluation, enhancing online teaching, and assisting special education. However, current research on gesture recognition in teaching mainly focuses on detecting the static gestures of individual students and analyzing their classroom behavior. To analyze the teacher\u2019s gestures and mitigate the difficulty of single-target dynamic gesture recognition in multi-person teaching scenarios, this paper proposes skeleton-based teaching gesture recognition (ST-TGR), which learns through spatio-temporal representation. This method mainly uses the human pose estimation technique RTMPose to extract the coordinates of the keypoints of the teacher\u2019s skeleton and then inputs the recognized sequence of the teacher\u2019s skeleton into the MoGRU action recognition network for classifying gesture actions. The MoGRU action recognition module mainly learns the spatio-temporal representation of target actions by stacking a multi-scale bidirectional gated recurrent unit (BiGRU) and using improved attention mechanism modules. To validate the generalization of the action recognition network model, we conducted comparative experiments on datasets including NTU RGB+D 60, UT-Kinect Action3D, SBU Kinect Interaction, and Florence 3D. 
The results indicate that, compared with most existing baseline models, the model proposed in this article exhibits better performance in recognition accuracy and speed.<\/jats:p>","DOI":"10.3390\/s24082589","type":"journal-article","created":{"date-parts":[[2024,4,18]],"date-time":"2024-04-18T07:55:54Z","timestamp":1713426954000},"page":"2589","update-policy":"https:\/\/doi.org\/10.3390\/mdpi_crossmark_policy","source":"Crossref","is-referenced-by-count":21,"title":["ST-TGR: Spatio-Temporal Representation Learning for Skeleton-Based Teaching Gesture Recognition"],"prefix":"10.3390","volume":"24","author":[{"given":"Zengzhao","family":"Chen","sequence":"first","affiliation":[{"name":"Faculty of Artificial Intelligence in Education, Central China Normal University, Wuhan 430079, China"},{"name":"National Engineering Research Center for E-Learning, Central China Normal University, Wuhan 430079, China"}]},{"given":"Wenkai","family":"Huang","sequence":"additional","affiliation":[{"name":"Faculty of Artificial Intelligence in Education, Central China Normal University, Wuhan 430079, China"}]},{"given":"Hai","family":"Liu","sequence":"additional","affiliation":[{"name":"Faculty of Artificial Intelligence in Education, Central China Normal University, Wuhan 430079, China"},{"name":"National Engineering Research Center for E-Learning, Central China Normal University, Wuhan 430079, China"}]},{"given":"Zhuo","family":"Wang","sequence":"additional","affiliation":[{"name":"Faculty of Artificial Intelligence in Education, Central China Normal University, Wuhan 430079, China"}]},{"given":"Yuqun","family":"Wen","sequence":"additional","affiliation":[{"name":"Faculty of Literature and Journalism, Xiangtan University, Xiangtan 411105, China"}]},{"given":"Shengming","family":"Wang","sequence":"additional","affiliation":[{"name":"National Engineering Research Center of Big Data, Central China Normal University, Wuhan 430079, 
China"}]}],"member":"1968","published-online":{"date-parts":[[2024,4,18]]},"reference":[{"key":"ref_1","doi-asserted-by":"crossref","first-page":"27","DOI":"10.1186\/s41235-020-00215-8","article-title":"Gesture during math instruction specifically benefits learners with high visuospatial working memory capacity","volume":"5","author":"Aldugom","year":"2020","journal-title":"Cogn. Res. Princ. Implic."},{"key":"ref_2","doi-asserted-by":"crossref","unstructured":"Ali, N.M., and Ali, M.S.M. (2019, January 9\u201311). Evaluation of Students\u2019 Acceptance of the Leap Motion Hand Gesture Application in Teaching Biochemistry. Proceedings of the 2019 2nd International Conference on new Trends in Computing Sciences (ICTCS), Amman, Jordan.","DOI":"10.1109\/ICTCS.2019.8923107"},{"key":"ref_3","doi-asserted-by":"crossref","first-page":"e12664","DOI":"10.1111\/desc.12664","article-title":"Gesture helps learners learn, but not merely by guiding their visual attention","volume":"21","author":"Wakefield","year":"2018","journal-title":"Dev. Sci."},{"key":"ref_4","doi-asserted-by":"crossref","unstructured":"Gu, Y., Hu, J., Zhou, Y., and Lu, L. (2020, January 10\u201313). Online Teaching Gestures Recognition Model Based on Deep Learning. Proceedings of the 2020 International Conference on Networking and Network Applications (NaNA), Haikou City, China.","DOI":"10.1109\/NaNA51271.2020.00076"},{"key":"ref_5","doi-asserted-by":"crossref","unstructured":"Qin, W., Mei, X., Chen, Y., Zhang, Q., Yao, Y., and Hu, S. (2021, January 3\u20134). Sign Language Recognition and Translation Method based on VTN. 
Proceedings of the 2021 International Conference on Digital Society and Intelligent Systems (DSInS), Chengdu, China.","DOI":"10.1109\/DSInS54396.2021.9670588"},{"key":"ref_6","doi-asserted-by":"crossref","first-page":"93785","DOI":"10.1109\/ACCESS.2022.3204110","article-title":"An Efficient Two-Stream Network for Isolated Sign Language Recognition Using Accumulative Video Motion","volume":"10","author":"Luqman","year":"2022","journal-title":"IEEE Access"},{"key":"ref_7","first-page":"124","article-title":"EHPE: Skeleton Cues-based Gaussian Coordinate Encoding for Efficient Human Pose Estimation","volume":"24","author":"Liu","year":"2024","journal-title":"IEEE Trans. Multimed."},{"key":"ref_8","doi-asserted-by":"crossref","unstructured":"Guo, X., Xu, W., Tang, W., and Wen, C. (2019, January 24\u201326). Research on Optimization of Static Gesture Recognition Based on Convolution Neural Network. Proceedings of the 2019 4th International Conference on Mechanical, Control and Computer Engineering (ICMCCE), Hohhot, China.","DOI":"10.1109\/ICMCCE48743.2019.00095"},{"key":"ref_9","unstructured":"Li, J., and Li, Z. (2021). Dynamic gesture recognition algorithm Combining Global Gesture Motion and Local Finger Motion for interactive teaching. IEEE Access, 1."},{"key":"ref_10","doi-asserted-by":"crossref","first-page":"4361","DOI":"10.1109\/TII.2021.3128240","article-title":"EDMF: Efficient Deep Matrix Factorization with Review Feature Learning for Industrial Recommender System","volume":"18","author":"Liu","year":"2022","journal-title":"IEEE Trans. Ind. Inform."},{"key":"ref_11","doi-asserted-by":"crossref","unstructured":"Mcbride, T., Vandayar, N., and Nixon, K. (2019, January 28\u201330). A Comparison of Skin Detection Algorithms for Hand Gesture Recognition. 
Proceedings of the Southern African Universities Power Engineering Conference\/Robotics Mechatronics\/Pattern Recognition Association of South Africa, Bloemfontein, South Africa.","DOI":"10.1109\/RoboMech.2019.8704839"},{"key":"ref_12","doi-asserted-by":"crossref","first-page":"50547","DOI":"10.1109\/ACCESS.2023.3278100","article-title":"Dynamic Gesture Recognition Based on Three-Stream Coordinate Attention Network and Knowledge Distillation","volume":"11","author":"Wan","year":"2023","journal-title":"IEEE Access"},{"key":"ref_13","unstructured":"Mian, L., and Jiping, Z. (2019). Research on future Intelligent Classroom Teaching System Design\u2014Using Gesture Recognition as Technical Support. China Electron. Educ., 14\u201321."},{"key":"ref_14","doi-asserted-by":"crossref","unstructured":"Li, W., Wen, L., Chang, M.C., Lim, S.N., and Lyu, S. (2017, January 22\u201329). Adaptive RNN tree for large-scale human action recognition. Proceedings of the 2017 IEEE International Conference on Computer Vision (ICCV), Venice, Italy.","DOI":"10.1109\/ICCV.2017.161"},{"key":"ref_15","doi-asserted-by":"crossref","unstructured":"Gao, Y., Li, C., Li, S., Cai, X., Ye, M., and Yuan, H. (2022). Variable Rate Independently Recurrent Neural Network (IndRNN) for Action Recognition. Appl. Sci., 12.","DOI":"10.3390\/app12073281"},{"key":"ref_16","doi-asserted-by":"crossref","unstructured":"Ryumin, D., Ivanko, D., and Ryumina, E. (2023). Audio-Visual Speech and Gesture Recognition by Sensors of Mobile Devices. Sensors, 23.","DOI":"10.3390\/s23042284"},{"key":"ref_17","doi-asserted-by":"crossref","unstructured":"Tu, J.H., Liu, M.Y., and Liu, H. (2018, January 23\u201327). Skeleton-based human action recognition using spatial temporal 3d convolutional neural networks. 
Proceedings of the 2018 IEEE International Conference on Multimedia and Expo (ICME), San Diego, CA, USA.","DOI":"10.1109\/ICME.2018.8486566"},{"key":"ref_18","first-page":"6650632","article-title":"3D skeletal human action recognition using a CNN fusion model","volume":"2021","author":"Li","year":"2021","journal-title":"Math. Probl. Eng."},{"key":"ref_19","unstructured":"Bruna, J., Zaremba, W., Szlam, A., and Lecun, Y. (2014, January 14\u201316). Spectral Networks and Locally Connected Networks on Graphs. Proceedings of the International Conference on Learning Representations (ICLR2014), CBLS, Banff, AB, Canada."},{"key":"ref_20","unstructured":"Yan, S., Xiong, Y., and Lin, D. (, January 2\u20137). Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition. Proceedings of the AAAI\u201918: AAAI Conference on Artificial Intelligence, New Orleans, LA, USA."},{"key":"ref_21","doi-asserted-by":"crossref","unstructured":"Chen, Y., Zhang, Z., and Yuan, C. (2021, January 11\u201317). Channel-wise topology refinement graph convolution for skeleton-based action recognition. Proceedings of the 18th IEEE\/CVF International Conference on Computer Vision (ICCV), Montreal, BC, Canada.","DOI":"10.1109\/ICCV48922.2021.01311"},{"key":"ref_22","doi-asserted-by":"crossref","unstructured":"Chi, H.G., Ha, M.H., Chi, S., Lee, S.W., Huang, Q., and Ramani, K. (2022, January 18\u201324). InfoGCN: Representation learning for human skeleton-based action recognition. Proceedings of the 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), New Orleans, LA, USA.","DOI":"10.1109\/CVPR52688.2022.01955"},{"key":"ref_23","doi-asserted-by":"crossref","unstructured":"Jiang, S., Sun, B., Wang, L., Bai, Y., Li, K., and Fu, Y. (2021, January 19\u201325). Skeleton Aware Multi-modal Sign Language Recognition. 
Proceedings of the 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), Nashville, TN, USA.","DOI":"10.1109\/CVPRW53098.2021.00380"},{"key":"ref_24","doi-asserted-by":"crossref","unstructured":"Liu, T., Liu, H., Yang, B., and Zhang, Z. (2024). LDCNet: Limb Direction Cues-aware Network for Flexible Human Pose Estimation in Industrial Behavioral Biometrics Systems. IEEE Trans. Ind. Inform., 1\u201311.","DOI":"10.1109\/TII.2023.3266366"},{"key":"ref_25","doi-asserted-by":"crossref","unstructured":"Liu, T., Li, Y., Liu, H., Zhang, Z., and Liu, S. (2023). RISIR: Rapid Infrared Spectral Imaging Restoration Model for Industrial Material Detection in Intelligent Video Systems. IEEE Trans. Ind. Inform., 1.","DOI":"10.1109\/TII.2019.2930463"},{"key":"ref_26","doi-asserted-by":"crossref","first-page":"2449","DOI":"10.1109\/TMM.2021.3081873","article-title":"MFDNet: Collaborative Poses Perception and Matrix Fisher Distribution for Head Pose Estimation","volume":"24","author":"Liu","year":"2022","journal-title":"IEEE Trans. Multimed."},{"key":"ref_27","doi-asserted-by":"crossref","unstructured":"Fang, H.S., Xie, S., Tai, Y.W., and Lu, C. (2017, January 22\u201329). RMPE: Regional Multi-Person Pose Estimation. Proceedings of the IEEE International Conference on Computer Vision (ICCV), Venice, Italy.","DOI":"10.1109\/ICCV.2017.256"},{"key":"ref_28","doi-asserted-by":"crossref","unstructured":"Xiao, B., Wu, H., and Wei, Y. (2018, January 8\u201314). Simple Baselines for Human Pose Estimation and Tracking. Proceedings of the Computer Vision\u2014ECCV 2018, Munich, Germany. Lecture Notes in Computer Science.","DOI":"10.1007\/978-3-030-01231-1_29"},{"key":"ref_29","doi-asserted-by":"crossref","first-page":"3349","DOI":"10.1109\/TPAMI.2020.2983686","article-title":"Deep High-Resolution Representation Learning for Visual Recognition","volume":"43","author":"Wang","year":"2021","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"ref_30","doi-asserted-by":"crossref","unstructured":"Cao, Z., Hidalgo, G., Simon, T., Wei, S.E., and Sheikh, Y. (2017, January 21\u201326). OpenPose: Realtime Multi-Person 2D Pose Estimation Using Part Affinity Fields. Proceedings of the 2017 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Honolulu, HI, USA.","DOI":"10.1109\/CVPR.2017.143"},{"key":"ref_31","doi-asserted-by":"crossref","unstructured":"Nie, X., Feng, J., Xing, J., and Yan, S. (2018, January 8\u201314). Pose Partition Networks for Multi-Person Pose Estimation. Proceedings of the European Conference on Computer Vision (ECCV), Munich, Germany.","DOI":"10.1007\/978-3-030-01228-1_42"},{"key":"ref_32","doi-asserted-by":"crossref","unstructured":"Kreiss, S., Bertoni, L., and Alahi, A. (2019, January 15\u201320). PifPaf: Composite Fields for Human Pose Estimation. Proceedings of the 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Long Beach, CA, USA.","DOI":"10.1109\/CVPR.2019.01225"},{"key":"ref_33","doi-asserted-by":"crossref","first-page":"6289","DOI":"10.1109\/TIP.2023.3331309","article-title":"Orientation Cues-Aware Facial Relationship Representation for Head Pose Estimation via Transformer","volume":"32","author":"Liu","year":"2023","journal-title":"IEEE Trans. Image Process."},{"key":"ref_34","unstructured":"Girdhar, R., and Ramanan, D. (2017, January 4\u20139). Attentional pooling for action recognition. Proceedings of the 31st International Conference on Neural Information Processing Systems (NIPS), Long Beach, CA, USA."},{"key":"ref_35","unstructured":"Ishikawa, H., Liu, C., Pajdla, T., and Shi, J. (December, January 30). Decoupled Spatial-Temporal Attention Network for Skeleton-Based Action-Gesture Recognition. Proceedings of the Computer Vision\u2014ACCV, Kyoto, Japan. Lecture Notes in Computer Science."},{"key":"ref_36","doi-asserted-by":"crossref","unstructured":"Wang, Z., She, Q., and Smolic, A. (2021, January 20\u201325). 
ACTION-Net: Multipath Excitation for Action Recognition. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR),  Nashville, TN, USA.","DOI":"10.1109\/CVPR46437.2021.01301"},{"key":"ref_37","unstructured":"Jiang, T., Lu, P., Zhang, L., Ma, N., Han, R., Lyu, C., Li, Y., and Chen, K. (2023, January 17\u201324). RTMPose: Real-Time Multi-Person Pose Estimation based on MMPose. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Vancouver, BC, Canada."},{"key":"ref_38","unstructured":"Bazarevsky, V., Grishchenko, I., Raveendran, K., Zhu, T.L., Zhang, F., and Grundmann, M. (2020). BlazePose: On-device Real-time Body Pose tracking. arXiv."},{"key":"ref_39","doi-asserted-by":"crossref","unstructured":"Li, Y., Zhang, S., Wang, Z., Yang, S., Yang, W., Xia, S.T., and Zhou, E. (2021, January 10\u201317). TokenPose: Learning Keypoint Tokens for Human Pose Estimation. Proceedings of the 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), Montreal, QC, Canada.","DOI":"10.1109\/ICCV48922.2021.01112"},{"key":"ref_40","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollar, P., and Zitnick, L. (2014, January 6\u201312). Microsoft COCO: Common Objects in Context. Proceedings of the ECCV, ECCV Ed. European Conference on Computer Vision, Zurich, Switzerland.","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref_41","doi-asserted-by":"crossref","unstructured":"Shahroudy, A., Liu, J., Ng, T.T., and Wang, G. (2016, January 27\u201330). NTU RGB+D: A large scale dataset for 3D human activity analysis. Proceedings of the 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Las Vegas, NV, USA.","DOI":"10.1109\/CVPR.2016.115"},{"key":"ref_42","doi-asserted-by":"crossref","unstructured":"Yun, K., Honorio, J., Chattopadhyay, D., Berg, T.L., and Samaras, D. (2012, January 16\u201321). 
Two-person interaction detection using body-pose features and multiple instance learning. Proceedings of the 2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops (ICCVW), Providence, RI, USA.","DOI":"10.1109\/CVPRW.2012.6239234"},{"key":"ref_43","doi-asserted-by":"crossref","unstructured":"Xia, L., Chen, C.C., and Aggarwal, J.K. (2012, January 16\u201321). View invariant human action recognition using histograms of 3D joints. Proceedings of the 2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops (ICCVW), Providence, RI, USA.","DOI":"10.1109\/CVPRW.2012.6239233"},{"key":"ref_44","doi-asserted-by":"crossref","unstructured":"Seidenari, L., Varano, V., Berrett, S., Bimbo, A., and Pala, P. (2013, January 23\u201328). Recognizing actions from depth cameras as weakly aligned multi-part bag-of-poses. Proceedings of the 2013 IEEE Conference on Computer Vision and Pattern Recognition Workshops (ICCVW), Portland, OR, USA.","DOI":"10.1109\/CVPRW.2013.77"},{"key":"ref_45","doi-asserted-by":"crossref","unstructured":"Kim, T.S., and Reiter, A. (2017, January 21\u201326). Interpretable 3D Human Action Analysis with Temporal Convolutional Networks. Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), Honolulu, HI, USA.","DOI":"10.1109\/CVPRW.2017.207"},{"key":"ref_46","doi-asserted-by":"crossref","unstructured":"Vemulapalli, R., Arrate, F., and Chellappa, R. (2014, January 23\u201328). Human action recognition by representing 3d skeletons as points in a lie group. Proceedings of the 2014 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Columbus, OH, USA.","DOI":"10.1109\/CVPR.2014.82"},{"key":"ref_47","doi-asserted-by":"crossref","first-page":"648","DOI":"10.1109\/TPAMI.2021.3107160","article-title":"Tensor Representations for Action Recognition","volume":"44","author":"Koniusz","year":"2022","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"ref_48","doi-asserted-by":"crossref","unstructured":"Zhang, P., Lan, C., Xing, J., Zeng, W., Xue, J., and Zheng, N. (2017, January 22\u201329). View adaptive recurrent neural networks for high performance human action recognition from skeleton data. Proceedings of the 2017 IEEE International Conference on Computer Vision (ICCV), Venice, Italy.","DOI":"10.1109\/ICCV.2017.233"},{"key":"ref_49","doi-asserted-by":"crossref","unstructured":"Maghoumi, M., and LaViola, J.J. (2019, January 7\u20139). DeepGRU: Deep Gesture Recognition Utility. Proceedings of the Advances in Visual Computing, Lake Tahoe, NV, USA.","DOI":"10.1007\/978-3-030-33720-9_2"},{"key":"ref_50","doi-asserted-by":"crossref","unstructured":"Luvizon, D., Picard, D., and Tabia, H. (2018, January 18\u201322). 2d\/3d pose estimation and action recognition using multitask deep learning. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Salt Lake City, UT, USA.","DOI":"10.1109\/CVPR.2018.00539"},{"key":"ref_51","doi-asserted-by":"crossref","unstructured":"Baradel, F., Wolf, C., Mille, J., and Taylor, G.W. (2018, January 18\u201322). Glimpse clouds: Human activity recognition from unstructured feature points. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Salt Lake City, UT, USA.","DOI":"10.1109\/CVPR.2018.00056"},{"key":"ref_52","doi-asserted-by":"crossref","first-page":"109528","DOI":"10.1016\/j.patcog.2023.109528","article-title":"Continual spatio-temporal graph convolutional networks","volume":"140","author":"Hedegaard","year":"2023","journal-title":"Pattern Recognit."},{"key":"ref_53","doi-asserted-by":"crossref","unstructured":"Lin, L., Zhang, J., and Liu, J. (2023, January 17\u201324). Actionlet-Dependent Contrastive Learning for Unsupervised Skeleton-Based Action Recognition. 
Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Vancouver, BC, Canada.","DOI":"10.1109\/CVPR52729.2023.00234"},{"key":"ref_54","doi-asserted-by":"crossref","unstructured":"Ding, X., Yang, K., and Chen, W. (2020, January 6\u20139). A Semantics-Guided Graph Convolutional Network for Skeleton-Based Action Recognition. Proceedings of the 2020 the 4th International Conference on Innovation in Artificial Intelligence (ICIAI), Xiamen, China.","DOI":"10.1145\/3390557.3394129"},{"key":"ref_55","doi-asserted-by":"crossref","first-page":"1915","DOI":"10.1109\/TCSVT.2020.3015051","article-title":"Richly Activated Graph Convolutional Network for Robust Skeleton-Based Action Recognition","volume":"31","author":"Song","year":"2021","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"ref_56","doi-asserted-by":"crossref","first-page":"10040","DOI":"10.1109\/ACCESS.2020.2964115","article-title":"PGCN-TCA: Pseudo Graph Convolutional Network with Temporal and Channel-Wise Attention for Skeleton-Based Action Recognition","volume":"8","author":"Yang","year":"2020","journal-title":"IEEE Access"},{"key":"ref_57","unstructured":"Baradel, F., Wolf, C., and Mille, J. (2017). Pose-conditioned spatio-temporal attention for human action recognition. arXiv."},{"key":"ref_58","doi-asserted-by":"crossref","unstructured":"Liu, J., Shahroudy, A., Xu, D., and Wang, G. (2016, January 11\u201314). Spatio-temporal lstm with trust gates for 3d human action recognition. Proceedings of the Computer Vision\u2014ECCV 2016, Amsterdam, The Netherlands.","DOI":"10.1007\/978-3-319-46487-9_50"},{"key":"ref_59","doi-asserted-by":"crossref","first-page":"363","DOI":"10.1109\/TMM.2018.2859620","article-title":"Attention based multiview re-observation fusion network for skeletal action recognition","volume":"21","author":"Fan","year":"2018","journal-title":"IEEE Trans. 
Multimed."},{"key":"ref_60","doi-asserted-by":"crossref","unstructured":"Anirudh, R., Turaga, P., Su, J., and Srivastava, A. (2015, January 7\u201312). Elastic functional coding of human actions: From vector-fields to latent variables. Proceedings of the 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston, MA, USA.","DOI":"10.1109\/CVPR.2015.7298934"},{"key":"ref_61","doi-asserted-by":"crossref","first-page":"155","DOI":"10.1016\/j.cviu.2016.04.005","article-title":"R3DG features: Relative 3D geometry-based skeletal representations for human action recognition","volume":"152","author":"Vemulapalli","year":"2016","journal-title":"Comput. Vis. Image Underst."},{"key":"ref_62","doi-asserted-by":"crossref","unstructured":"Paoletti, G., Cavazza, J., Beyan, C., and Bue, A.D. (2021, January 10\u201315). Subspace Clustering for Action Recognition with Covariance Representations and Temporal Pruning. Proceedings of the 2020 25th International Conference on Pattern Recognition (ICPR), Milan, Italy.","DOI":"10.1109\/ICPR48806.2021.9412060"},{"key":"ref_63","doi-asserted-by":"crossref","first-page":"1586","DOI":"10.1109\/TIP.2017.2785279","article-title":"Skeleton-based human action recognition with global context-aware attention lstm networks","volume":"27","author":"Liu","year":"2018","journal-title":"IEEE Trans. 
Image Process."}],"container-title":["Sensors"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.mdpi.com\/1424-8220\/24\/8\/2589\/pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T14:30:04Z","timestamp":1760106604000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.mdpi.com\/1424-8220\/24\/8\/2589"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,18]]},"references-count":63,"journal-issue":{"issue":"8","published-online":{"date-parts":[[2024,4]]}},"alternative-id":["s24082589"],"URL":"https:\/\/doi.org\/10.3390\/s24082589","relation":{},"ISSN":["1424-8220"],"issn-type":[{"value":"1424-8220","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4,18]]}}}