{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T03:33:48Z","timestamp":1774668828277,"version":"3.50.1"},"reference-count":66,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2022,9,6]],"date-time":"2022-09-06T00:00:00Z","timestamp":1662422400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,9,6]],"date-time":"2022-09-06T00:00:00Z","timestamp":1662422400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100002858","name":"china postdoctoral science foundation","doi-asserted-by":"publisher","award":["2019M661098"],"award-info":[{"award-number":["2019M661098"]}],"id":[{"id":"10.13039\/501100002858","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62031013"],"award-info":[{"award-number":["62031013"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"national natural science foundation of china","doi-asserted-by":"publisher","award":["61671103"],"award-info":[{"award-number":["61671103"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Machine Vision and Applications"],"published-print":{"date-parts":[[2022,11]]},"DOI":"10.1007\/s00138-022-01328-4","type":"journal-article","created":{"date-parts":[[2022,9,6]],"date-time":"2022-09-06T11:08:56Z","timestamp":1662462536000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["Graph convolutional networks and LSTM for first-person multimodal hand action recognition"],"prefix":"10.1007","volume":"33","author":[{"given":"Rui","family":"Li","sequence":"first","affiliation":[]},{"given":"Hongyu","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,9,6]]},"reference":[{"issue":"7","key":"1328_CR1","doi-asserted-by":"publisher","first-page":"11735","DOI":"10.3390\/s140711735","volume":"14","author":"A Jalal","year":"2014","unstructured":"Jalal, A., Kamal, S., Kim, D.: A depth video sensor-based life-logging human activity recognition system for elderly care in smart indoor environments. Sensors 14(7), 11735\u201311759 (2014)","journal-title":"Sensors"},{"issue":"2","key":"1328_CR2","doi-asserted-by":"publisher","first-page":"527","DOI":"10.1109\/TCYB.2017.2779800","volume":"49","author":"H Liang","year":"2019","unstructured":"Liang, H., Yuan, J., Lee, J., et al.: Hough forest with optimized leaves for global hand pose estimation with arbitrary postures. IEEE Trans. Cybern. 49(2), 527\u2013541 (2019)","journal-title":"IEEE Trans. Cybern."},{"key":"1328_CR3","doi-asserted-by":"crossref","unstructured":"Mumtaz, A., Sargano, A.B., Habib, Z.: Violence detection in surveillance videos with deep network using transfer learning. In: 2018 2nd European Conference on Electrical Engineering and Computer Science (EECS), pp. 558\u2013563","DOI":"10.1109\/EECS.2018.00109"},{"key":"1328_CR4","unstructured":"Antotsiou, D., Garcia-Hernando, G., Kim, T.: Task-oriented hand motion retargeting for dexterous manipulation imitation. arXiv:1810.01845"},{"issue":"5","key":"1328_CR5","doi-asserted-by":"publisher","first-page":"2647","DOI":"10.1109\/TCSVT.2021.3057992","volume":"32","author":"R Li","year":"2022","unstructured":"Li, R., Wang, H., Liu, Z.: Survey on mapping human hand motion to robotic hands for teleoperation. IEEE Trans. Circuits Syst. Video Technol. 32(5), 2647\u20132665 (2022)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"2","key":"1328_CR6","doi-asserted-by":"publisher","first-page":"128","DOI":"10.1109\/TAI.2021.3076974","volume":"2","author":"T Ahmad","year":"2021","unstructured":"Ahmad, T., Jin, L., Zhang, X., et al.: Graph convolutional neural network for human action recognition: a comprehensive survey. IEEE Trans. Artif. Intell. 2(2), 128\u2013145 (2021)","journal-title":"IEEE Trans. Artif. Intell."},{"issue":"1","key":"1328_CR7","doi-asserted-by":"publisher","first-page":"4","DOI":"10.1109\/TNNLS.2020.2978386","volume":"32","author":"Z Wu","year":"2021","unstructured":"Wu, Z., Pan, S., Chen, F., et al.: A comprehensive survey on graph neural networks. IEEE Trans. Neural Netw. Learn. Syst. 32(1), 4\u201324 (2021)","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"issue":"14","key":"1328_CR8","doi-asserted-by":"publisher","first-page":"16183","DOI":"10.1109\/JSEN.2021.3075722","volume":"21","author":"F Li","year":"2021","unstructured":"Li, F., Zhu, A., Liu, Z., et al.: Pyramidal graph convolutional network for skeleton-based human action recognition. IEEE Sens. J. 21(14), 16183\u201316191 (2021)","journal-title":"IEEE Sens. J."},{"key":"1328_CR9","doi-asserted-by":"publisher","first-page":"144529","DOI":"10.1109\/ACCESS.2020.3014445","volume":"8","author":"W Li","year":"2020","unstructured":"Li, W., Liu, X., Liu, Z., et al.: Skeleton-based action recognition using multi-scale and multi-stream improved graph convolutional network. IEEE Access 8, 144529\u2013144542 (2020)","journal-title":"IEEE Access"},{"key":"1328_CR10","doi-asserted-by":"crossref","unstructured":"Liu, R., Xu, C., Zhang, T., et al.: Si-GCN: structure-induced graph convolution network for skeleton-based action recognition. In: 2019 International Joint Conference on Neural Networks (IJCNN), pp. 1\u20138","DOI":"10.1109\/IJCNN.2019.8851767"},{"issue":"8","key":"1328_CR11","doi-asserted-by":"publisher","first-page":"3047","DOI":"10.1109\/TNNLS.2019.2935173","volume":"31","author":"X Zhang","year":"2020","unstructured":"Zhang, X., Xu, C., Tian, X., et al.: Graph edge convolutional neural networks for skeleton-based action recognition. IEEE Trans. Neural Netw. Learn. Syst. 31(8), 3047\u20133060 (2020)","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"1328_CR12","doi-asserted-by":"publisher","first-page":"2263","DOI":"10.1109\/TIP.2021.3051495","volume":"30","author":"X Hao","year":"2021","unstructured":"Hao, X., Li, J., Guo, Y., et al.: Hypergraph neural network for skeleton-based action recognition. IEEE Trans. Image Process. 30, 2263\u20132275 (2021)","journal-title":"IEEE Trans. Image Process."},{"key":"1328_CR13","doi-asserted-by":"crossref","unstructured":"Shahroudy, A., Liu, J., Ng, T., et al.: NTU RGB+D: a large scale dataset for 3D human activity analysis. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1010\u20131019","DOI":"10.1109\/CVPR.2016.115"},{"key":"1328_CR14","doi-asserted-by":"crossref","unstructured":"Sijie, Y., Yuanjun, X., Dahua, L.: Spatial temporal graph convolutional networks for skeleton-based action recognition. In: 2018 32nd AAAI Conference on Artificial Intelligence, pp. 7444\u20137452","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"1328_CR15","doi-asserted-by":"crossref","unstructured":"Shi, L., Zhang, Y., Cheng, J., et al.: Two-stream adaptive graph convolutional networks for skeleton-based action recognition. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 12018\u201312027","DOI":"10.1109\/CVPR.2019.01230"},{"key":"1328_CR16","doi-asserted-by":"publisher","first-page":"9532","DOI":"10.1109\/TIP.2020.3028207","volume":"29","author":"L Shi","year":"2020","unstructured":"Shi, L., Zhang, Y., Cheng, J., et al.: Skeleton-based action recognition with multi-stream adaptive graph convolutional networks. IEEE Trans. Image Process. 29, 9532\u20139545 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"1328_CR17","doi-asserted-by":"crossref","unstructured":"Li, M., Chen, S., Chen, X., et al.: Actional-structural graph convolutional networks for skeleton-based action recognition. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3590\u20133598","DOI":"10.1109\/CVPR.2019.00371"},{"key":"1328_CR18","doi-asserted-by":"crossref","unstructured":"Zhang, X., Xu, C., Tao, D.: Context aware graph convolution for skeleton-based action recognition. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 14321\u201314330","DOI":"10.1109\/CVPR42600.2020.01434"},{"key":"1328_CR19","doi-asserted-by":"crossref","unstructured":"Nam, S., Lee, S.: JT-MGCN: joint-temporal motion graph convolutional network for skeleton-based action recognition. In: 2020 25th International Conference on Pattern Recognition (ICPR), pp. 6383\u20136390","DOI":"10.1109\/ICPR48806.2021.9412533"},{"key":"1328_CR20","doi-asserted-by":"crossref","unstructured":"Zhang, G., Zhang, X.: Multi-heads attention graph convolutional networks for skeleton-based action recognition. In: 2019 IEEE Visual Communications and Image Processing (VCIP), pp. 1\u20134","DOI":"10.1109\/VCIP47243.2019.8965914"},{"key":"1328_CR21","doi-asserted-by":"publisher","first-page":"305","DOI":"10.1109\/ACCESS.2019.2961770","volume":"8","author":"T Ahmad","year":"2020","unstructured":"Ahmad, T., Mao, H., Lin, L., et al.: Action recognition using attention-joints graph convolutional neural networks. IEEE Access 8, 305\u2013313 (2020)","journal-title":"IEEE Access"},{"key":"1328_CR22","doi-asserted-by":"crossref","unstructured":"BanTeng, M.L., Wu, Z.: Channel-wise dense connection graph convolutional network for skeleton-based action recognition. In: 2020 25th International Conference on Pattern Recognition (ICPR), pp. 3799\u20133806","DOI":"10.1109\/ICPR48806.2021.9412329"},{"key":"1328_CR23","doi-asserted-by":"publisher","first-page":"10040","DOI":"10.1109\/ACCESS.2020.2964115","volume":"8","author":"H Yang","year":"2020","unstructured":"Yang, H., Gu, Y., Zhu, J., et al.: PGCN-TCA: Pseudo graph convolutional network with temporal and channel-wise attention for skeleton-based action recognition. IEEE Access 8, 10040\u201310047 (2020)","journal-title":"IEEE Access"},{"key":"1328_CR24","doi-asserted-by":"publisher","first-page":"58256","DOI":"10.1109\/ACCESS.2021.3073107","volume":"9","author":"D Feng","year":"2021","unstructured":"Feng, D., Wu, Z., Zhang, J., et al.: Multi-scale spatial temporal graph neural network for skeleton-based action recognition. IEEE Access 9, 58256\u201358265 (2021)","journal-title":"IEEE Access"},{"key":"1328_CR25","doi-asserted-by":"publisher","first-page":"36475","DOI":"10.1109\/ACCESS.2020.3049029","volume":"9","author":"H Xia","year":"2021","unstructured":"Xia, H., Gao, X.: Multi-scale mixed dense graph convolution network for skeleton-based action recognition. IEEE Access 9, 36475\u201336484 (2021)","journal-title":"IEEE Access"},{"key":"1328_CR26","doi-asserted-by":"crossref","unstructured":"Cheng, K., Zhang, Y., He, X., et al.: Skeleton-based action recognition with shift graph convolutional network. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 180\u2013189","DOI":"10.1109\/CVPR42600.2020.00026"},{"issue":"2","key":"1328_CR27","doi-asserted-by":"publisher","first-page":"1028","DOI":"10.1109\/LRA.2021.3056361","volume":"6","author":"S Li","year":"2021","unstructured":"Li, S., Yi, J., Farha, Y.A., et al.: Pose refinement graph convolutional network for skeleton-based action recognition. IEEE Robot. Autom. Lett. 6(2), 1028\u20131035 (2021)","journal-title":"IEEE Robot. Autom. Lett."},{"key":"1328_CR28","doi-asserted-by":"crossref","unstructured":"Tang, Y., Tian, Y., Lu, J., et al.: Action recognition in RGB-D egocentric videos. In: 2017 IEEE International Conference on Image Processing (ICIP), pp. 3410\u20133414","DOI":"10.1109\/ICIP.2017.8296915"},{"key":"1328_CR29","doi-asserted-by":"crossref","unstructured":"Garcia-Hernando, G., Yuan, S., Baek, S., et al.: First-person hand action benchmark with RGB-D videos and 3D hand pose annotations. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 409\u2013419","DOI":"10.1109\/CVPR.2018.00050"},{"key":"1328_CR30","doi-asserted-by":"publisher","first-page":"70061","DOI":"10.1109\/ACCESS.2018.2880231","volume":"6","author":"J Liu","year":"2018","unstructured":"Liu, J., Akhtar, N., Mian, A.: Viewpoint invariant action recognition using RGB-D videos. IEEE Access 6, 70061\u201370071 (2018)","journal-title":"IEEE Access"},{"issue":"2","key":"1328_CR31","doi-asserted-by":"publisher","first-page":"633","DOI":"10.3390\/s18020633","volume":"18","author":"R Li","year":"2018","unstructured":"Li, R., Liu, Z., Tan, J.: Exploring 3D human action recognition: from offline to online. Sensors 18(2), 633 (2018)","journal-title":"Sensors"},{"key":"1328_CR32","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Zisserman, A.: Convolutional two-stream network fusion for video action recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1933\u20131941","DOI":"10.1109\/CVPR.2016.213"},{"key":"1328_CR33","first-page":"20","volume":"9912","author":"L Wang","year":"2016","unstructured":"Wang, L., Xiong, Y., Wang, Z., et al.: Temporal segment networks: towards good practices for deep action recognition. ECCV 9912, 20\u201336 (2016)","journal-title":"ECCV"},{"key":"1328_CR34","doi-asserted-by":"crossref","unstructured":"Tekin, B., Bogo, F., Pollefeys, M.: H+O: unified egocentric recognition of 3D hand-object poses and interactions. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4506\u20134515","DOI":"10.1109\/CVPR.2019.00464"},{"key":"1328_CR35","doi-asserted-by":"crossref","unstructured":"Schwarz, M., Schulz, H., Behnke, S.: RGB-D object recognition and pose estimation based on pre-trained convolutional neural network features. In: 2015 IEEE International Conference on Robotics and Automation (ICRA), pp. 1329\u20131335","DOI":"10.1109\/ICRA.2015.7139363"},{"key":"1328_CR36","doi-asserted-by":"crossref","unstructured":"Eitel, A., Springenberg, J.T., Spinello, L., et al.: Multimodal deep learning for robust RGB-D object recognition. In: 2015 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), Hamburg, Germany, pp. 681\u2013687","DOI":"10.1109\/IROS.2015.7353446"},{"issue":"3","key":"1328_CR37","doi-asserted-by":"publisher","first-page":"2386","DOI":"10.1109\/LRA.2018.2812225","volume":"3","author":"FM Carlucci","year":"2018","unstructured":"Carlucci, F.M., Russo, P., Caputo, B.: (DE)2CO: deep depth colorization. IEEE Robot. Autom. Lett. 3(3), 2386\u20132393 (2018)","journal-title":"IEEE Robot. Autom. Lett."},{"key":"1328_CR38","doi-asserted-by":"crossref","unstructured":"Vemulapalli, R., Arrate, F., Chellappa, R.: Human action recognition by representing 3D skeletons as points in a Lie group. In: 2014 IEEE Conference on Computer Vision and Pattern Recognition, Columbus, pp. 588\u2013595","DOI":"10.1109\/CVPR.2014.82"},{"key":"1328_CR39","doi-asserted-by":"crossref","unstructured":"Huang, Z., Wan, C., Probst, T., et al.: Deep learning on Lie groups for skeleton-based action recognition. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1243\u20131252","DOI":"10.1109\/CVPR.2017.137"},{"key":"1328_CR40","doi-asserted-by":"crossref","unstructured":"Li, Y., Guo, T., Liu, X., et al.: Skeleton-based action recognition with Lie group and deep neural networks. In: 2019 IEEE 4th International Conference on Signal and Image Processing (ICSIP), pp. 26\u201330","DOI":"10.1109\/SIPROCESS.2019.8868548"},{"key":"1328_CR41","doi-asserted-by":"crossref","unstructured":"Yang, K., Ding, X., Chen, W.: Multi-scale spatial temporal graph convolutional LSTM network for skeleton-based human action recognition. In: Proceedings of the 2019 International Conference on Video, Signal and Image Processing, pp. 3\u20139","DOI":"10.1145\/3369318.3369325"},{"key":"1328_CR42","doi-asserted-by":"crossref","unstructured":"Xu, S., Rao, H., Hu, X., Hu, B.: Multi-level co-occurrence graph convolutional LSTM for skeleton-based action recognition. In: 2020 IEEE International Conference on E-health Networking, Application & Services (HEALTHCOM), pp. 1\u20137","DOI":"10.1109\/HEALTHCOM49281.2021.9399007"},{"key":"1328_CR43","doi-asserted-by":"crossref","unstructured":"Si, C., Chen, W., Wang, W., et al.: An attention enhanced graph convolutional LSTM network for skeleton-based action recognition. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1227\u20131236","DOI":"10.1109\/CVPR.2019.00132"},{"issue":"6","key":"1328_CR44","doi-asserted-by":"publisher","first-page":"677","DOI":"10.1109\/34.598226","volume":"19","author":"VI Pavlovic","year":"1997","unstructured":"Pavlovic, V.I., Sharma, R., Huang, T.S.: Visual interpretation of hand gestures for human-computer interaction. IEEE Trans. Pattern Anal. Mach. Intell. 19(6), 677\u2013695 (1997)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"1","key":"1328_CR45","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s10462-012-9356-9","volume":"43","author":"SS Rautaray","year":"2015","unstructured":"Rautaray, S.S., Agrawal, A.: Vision based hand gesture recognition for human computer interaction: a survey. Artif. Intell. Rev. 43(1), 1\u201354 (2015)","journal-title":"Artif. Intell. Rev."},{"key":"1328_CR46","doi-asserted-by":"publisher","first-page":"97","DOI":"10.1016\/j.jvcir.2015.01.015","volume":"28","author":"N Kiliboz","year":"2015","unstructured":"Kiliboz, N., Gudukbay, U.: A hand gesture recognition technique for human-computer interaction. J. Vis. Commun. Image Represent. 28, 97\u2013104 (2015)","journal-title":"J. Vis. Commun. Image Represent."},{"key":"1328_CR47","doi-asserted-by":"publisher","first-page":"875","DOI":"10.1007\/s00138-018-0996-x","volume":"30","author":"Y Li","year":"2019","unstructured":"Li, Y., Miao, Q., Qi, X., et al.: A spatiotemporal attention-based ResC3D model for large-scale gesture recognition. Mach. Vis. Appl. 30, 875\u2013888 (2019)","journal-title":"Mach. Vis. Appl."},{"key":"1328_CR48","doi-asserted-by":"publisher","first-page":"243","DOI":"10.1007\/s001380050144","volume":"12","author":"C Huang","year":"2001","unstructured":"Huang, C., Jeng, S.: A model-based hand gesture recognition system. Mach. Vis. Appl. 12, 243\u2013258 (2001)","journal-title":"Mach. Vis. Appl."},{"key":"1328_CR49","doi-asserted-by":"crossref","unstructured":"Panwar, M., Mehra, P.S.: Hand gesture recognition for human computer interaction. In: 2011 Proceedings of the International Conference on Image Information Processing, pp. 1\u20137","DOI":"10.1109\/ICIIP.2011.6108940"},{"key":"1328_CR50","doi-asserted-by":"publisher","first-page":"1157","DOI":"10.1007\/s00138-019-01043-7","volume":"30","author":"Z Lu","year":"2019","unstructured":"Lu, Z., Qin, S., Li, X., et al.: One-shot learning hand gesture recognition based on modified 3D convolutional neural networks. Mach. Vis. Appl. 30, 1157\u20131180 (2019)","journal-title":"Mach. Vis. Appl."},{"key":"1328_CR51","doi-asserted-by":"publisher","first-page":"1309","DOI":"10.1007\/s00138-014-0620-7","volume":"25","author":"J Molina","year":"2014","unstructured":"Molina, J., Mart\u00ednez, J.M.: A synthetic training framework for providing gesture scalability to 2.5D pose-based hand gesture recognition systems. Mach. Vis. Appl. 25, 1309\u20131315 (2014)","journal-title":"Mach. Vis. Appl."},{"key":"1328_CR52","doi-asserted-by":"crossref","unstructured":"Zanfir, M., Leordeanu, M., Sminchisescu, C.: The moving pose: an efficient 3D kinematics descriptor for low-latency action recognition and detection. In: 2013 IEEE International Conference on Computer Vision, pp. 2752\u20132759","DOI":"10.1109\/ICCV.2013.342"},{"key":"1328_CR53","doi-asserted-by":"crossref","unstructured":"Sun, D., Zeng, F., Luo, B., et al.: Information enhanced graph convolutional networks for skeleton-based action recognition. In: 2020 International Joint Conference on Neural Networks (IJCNN), pp. 1\u20137","DOI":"10.1109\/IJCNN48605.2020.9207025"},{"key":"1328_CR54","doi-asserted-by":"publisher","first-page":"228108","DOI":"10.1109\/ACCESS.2020.3046142","volume":"8","author":"Z Zhang","year":"2020","unstructured":"Zhang, Z., Wang, Z., Zhuang, S., et al.: Structure-feature fusion adaptive graph convolutional networks for skeleton-based action recognition. IEEE Access 8, 228108\u2013228117 (2020)","journal-title":"IEEE Access"},{"key":"1328_CR55","doi-asserted-by":"crossref","unstructured":"Wu, C., Wu, X., Kittler, J.: Spatial residual layer and dense connection block enhanced spatial temporal graph convolutional network for skeleton-based action recognition. In: 2019 IEEE\/CVF International Conference on Computer Vision Workshop (ICCVW), pp. 1740\u20131748","DOI":"10.1109\/ICCVW.2019.00216"},{"key":"1328_CR56","doi-asserted-by":"crossref","unstructured":"Liu, K., Gao, L., Mefraz Khan, N., et al.: Graph convolutional networks-hidden conditional random field model for skeleton-based action recognition. In: 2019 IEEE International Symposium on Multimedia (ISM), pp. 25\u2013256","DOI":"10.1109\/ISM46123.2019.00013"},{"key":"1328_CR57","unstructured":"He, K., Zhang, X., Ren, S., et al.: Deep residual learning for image recognition. arXiv:1512.03385v1"},{"key":"1328_CR58","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: 2015 Proc. ICLR, pp. 1\u201314"},{"issue":"1","key":"1328_CR59","doi-asserted-by":"publisher","first-page":"61386","DOI":"10.1109\/ACCESS.2018.2872798","volume":"6","author":"R Li","year":"2018","unstructured":"Li, R., Liu, Z., Tan, J.: Reassessing hierarchical representation for action recognition in still images. IEEE Access 6(1), 61386\u201361400 (2018)","journal-title":"IEEE Access"},{"issue":"6","key":"1328_CR60","doi-asserted-by":"publisher","first-page":"2368","DOI":"10.1109\/TITS.2014.2337331","volume":"15","author":"E Ohn-Bar","year":"2014","unstructured":"Ohn-Bar, E., Trivedi, M.M.: Hand gesture recognition in real time for automotive interfaces: A multimodal vision-based approach and evaluations. IEEE Trans. Intell. Transp. Syst. 15(6), 2368\u20132377 (2014)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"key":"1328_CR61","doi-asserted-by":"crossref","unstructured":"Oreifej, O., Liu, Z.: HON4D: histogram of oriented 4D normals for activity recognition from depth sequences. In: 2013 IEEE Conference on Computer Vision and Pattern Recognition, pp. 716\u2013723","DOI":"10.1109\/CVPR.2013.98"},{"key":"1328_CR62","doi-asserted-by":"crossref","unstructured":"Rahmani, H., Mian, A.: 3D action recognition from novel viewpoints. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1506\u20131515","DOI":"10.1109\/CVPR.2016.167"},{"key":"1328_CR63","unstructured":"Du, Y., Wang, W., Wang, L.: Hierarchical recurrent neural network for skeleton based action recognition. In: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1110\u20131118"},{"key":"1328_CR64","doi-asserted-by":"crossref","unstructured":"Zhang, X., Wang, Y., Gou, M., et al.: Efficient temporal sequence comparison and classification using gram matrix embeddings on a Riemannian manifold. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4498\u20134507","DOI":"10.1109\/CVPR.2016.487"},{"key":"1328_CR65","doi-asserted-by":"crossref","unstructured":"Garcia-Hernando, G., Kim, T.: Transition forests: learning discriminative temporal transitions for action recognition and detection. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 407\u2013415","DOI":"10.1109\/CVPR.2017.51"},{"issue":"11","key":"1328_CR66","doi-asserted-by":"publisher","first-page":"2186","DOI":"10.1109\/TPAMI.2016.2640292","volume":"39","author":"J Hu","year":"2017","unstructured":"Hu, J., Zheng, W., Lai, J., et al.: Jointly learning heterogeneous features for RGB-D activity recognition. IEEE Trans. Pattern Anal. Mach. Intell. 39(11), 2186\u20132200 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."}],"container-title":["Machine Vision and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-022-01328-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00138-022-01328-4\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-022-01328-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,3]],"date-time":"2024-10-03T08:55:07Z","timestamp":1727945707000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00138-022-01328-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,9,6]]},"references-count":66,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2022,11]]}},"alternative-id":["1328"],"URL":"https:\/\/doi.org\/10.1007\/s00138-022-01328-4","relation":{},"ISSN":["0932-8092","1432-1769"],"issn-type":[{"value":"0932-8092","type":"print"},{"value":"1432-1769","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,9,6]]},"assertion":[{"value":"30 July 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 May 2022","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 July 2022","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 September 2022","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}],"article-number":"84"}}