{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,24]],"date-time":"2025-09-24T10:27:03Z","timestamp":1758709623905,"version":"3.37.3"},"reference-count":27,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2021,2,23]],"date-time":"2021-02-23T00:00:00Z","timestamp":1614038400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,2,23]],"date-time":"2021-02-23T00:00:00Z","timestamp":1614038400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61871196","61972167"],"award-info":[{"award-number":["61871196","61972167"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2021,10]]},"DOI":"10.1007\/s11760-021-01868-8","type":"journal-article","created":{"date-parts":[[2021,2,23]],"date-time":"2021-02-23T22:03:58Z","timestamp":1614117838000},"page":"1379-1386","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["RGB+2D skeleton: local hand-crafted and 3D convolution feature coding for action recognition"],
"prefix":"10.1007","volume":"15","author":[{"given":"Yi-Xiang","family":"Zhang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5536-5224","authenticated-orcid":false,"given":"Hong-Bo","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Ji-Xiang","family":"Du","sequence":"additional","affiliation":[]},{"given":"Qing","family":"Lei","sequence":"additional","affiliation":[]},{"given":"Lijie","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Bineng","family":"Zhong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,2,23]]},
"reference":[{"key":"1868_CR1","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1016\/j.patrec.2018.02.010","volume":"119","author":"J Wang","year":"2019","unstructured":"Wang, J., Chen, Y., Hao, S., Peng, X., Hu, L.: Deep learning for sensor-based activity recognition: a survey. Pattern Recognit. Lett. 119, 3\u201311 (2019)","journal-title":"Pattern Recognit. Lett."},
{"key":"1868_CR2","doi-asserted-by":"publisher","first-page":"4","DOI":"10.1016\/j.imavis.2017.01.010","volume":"60","author":"S Herath","year":"2017","unstructured":"Herath, S., Harandi, M., Porikli, F.: Going deeper into action recognition: a survey. Image Vision Comput. 60, 4\u201321 (2017)","journal-title":"Image Vision Comput."},
{"key":"1868_CR3","doi-asserted-by":"crossref","unstructured":"Zhang, H.B., Zhang, Y.X., Zhong, B., Lei, Q., Yang, L., Du, J.X., Chen, D.S.: A comprehensive survey of vision-based human action recognition methods. Sensors 19(5), (2019)","DOI":"10.3390\/s19051005"},
{"key":"1868_CR4","doi-asserted-by":"crossref","unstructured":"Das Dawn, D., Shaikh, S.: A comprehensive survey of human action recognition with spatio-temporal interest point (stip) detector. Visual Comput., 32(3) (2016)","DOI":"10.1007\/s00371-015-1066-2"},
{"key":"1868_CR5","doi-asserted-by":"publisher","first-page":"130","DOI":"10.1016\/j.patcog.2015.11.019","volume":"53","author":"LL Presti","year":"2016","unstructured":"Presti, L.L., Cascia, M.L.: 3D skeleton-based human action classification: a survey. Pattern Recognit. 53, 130\u2013147 (2016)","journal-title":"Pattern Recognit."},
{"key":"1868_CR6","doi-asserted-by":"publisher","first-page":"118","DOI":"10.1016\/j.cviu.2018.04.007","volume":"171","author":"P Wang","year":"2018","unstructured":"Wang, P., Li, W., Ogunbona, P., Wan, J., Escalera, S.: Rgb-d-based human motion recognition with deep learning: a survey. Comput. Vis. Image Underst. 171, 118\u2013139 (2018)","journal-title":"Comput. Vis. Image Underst."},
{"issue":"5","key":"1868_CR7","doi-asserted-by":"publisher","first-page":"914","DOI":"10.1109\/TPAMI.2013.198","volume":"36","author":"J Wang","year":"2014","unstructured":"Wang, J., Liu, Z., Wu, Y., Yuan, J.: Learning actionlet ensemble for 3D human action recognition. IEEE Trans. Pattern Anal. 36(5), 914\u2013927 (2014)","journal-title":"IEEE Trans. Pattern Anal."},
{"key":"1868_CR8","doi-asserted-by":"publisher","first-page":"195","DOI":"10.1016\/j.patrec.2016.07.021","volume":"87","author":"M Li","year":"2017","unstructured":"Li, M., Leung, H.: Graph-based approach for 3D human skeletal action recognition. Pattern Recognit. Lett. 87, 195\u2013202 (2017)","journal-title":"Pattern Recognit. Lett."},
{"key":"1868_CR9","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., Paluri, M.: Learning spatiotemporal features with 3D convolutional networks. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 4489\u20134497 (2015)","DOI":"10.1109\/ICCV.2015.510"},
{"issue":"1","key":"1868_CR10","doi-asserted-by":"publisher","first-page":"24","DOI":"10.1016\/j.jvcir.2013.04.007","volume":"25","author":"F Ofli","year":"2014","unstructured":"Ofli, F., Chaudhry, R., Kurillo, G., Vidal, R., Bajcsy, R.: Sequence of the most informative joints (SMIJ): a new representation for human skeletal action recognition. J. Vis. Commun. Image Represent. 25(1), 24\u201338 (2014)","journal-title":"J. Vis. Commun. Image Represent."},
{"key":"1868_CR11","doi-asserted-by":"crossref","unstructured":"Wu, D., Shao, L.: Leveraging hierarchical parametric networks for skeletal joints based action segmentation and recognition. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 724\u2013731 (2014)","DOI":"10.1109\/CVPR.2014.98"},
{"issue":"4","key":"1868_CR12","doi-asserted-by":"publisher","first-page":"1227","DOI":"10.1007\/s11760-020-01644-0","volume":"14","author":"X Liu","year":"2020","unstructured":"Liu, X., Li, Y., Xia, R.: Rotation-based spatial-temporal feature learning from skeleton sequences for action recognition. Signal Image Video Process. 14(4), 1227\u20131234 (2020)","journal-title":"Signal Image Video Process."},
{"key":"1868_CR13","doi-asserted-by":"crossref","unstructured":"Xia, L., Chen, C., Aggarwal, J.\u00a0K.: View invariant human action recognition using histograms of 3D joints. In: IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 20\u201327 (2012)","DOI":"10.1109\/CVPRW.2012.6239233"},
{"key":"1868_CR14","doi-asserted-by":"crossref","unstructured":"Kerola, T., Inoue, N., Shinoda, K.: Spectral graph skeletons for 3D action recognition. In: Asian Conference on Computer Vision, pp. 417\u2013432. Springer International Publishing, Cham (2015)","DOI":"10.1007\/978-3-319-16817-3_27"},
{"key":"1868_CR15","doi-asserted-by":"crossref","unstructured":"Yan, S., Xiong, Y., Lin, D.: Spatial temporal graph convolutional networks for skeleton-based action recognition. In: AAAI Conference on Artificial Intelligence, pp. 7444\u20137452. New Orleans, LA, United states (2018)","DOI":"10.1609\/aaai.v32i1.12328"},
{"key":"1868_CR16","doi-asserted-by":"crossref","unstructured":"Huang, Q., Zhou, F., Qin, R., Zhao, Y.: View transform graph attention recurrent networks for skeleton-based action recognition. Signal Image Video Process. (2020)","DOI":"10.1007\/s11760-020-01781-6"},
{"key":"1868_CR17","doi-asserted-by":"crossref","unstructured":"Rahmani, H., Mahmood, A., Huynh, D.Q., Mian, A.: Hopc: Histogram of oriented principal components of 3D pointclouds for action recognition. In: Asian Conference on Computer Vision, pp. 742\u2013757. Springer International Publishing, Cham (2014)","DOI":"10.1007\/978-3-319-10605-2_48"},
{"key":"1868_CR18","doi-asserted-by":"publisher","first-page":"660","DOI":"10.1016\/j.compeleceng.2018.01.037","volume":"72","author":"S Nazir","year":"2018","unstructured":"Nazir, S., Yousaf, M.H., Velastin, S.A.: Evaluating a bag-of-visual features approach using spatio-temporal features for action recognition. Comput. Electr. Eng. 72, 660\u2013669 (2018)","journal-title":"Comput. Electr. Eng."},
{"key":"1868_CR19","doi-asserted-by":"crossref","unstructured":"Dedeo\u011flu, Y., T\u00f6reyin, B., G\u00fcd\u00fckbay, U., \u00c7etin, A.E.: Silhouette-based method for object classification and human action recognition in video. In: International Conference on Human\u2013Computer Interaction, pp 64\u201377. Springer-Verlag, Berlin, Heidelberg (2006)","DOI":"10.1007\/11754336_7"},
{"key":"1868_CR20","doi-asserted-by":"publisher","first-page":"109","DOI":"10.1016\/j.cviu.2016.03.013","volume":"150","author":"X Peng","year":"2016","unstructured":"Peng, X., Wang, L., Wang, X., Qiao, Y.: Bag of visual words and fusion methods for action recognition: comprehensive study and good practice. Comput. Vis. Image Underst. 150, 109\u2013125 (2016)","journal-title":"Comput. Vis. Image Underst."},
{"issue":"1","key":"1868_CR21","doi-asserted-by":"publisher","first-page":"199","DOI":"10.1007\/s11760-014-0726-4","volume":"10","author":"L Pei","year":"2016","unstructured":"Pei, L., Ye, M., Zhao, X., Xiang, T., Li, T.: Learning spatio-temporal features for action recognition from the side of the video. Signal Image Video Process. 10(1), 199\u2013206 (2016)","journal-title":"Signal Image Video Process."},
{"key":"1868_CR22","doi-asserted-by":"crossref","unstructured":"Wang, L., Xiong, Y., Wang, Z., Qiao, Y., Lin, D., Tang, X., Van\u00a0Gool, L.: Temporal segment networks: Towards good practices for deep action recognition. In: European Conference on Computer Vision, pp. 20\u201336. Springer International Publishing, Cham (2016)","DOI":"10.1007\/978-3-319-46484-8_2"},
{"issue":"5","key":"1868_CR23","doi-asserted-by":"publisher","first-page":"1045","DOI":"10.1109\/TPAMI.2017.2691321","volume":"40","author":"A Shahroudy","year":"2018","unstructured":"Shahroudy, A., Ng, T., Gong, Y., Wang, G.: Deep multimodal feature analysis for action recognition in RGB+D videos. IEEE Trans. Pattern Anal. 40(5), 1045\u20131058 (2018)","journal-title":"IEEE Trans. Pattern Anal."},
{"key":"1868_CR24","doi-asserted-by":"crossref","unstructured":"Wang, H., Schmid, C.: Action recognition with improved trajectories. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 3551\u20133558 (2013)","DOI":"10.1109\/ICCV.2013.441"},
{"key":"1868_CR25","unstructured":"Liu, L., Shao, L.: Learning discriminative representations from RGB-D video data. In: International Joint Conferences on Artificial Intelligence, IJCAI 13, pp. 1493\u20131500. AAAI Press (2013)"},
{"key":"1868_CR26","unstructured":"Wang, J., Yuan, J., Chen, Z., Wu, Y.: Spatial Locality-Aware Sparse Coding and Dictionary Learning, vol. 25, pp. 491\u2013505. Singapore (2012)"},
{"key":"1868_CR27","doi-asserted-by":"crossref","unstructured":"Oreifej, O., Liu, Z.: Hon4d: Histogram of oriented 4D normals for activity recognition from depth sequences. In: IEEE Conference on Computer Vision and Pattern Recognition, pp. 716\u2013723 (2013)","DOI":"10.1109\/CVPR.2013.98"}],
"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-021-01868-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-021-01868-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-021-01868-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,18]],"date-time":"2022-12-18T19:09:00Z","timestamp":1671390540000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-021-01868-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,2,23]]},"references-count":27,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2021,10]]}},"alternative-id":["1868"],"URL":"https:\/\/doi.org\/10.1007\/s11760-021-01868-8","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"type":"print","value":"1863-1703"},{"type":"electronic","value":"1863-1711"}],"subject":[],"published":{"date-parts":[[2021,2,23]]},
"assertion":[{"value":"11 June 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 November 2020","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 January 2021","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 February 2021","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Compliance with ethical standards"}},{"value":"All of authors have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of interest"}}]}}