{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,4,25]],"date-time":"2025-04-25T07:08:31Z","timestamp":1745564911597,"version":"3.40.3"},"publisher-location":"Cham","reference-count":30,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030863647"},{"type":"electronic","value":"9783030863654"}],"license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021]]},"DOI":"10.1007\/978-3-030-86365-4_24","type":"book-chapter","created":{"date-parts":[[2021,9,10]],"date-time":"2021-09-10T11:02:39Z","timestamp":1631271759000},"page":"293-304","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Structure-Aware Multi-scale Hierarchical Graph Convolutional Network for Skeleton Action Recognition"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0770-9018","authenticated-orcid":false,"given":"Changxiang","family":"He","sequence":"first","affiliation":[]},{"given":"Shuting","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Ying","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Xiaofei","family":"Qin","sequence":"additional","affiliation":[]},{"given":"Jiayuan","family":"Zeng","sequence":"additional","affiliation":[]},{"given":"Xuedian","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,9,7]]},"reference":[{"key":"24_CR1","doi-asserted-by":"publisher","first-page":"1038","DOI":"10.1109\/TMM.2018.2808769","volume":"20","author":"Y Zhang","year":"2018","unstructured":"Zhang, Y., Cao, C., Cheng, J., Lu, H.: EgoGesture: a new dataset and benchmark for egocentric hand gesture recognition. IEEE Trans. Multimed. 20, 1038\u20131050 (2018)","journal-title":"IEEE Trans. Multimed."},{"key":"24_CR2","doi-asserted-by":"publisher","first-page":"2329","DOI":"10.1016\/j.patcog.2015.03.006","volume":"48","author":"M Ziaeefard","year":"2015","unstructured":"Ziaeefard, M., Bergevin, R.: Semantic human activity recognition: a literature review. Pattern Recognit. 48, 2329\u20132345 (2015)","journal-title":"Pattern Recognit."},{"issue":"1","key":"24_CR3","doi-asserted-by":"publisher","first-page":"70","DOI":"10.1016\/j.patrec.2014.04.011","volume":"48","author":"JK Aggarwal","year":"2014","unstructured":"Aggarwal, J.K., Xia, L.: Human activity recognition from 3D data: a review. Pattern Recognit. Lett. 48(1), 70\u201380 (2014)","journal-title":"Pattern Recognit. Lett."},{"issue":"C","key":"24_CR4","doi-asserted-by":"publisher","first-page":"85","DOI":"10.1016\/j.cviu.2017.01.011","volume":"158","author":"F Han","year":"2017","unstructured":"Han, F., Reily, B., Hoff, W., Zhan, H.: Space-time representation of people based on 3D skeletal data: a review. Comput. Vis. Image Underst. 158(C), 85\u2013105 (2017)","journal-title":"Comput. Vis. Image Underst."},{"issue":"2","key":"24_CR5","doi-asserted-by":"publisher","first-page":"4","DOI":"10.1109\/MMUL.2012.24","volume":"19","author":"Z Zhang","year":"2012","unstructured":"Zhang, Z.: Microsoft kinect sensor and its effect. IEEE Multimed. 19(2), 4\u201310 (2012)","journal-title":"IEEE Multimed."},{"key":"24_CR6","doi-asserted-by":"crossref","unstructured":"Zhu, W., Lan, C., Xing, J., Zeng, W., Xie, X.: Co-occurrence feature learning for skeleton based action recognition using regularized deep LSTM networks. In: AAAI 2016 (2016)","DOI":"10.1609\/aaai.v30i1.10451"},{"key":"24_CR7","doi-asserted-by":"crossref","unstructured":"Ke, Q., Bennamoun, M., An, S., Sohel, F., Boussaid, F.: A new representation of skeleton sequences for 3D action recognition. In: CVPR 2017 (2017)","DOI":"10.1109\/CVPR.2017.486"},{"key":"24_CR8","doi-asserted-by":"crossref","unstructured":"Yan, S., Xiong, Y., Lin, D.: Spatial temporal graph convolutional networks for skeleton-based action recognition. In: AAAI 2018 (2018)","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"24_CR9","doi-asserted-by":"crossref","unstructured":"Liu, Z., Zhang, H., Chen, Z., Wang, Z., Ouyang, W.: Disentangling and unifying graph convolutions for skeleton-based action recognition. IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.00022"},{"key":"24_CR10","doi-asserted-by":"crossref","unstructured":"Li, M., Chen, S., Chen, X., Zhang, Y., Wang, Y., Tian, Q.: Actional-structural graph convolutional networks for skeleton-based action recognition. In: CVPR 2019 (2019)","DOI":"10.1109\/CVPR.2019.00371"},{"key":"24_CR11","doi-asserted-by":"crossref","unstructured":"Shi, L., Zhang, Y., Cheng, J., Lu, H.: Skeleton-based action recognition with directed graph neural networks. In: CVPR 2020 (2020)","DOI":"10.1109\/CVPR.2019.00810"},{"key":"24_CR12","doi-asserted-by":"crossref","unstructured":"Shi, L., Zhang, Y., Cheng, J., Lu, H.: Two-stream adaptive graph convolutional networks for skeleton-based action recognition (2018)","DOI":"10.1109\/CVPR.2019.01230"},{"key":"24_CR13","unstructured":"Luo, W., Li, Y., Urtasun, R., Zemel, R.: Understanding the effective receptive field in deep convolutional neural networks (2017)"},{"key":"24_CR14","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"106","DOI":"10.1007\/978-3-030-01246-5_7","volume-title":"Computer Vision \u2013 ECCV 2018","author":"C Si","year":"2018","unstructured":"Si, C., Jing, Y., Wang, W., Wang, L., Tan, T.: Skeleton-based action recognition with spatial reasoning and temporal stack learning. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018, Part I. LNCS, vol. 11205, pp. 106\u2013121. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01246-5_7"},{"key":"24_CR15","doi-asserted-by":"crossref","unstructured":"Wang, J., Liu, Z., Wu, Y., Yuan, J.: Mining actionlet ensemble for action recognition with depth cameras. Human Action Recognition with Depth Cameras (2014)","DOI":"10.1007\/978-3-319-04561-0"},{"key":"24_CR16","unstructured":"Hussein, M.E., Torki, M., Gowayyed, M.A., El-Saban, M.: Human action recognition using a temporal hierarchy of covariance descriptors on 3D joint locations. In: International Joint Conference on Artificial Intelligence (2013)"},{"key":"24_CR17","doi-asserted-by":"crossref","unstructured":"Vemulapalli, R., Arrate, F., Chellappa, R.: Human action recognition by representing 3D skeletons as points in a lie group. In: CVPR 2014 (2014)","DOI":"10.1109\/CVPR.2014.82"},{"key":"24_CR18","unstructured":"Du, Y., Wang, W., Wang, L.: Hierarchical recurrent neural network for skeleton based action recognition. In: CVPR, pp. 1110\u20131118 (2015)"},{"key":"24_CR19","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"816","DOI":"10.1007\/978-3-319-46487-9_50","volume-title":"Computer Vision \u2013 ECCV 2016","author":"J Liu","year":"2016","unstructured":"Liu, J., Shahroudy, A., Xu, D., Wang, G.: Spatio-temporal LSTM with trust gates for 3D human action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016, Part III. LNCS, vol. 9907, pp. 816\u2013833. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46487-9_50"},{"key":"24_CR20","doi-asserted-by":"crossref","unstructured":"Song, S., Lan, C., Xing, J., Zeng, W., Liu, J.: An end-to-end spatio-temporal attention model for human action recognition from skeleton data (2016)","DOI":"10.1609\/aaai.v31i1.11212"},{"key":"24_CR21","doi-asserted-by":"crossref","unstructured":"Kim, T.S., Reiter, A.: Interpretable 3D human action analysis with temporal convolutional networks. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW) (2017)","DOI":"10.1109\/CVPRW.2017.207"},{"key":"24_CR22","doi-asserted-by":"publisher","first-page":"346","DOI":"10.1016\/j.patcog.2017.02.030","volume":"68","author":"M Liu","year":"2017","unstructured":"Liu, M., Hong, L., Chen, C.: Enhanced skeleton visualization for view invariant human action recognition. Pattern Recognit. 68, 346\u2013362 (2017)","journal-title":"Pattern Recognit."},{"key":"24_CR23","doi-asserted-by":"crossref","unstructured":"Gao, X., Hu, W., Tang, J., Liu, J., Guo, Z.: Optimized skeleton-based action recognition via sparsified graph regression. In: The 27th ACM International Conference (2019)","DOI":"10.1145\/3343031.3351170"},{"key":"24_CR24","unstructured":"Kipf, T.N., Welling, M.: Semi-supervised classification with graph convolutional networks (2016)"},{"key":"24_CR25","unstructured":"Martins, A., Astudillo, R.F.: From softmax to sparsemax: a sparse model of attention and multi-label classification. JMLR.org (2016)"},{"key":"24_CR26","doi-asserted-by":"crossref","unstructured":"Shahroudy, A., Liu, J., Ng, T.T., Wang, G.: NTU RGB+D: a large scale dataset for 3D human activity analysis, pp. 1010\u20131019. IEEE Computer Society (2016)","DOI":"10.1109\/CVPR.2016.115"},{"key":"24_CR27","unstructured":"Kay, W., Carreira, J., Simonyan, K., Zhang, B., Zisserman, A.: The kinetics human action video dataset (2017)"},{"key":"24_CR28","unstructured":"Zhe, C., Simon, T., Wei, S.E., Sheikh, Y.: Realtime multi-person 2D pose estimation using part affinity fields. In: CVPR 2017 (2017)"},{"key":"24_CR29","doi-asserted-by":"crossref","unstructured":"Li, C., Zhong, Q., Xie, D., Pu, S.: Co-occurrence feature learning from skeleton data for action recognition and detection with hierarchical aggregation. In: Twenty-Seventh International Joint Conference on Artificial Intelligence, IJCAI-2018 (2018)","DOI":"10.24963\/ijcai.2018\/109"},{"issue":"2","key":"24_CR30","doi-asserted-by":"publisher","first-page":"452","DOI":"10.3390\/s21020452","volume":"21","author":"W Yang","year":"2021","unstructured":"Yang, W., Zhang, J., Cai, J., Xu, Z.: Shallow graph convolutional network for skeleton-based action recognition. Sensors 21(2), 452 (2021)","journal-title":"Sensors"}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2021"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-86365-4_24","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,8]],"date-time":"2023-01-08T23:37:45Z","timestamp":1673221065000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-86365-4_24"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"ISBN":["9783030863647","9783030863654"],"references-count":30,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-86365-4_24","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2021]]},"assertion":[{"value":"7 September 2021","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Bratislava","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Slovakia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2021","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 September 2021","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 September 2021","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2021","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/e-nns.org\/icann2021\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OCS","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"496","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"265","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"53% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.5","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Conference was held online due to the COVID-19 pandemic.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}