{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T15:28:48Z","timestamp":1774538928057,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":128,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,5,8]]},"DOI":"10.1145\/3701716.3717739","type":"proceedings-article","created":{"date-parts":[[2025,6,23]],"date-time":"2025-06-23T14:24:42Z","timestamp":1750688682000},"page":"1916-1937","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":4,"title":["Evolving Skeletons: Motion Dynamics in Action Recognition"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-4903-483X","authenticated-orcid":false,"given":"Jushang","family":"Qiu","sequence":"first","affiliation":[{"name":"Australian National University, Canberra, Australian Capital Territory, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8600-7099","authenticated-orcid":false,"given":"Lei","family":"Wang","sequence":"additional","affiliation":[{"name":"Griffith University, Brisbane, Queensland, Australia and Australian National University, Canberra, Australian Capital Territory, Australia"}]}],"member":"320","published-online":{"date-parts":[[2025,5,23]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2021.3076974"},{"key":"e_1_3_2_2_2_1","volume-title":"Human Action Recognition with Multi-Level Granularity and Pair-Wise Hyper GCN. In 2024 IEEE 18th International Conference on Automatic Face and Gesture Recognition (FG). IEEE, 1--10","author":"Alsarhan Tamam","year":"2024","unstructured":"Tamam Alsarhan, Syed Sadaf Ali, Ayoub Alsarhan, Iyyakutti Iyappan Ganapathi, and Naoufel Werghi. 2024a. Human Action Recognition with Multi-Level Granularity and Pair-Wise Hyper GCN. In 2024 IEEE 18th International Conference on Automatic Face and Gesture Recognition (FG). IEEE, 1--10."},{"key":"e_1_3_2_2_3_1","volume-title":"Iyyakutti Iyappan Ganapathi, Ahmad Ali, and Naoufel Werghi.","author":"Alsarhan Tamam","year":"2024","unstructured":"Tamam Alsarhan, Syed Sadaf Ali, Iyyakutti Iyappan Ganapathi, Ahmad Ali, and Naoufel Werghi. 2024b. PH-GCN: Boosting Human Action Recognition through Multi-Level Granularity with Pair-wise Hyper GCN. IEEE Access (2024)."},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.procs.2024.09.363"},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2024.103974"},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"e_1_3_2_2_7_1","volume-title":"When spatial meets temporal in action recognition. arXiv preprint arXiv:2411.15284","author":"Chen Huilin","year":"2024","unstructured":"Huilin Chen, Lei Wang, Yifan Chen, Tom Gedeon, and Piotr Koniusz. 2024c. When spatial meets temporal in action recognition. arXiv preprint arXiv:2411.15284 (2024)."},{"key":"e_1_3_2_2_8_1","volume-title":"The 16th Asian Conference on Machine Learning (Conference Track).","author":"Chen Qixiang","unstructured":"Qixiang Chen, Lei Wang, Piotr Koniusz, and Tom Gedeon. [n.,d.]. Motion meets attention: Video motion prompts. In The 16th Asian Conference on Machine Learning (Conference Track)."},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681034"},{"key":"e_1_3_2_2_10_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2013.10.046"},{"key":"e_1_3_2_2_11_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ipm.2022.102950"},{"key":"e_1_3_2_2_12_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3379449"},{"key":"e_1_3_2_2_13_1","first-page":"2589","article-title":"ST-TGR","volume":"24","author":"Chen Zengzhao","year":"2024","unstructured":"Zengzhao Chen, Wenkai Huang, Hai Liu, Zhuo Wang, Yuqun Wen, and Shengming Wang. 2024b. ST-TGR: Spatio-Temporal Representation Learning for Skeleton-Based Teaching Gesture Recognition. Sensors, Vol. 24, 8 (2024), 2589.","journal-title":"Spatio-Temporal Representation Learning for Skeleton-Based Teaching Gesture Recognition. Sensors"},{"key":"e_1_3_2_2_14_1","unstructured":"Yan Cheng Chengxing Fang Jiawen Huang et al. [n. d.]. Spatiotemporal Action Detection Based on Fine-Grained. Chengxing and Huang Jiawen Spatiotemporal Action Detection Based on Fine-Grained ( [n. d.])."},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093639"},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10044-024-01319-3"},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2022.104675"},{"key":"e_1_3_2_2_18_1","volume-title":"Lego: Learnable expansion of graph operators for multi-modal feature fusion. arXiv preprint arXiv:2410.01506","author":"Ding Dexuan","year":"2024","unstructured":"Dexuan Ding, Lei Wang, Liyun Zhu, Tom Gedeon, and Piotr Koniusz. 2024. Lego: Learnable expansion of graph operators for multi-modal feature fusion. arXiv preprint arXiv:2410.01506 (2024)."},{"key":"e_1_3_2_2_19_1","volume-title":"Do language models understand time? arXiv preprint arXiv:2412.13845","author":"Ding Xi","year":"2024","unstructured":"Xi Ding and Lei Wang. 2024a. Do language models understand time? arXiv preprint arXiv:2412.13845 (2024)."},{"key":"e_1_3_2_2_20_1","volume-title":"Anomaly Detection? LLMs and VLMs in the Spotlight. arXiv preprint arXiv:2412.18298","author":"Ding Xi","year":"2024","unstructured":"Xi Ding and Lei Wang. 2024b. Quo Vadis, Anomaly Detection? LLMs and VLMs in the Spotlight. arXiv preprint arXiv:2412.18298 (2024)."},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72940-9_23"},{"key":"e_1_3_2_2_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACPR.2015.7486569"},{"key":"e_1_3_2_2_23_1","volume-title":"Dg-stgcn: Dynamic spatial-temporal modeling for skeleton-based action recognition. arXiv preprint arXiv:2210.05895","author":"Duan Haodong","year":"2022","unstructured":"Haodong Duan, Jiaqi Wang, Kai Chen, and Dahua Lin. 2022a. Dg-stgcn: Dynamic spatial-temporal modeling for skeleton-based action recognition. arXiv preprint arXiv:2210.05895 (2022)."},{"key":"e_1_3_2_2_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548546"},{"key":"e_1_3_2_2_25_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-92659-5_17"},{"key":"e_1_3_2_2_26_1","volume-title":"A new adjacency matrix configuration in GCN-based models for skeleton-based action recognition. arXiv preprint arXiv:2206.14344","author":"Fang Zheng","year":"2022","unstructured":"Zheng Fang, Xiongwei Zhang, Tieyong Cao, Yunfei Zheng, and Meng Sun. 2022. A new adjacency matrix configuration in GCN-based models for skeleton-based action recognition. arXiv preprint arXiv:2206.14344 (2022)."},{"key":"e_1_3_2_2_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/ISPA52656.2021.9552064"},{"key":"e_1_3_2_2_28_1","volume-title":"A comparative review of graph convolutional networks for human skeleton-based action recognition. Artificial Intelligence Review","author":"Feng Liqi","year":"2022","unstructured":"Liqi Feng, Yaqin Zhao, Wenxuan Zhao, and Jiaxi Tang. 2022. A comparative review of graph convolutional networks for human skeleton-based action recognition. Artificial Intelligence Review (2022), 1--31."},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013558"},{"key":"e_1_3_2_2_30_1","doi-asserted-by":"publisher","DOI":"10.1109\/TETC.2022.3230912"},{"key":"e_1_3_2_2_31_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2020.01.010"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-021-02723-6"},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3382117"},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2199502"},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.1145\/3631444"},{"key":"e_1_3_2_2_36_1","volume-title":"Proceedings of the Asian Conference on Computer Vision. 1265--1281","author":"Hang Rui","year":"2022","unstructured":"Rui Hang and MinXian Li. 2022. Spatial-temporal adaptive graph convolutional network for skeleton-based action recognition. In Proceedings of the Asian Conference on Computer Vision. 1265--1281."},{"key":"e_1_3_2_2_37_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3051495"},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.120683"},{"key":"e_1_3_2_2_39_1","volume-title":"MAFormer: A cross-channel spatio-temporal feature aggregation method for human action recognition. AI Communications Preprint","author":"Huang Hongbo","year":"2024","unstructured":"Hongbo Huang, Longfei Xu, Yaolin Zheng, and Xiaoxu Yan. 2024. MAFormer: A cross-channel spatio-temporal feature aggregation method for human action recognition. AI Communications Preprint (2024), 1--15."},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2023.106855"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206795"},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3217763"},{"key":"e_1_3_2_2_43_1","volume-title":"Spatial-temporal interleaved network for efficient action recognition","author":"Jiang Shengqin","year":"2024","unstructured":"Shengqin Jiang, Haokui Zhang, Yuankai Qi, and Qingshan Liu. 2024. Spatial-temporal interleaved network for efficient action recognition. IEEE Transactions on Industrial Informatics (2024)."},{"key":"e_1_3_2_2_44_1","volume-title":"Human action recognition systems: A review of the trends and state-of-the-art","author":"Karim Misha","year":"2024","unstructured":"Misha Karim, Shah Khalid, Aliya Aleryani, Jawad Khan, Irfan Ullah, and Zafar Ali. 2024. Human action recognition systems: A review of the trends and state-of-the-art. IEEE Access (2024)."},{"key":"e_1_3_2_2_45_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01792"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.1145\/3637528.3671457"},{"key":"e_1_3_2_2_47_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3107160"},{"key":"e_1_3_2_2_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3061115"},{"key":"e_1_3_2_2_49_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00371"},{"key":"e_1_3_2_2_50_1","volume-title":"Skeleton-based action recognition through attention guided heterogeneous graph neural network. Knowledge-Based Systems","author":"Li Tianchen","year":"2024","unstructured":"Tianchen Li, Pei Geng, Xuequan Lu, Wanqing Li, and Lei Lyu. 2024b. Skeleton-based action recognition through attention guided heterogeneous graph neural network. Knowledge-Based Systems (2024), 112868."},{"key":"e_1_3_2_2_51_1","doi-asserted-by":"publisher","DOI":"10.3390\/electronics13224544"},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.1002\/cav.2221"},{"key":"e_1_3_2_2_53_1","doi-asserted-by":"publisher","DOI":"10.1007\/s40747-024-01743-2"},{"key":"e_1_3_2_2_54_1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8967570"},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681015"},{"key":"e_1_3_2_2_56_1","volume-title":"Ntu rgb d 120: A large-scale benchmark for 3d human activity understanding","author":"Liu Jun","year":"2019","unstructured":"Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C Kot. 2019b. Ntu rgb d 120: A large-scale benchmark for 3d human activity understanding. IEEE transactions on pattern analysis and machine intelligence, Vol. 42, 10 (2019), 2684--2701."},{"key":"e_1_3_2_2_57_1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/109"},{"key":"e_1_3_2_2_58_1","volume-title":"GCN-based Multi-modality Fusion Network for Action Recognition","author":"Liu Shaocan","year":"2024","unstructured":"Shaocan Liu, Xingtao Wang, Ruiqin Xiong, and Xiaopeng Fan. 2024b. GCN-based Multi-modality Fusion Network for Action Recognition. IEEE Transactions on Multimedia (2024)."},{"key":"e_1_3_2_2_59_1","unstructured":"Yi Liu Ruyi Liu Yuzhi Hu Mengyao Wu Wentian Xin Qiguang Miao Shuai Wu and Long Li. [n. d.]. A Systematic Review of Skeleton-Based Action Recognition: Recent Advances Challenges and Future Directions. Challenges and Future Directions ( [n. d.])."},{"key":"e_1_3_2_2_60_1","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2023.3247075"},{"key":"e_1_3_2_2_61_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-60639-8_40"},{"key":"e_1_3_2_2_62_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-023-16001-9"},{"key":"e_1_3_2_2_63_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3391913"},{"key":"e_1_3_2_2_64_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3378886"},{"key":"e_1_3_2_2_65_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3201518"},{"key":"e_1_3_2_2_66_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3247820"},{"key":"e_1_3_2_2_67_1","volume-title":"Tracknetv4: Enhancing fast sports object tracking with motion attention maps. arXiv preprint arXiv:2409.14543","author":"Raj Arjun","year":"2024","unstructured":"Arjun Raj, Lei Wang, and Tom Gedeon. 2024. Tracknetv4: Enhancing fast sports object tracking with motion attention maps. arXiv preprint arXiv:2409.14543 (2024)."},{"key":"e_1_3_2_2_68_1","doi-asserted-by":"publisher","DOI":"10.34133\/cbsystems.0100"},{"key":"e_1_3_2_2_69_1","unstructured":"Zilaing Ren Li Luo Yong Qin Xiangyang Gao and Qieshi Zhang. [n. d.]. Skeleton-Guided and Supervised Learning of Hybrid Network for Multi-Modal Action Recognition. Available at SSRN 4970121 ( [n. d.])."},{"key":"e_1_3_2_2_70_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11431-023-2491-4"},{"key":"e_1_3_2_2_71_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.115"},{"key":"e_1_3_2_2_72_1","volume-title":"Syed Muhammad Shamsul Islam, and Naveed Akhtar","author":"Shaikh Muhammad Bilal","year":"2024","unstructured":"Muhammad Bilal Shaikh, Douglas Chai, Syed Muhammad Shamsul Islam, and Naveed Akhtar. 2024. From CNNs to Transformers in Multimodal Human Action Recognition: A Survey. ACM Transactions on Multimedia Computing, Communications and Applications (2024)."},{"key":"e_1_3_2_2_73_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00810"},{"key":"e_1_3_2_2_74_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01230"},{"key":"e_1_3_2_2_75_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00132"},{"key":"e_1_3_2_2_76_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3157033"},{"key":"e_1_3_2_2_77_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-023-17788-3"},{"key":"e_1_3_2_2_78_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3318325"},{"key":"e_1_3_2_2_79_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-023-17276-8"},{"key":"e_1_3_2_2_80_1","volume-title":"Attention is all you need. Advances in Neural Information Processing Systems","author":"Vaswani A","year":"2017","unstructured":"A Vaswani. 2017. Attention is all you need. Advances in Neural Information Processing Systems (2017)."},{"key":"e_1_3_2_2_81_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20500-2_32"},{"key":"e_1_3_2_2_82_1","volume-title":"Analysis and Evaluation of Kinect-based Action Recognition Algorithms. Master's thesis. School of the Computer Science and Software Engineering","author":"Wang Lei","unstructured":"Lei Wang. 2017. Analysis and Evaluation of Kinect-based Action Recognition Algorithms. Master's thesis. School of the Computer Science and Software Engineering, The University of Western Australia."},{"key":"e_1_3_2_2_83_1","volume-title":"Robust human action modelling. Ph.,D. Dissertation","author":"Wang Lei","unstructured":"Lei Wang. 2023. Robust human action modelling. Ph.,D. Dissertation. The Australian National University (Australia)."},{"key":"e_1_3_2_2_84_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2019.2925285"},{"key":"e_1_3_2_2_85_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2019.8803051"},{"key":"e_1_3_2_2_86_1","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475572"},{"key":"e_1_3_2_2_87_1","volume-title":"Proceedings of the Asian Conference on Computer Vision. 4176--4193","author":"Wang Lei","year":"2022","unstructured":"Lei Wang and Piotr Koniusz. 2022a. Temporal-viewpoint transportation plan for skeletal few-shot action recognition. In Proceedings of the Asian Conference on Computer Vision. 4176--4193."},{"key":"e_1_3_2_2_88_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19803-8_11"},{"key":"e_1_3_2_2_89_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00544"},{"key":"e_1_3_2_2_90_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446223"},{"key":"e_1_3_2_2_91_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00879"},{"key":"e_1_3_2_2_92_1","volume-title":"3D Skeleton-based Few-shot Action Recognition with JEANIE is not so Na'' ive. arXiv preprint arXiv:2112.12668","author":"Wang Lei","year":"2021","unstructured":"Lei Wang, Jun Liu, and Piotr Koniusz. 2021. 3D Skeleton-based Few-shot Action Recognition with JEANIE is not so Na'' ive. arXiv preprint arXiv:2112.12668 (2021)."},{"key":"e_1_3_2_2_93_1","volume-title":"Meet JEANIE: a Similarity Measure for 3D Skeleton Sequences via Temporal-Viewpoint Alignment. International Journal of Computer Vision","author":"Wang Lei","year":"2024","unstructured":"Lei Wang, Jun Liu, Liang Zheng, Tom Gedeon, and Piotr Koniusz. 2024a. Meet JEANIE: a Similarity Measure for 3D Skeleton Sequences via Temporal-Viewpoint Alignment. International Journal of Computer Vision (2024), 1--32."},{"key":"e_1_3_2_2_94_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446900"},{"key":"e_1_3_2_2_95_1","volume-title":"Taylor videos for action recognition. arXiv preprint arXiv:2402.03019","author":"Wang Lei","year":"2024","unstructured":"Lei Wang, Xiuyuan Yuan, Tom Gedeon, and Liang Zheng. 2024c. Taylor videos for action recognition. arXiv preprint arXiv:2402.03019 (2024)."},{"key":"e_1_3_2_2_96_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3405712"},{"key":"e_1_3_2_2_97_1","unstructured":"Ying Wang Lu Zhang Jingliang Peng and Na Lv. [n. d.]. Motion-Centric Retrieval of 3d Human Skeleton and 2d Human Image Sequences. Available at SSRN 4925586 ( [n. d.])."},{"key":"e_1_3_2_2_98_1","doi-asserted-by":"publisher","DOI":"10.3390\/s22228738"},{"key":"e_1_3_2_2_99_1","doi-asserted-by":"publisher","DOI":"10.1109\/JSEN.2021.3089705"},{"key":"e_1_3_2_2_100_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-022-14193-0"},{"key":"e_1_3_2_2_101_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSI.2023.3254610"},{"key":"e_1_3_2_2_102_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2024.111106"},{"key":"e_1_3_2_2_103_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2024.110427"},{"key":"e_1_3_2_2_104_1","volume-title":"Multi-stream network with key frame sampling for human action recognition. The Journal of Supercomputing","author":"Xia Limin","year":"2024","unstructured":"Limin Xia and Xin Wen. 2024. Multi-stream network with key frame sampling for human action recognition. The Journal of Supercomputing (2024), 1--31."},{"key":"e_1_3_2_2_105_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3246127"},{"key":"e_1_3_2_2_106_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.03.001"},{"key":"e_1_3_2_2_107_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-022-03589-y"},{"key":"e_1_3_2_2_108_1","doi-asserted-by":"publisher","DOI":"10.1049\/cvi2.12298"},{"key":"e_1_3_2_2_109_1","volume-title":"Hypergcn: A new method for training graph convolutional networks on hypergraphs. Advances in neural information processing systems","author":"Yadati Naganand","year":"2019","unstructured":"Naganand Yadati, Madhav Nimishakavi, Prateek Yadav, Vikram Nitin, Anand Louis, and Partha Talukdar. 2019. Hypergcn: A new method for training graph convolutional networks on hypergraphs. Advances in neural information processing systems, Vol. 32 (2019)."},{"key":"e_1_3_2_2_110_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"e_1_3_2_2_111_1","doi-asserted-by":"publisher","DOI":"10.1145\/3338533.3366569"},{"key":"e_1_3_2_2_112_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3129117"},{"key":"e_1_3_2_2_113_1","first-page":"4040","article-title":"ST-GCN human action recognition based on new partition strategy","volume":"29","author":"Shiqiang YANG","year":"2023","unstructured":"Shiqiang YANG, Zhuo LI, Jinhua WANG, Duo HE, Qi LI, and Dexin LI. 2023. ST-GCN human action recognition based on new partition strategy. Computer Integrated Manufacturing System, Vol. 29, 12 (2023), 4040.","journal-title":"Computer Integrated Manufacturing System"},{"key":"e_1_3_2_2_114_1","volume-title":"Expressive Keypoints for Skeleton-based Action Recognition via Skeleton Transformation. arXiv preprint arXiv:2406.18011","author":"Yang Yijie","year":"2024","unstructured":"Yijie Yang, Jinlu Zhang, Jiaxu Zhang, and Zhigang Tu. 2024. Expressive Keypoints for Skeleton-based Action Recognition via Skeleton Transformation. arXiv preprint arXiv:2406.18011 (2024)."},{"key":"e_1_3_2_2_115_1","volume-title":"Skeleton focused human activity recognition in rgb video. arXiv preprint arXiv:2004.13979","author":"Yu Bruce XB","year":"2020","unstructured":"Bruce XB Yu, Yan Liu, and Keith CC Chan. 2020. Skeleton focused human activity recognition in rgb video. arXiv preprint arXiv:2004.13979 (2020)."},{"key":"e_1_3_2_2_116_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2022.09.071"},{"key":"e_1_3_2_2_117_1","doi-asserted-by":"publisher","DOI":"10.1049\/cit2.12012"},{"key":"e_1_3_2_2_118_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2013.2240916"},{"key":"e_1_3_2_2_119_1","volume-title":"Learning with hypergraphs: Clustering, classification, and embedding. Advances in neural information processing systems","author":"Zhou Dengyong","year":"2006","unstructured":"Dengyong Zhou, Jiayuan Huang, and Bernhard Sch\u00f6lkopf. 2006. Learning with hypergraphs: Clustering, classification, and embedding. Advances in neural information processing systems, Vol. 19 (2006)."},{"key":"e_1_3_2_2_120_1","volume-title":"Hypergraph transformer for skeleton-based action recognition. arXiv preprint arXiv:2211.09590","author":"Zhou Yuxuan","year":"2022","unstructured":"Yuxuan Zhou, Zhi-Qi Cheng, Chao Li, Yanwen Fang, Yifeng Geng, Xuansong Xie, and Margret Keuper. 2022. Hypergraph transformer for skeleton-based action recognition. arXiv preprint arXiv:2211.09590 (2022)."},{"key":"e_1_3_2_2_121_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00200"},{"key":"e_1_3_2_2_122_1","volume-title":"YOWOv3: A Lightweight Spatio-Temporal Joint Network for Video Action Detection","author":"Zhu Anlei","year":"2024","unstructured":"Anlei Zhu, Yinghui Wang, Jinlong Yang, Tao Yan, Haomiao Ma, and Wei Li. 2024c. YOWOv3: A Lightweight Spatio-Temporal Joint Network for Video Action Detection. IEEE Transactions on Circuits and Systems for Video Technology (2024)."},{"key":"e_1_3_2_2_123_1","volume-title":"The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track.","author":"Zhu Liyun","unstructured":"Liyun Zhu, Lei Wang, Arjun Raj, Tom Gedeon, and Chen Chen. [n.,d.]. Advancing Video Anomaly Detection: A Concise Review and a New Dataset. In The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track."},{"key":"e_1_3_2_2_124_1","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3416732"},{"key":"e_1_3_2_2_125_1","volume-title":"Dynamical Attention Hypergraph Convolutional Network for Group Activity Recognition","author":"Zhu Xiaolin","year":"2024","unstructured":"Xiaolin Zhu, Dongli Wang, Jianxun Li, Rui Su, Qin Wan, and Yan Zhou. 2024b. Dynamical Attention Hypergraph Convolutional Network for Group Activity Recognition. IEEE Transactions on Neural Networks and Learning Systems (2024)."},{"key":"e_1_3_2_2_126_1","doi-asserted-by":"publisher","DOI":"10.1145\/3512527.3531367"},{"key":"e_1_3_2_2_127_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3230249"},{"key":"e_1_3_2_2_128_1","volume-title":"DSDC-GCN: Decoupled Static-Dynamic Co-occurrence Graph Convolutional Networks for Skeleton-Based Action Recognition","author":"Zhuang Tianming","year":"2024","unstructured":"Tianming Zhuang, Zhen Qin, Yi Ding, Zhiguang Qin, Ji Geng, Yi Liu, and Kim-Kwang Raymond Choo. 2024. DSDC-GCN: Decoupled Static-Dynamic Co-occurrence Graph Convolutional Networks for Skeleton-Based Action Recognition. IEEE Transactions on Circuits and Systems for Video Technology (2024)."}],"event":{"name":"WWW '25: The ACM Web Conference 2025","location":"Sydney NSW Australia","acronym":"WWW '25","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"]},"container-title":["Companion Proceedings of the ACM on Web Conference 2025"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3701716.3717739","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,7]],"date-time":"2025-10-07T18:30:14Z","timestamp":1759861814000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3701716.3717739"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,8]]},"references-count":128,"alternative-id":["10.1145\/3701716.3717739","10.1145\/3701716"],"URL":"https:\/\/doi.org\/10.1145\/3701716.3717739","relation":{},"subject":[],"published":{"date-parts":[[2025,5,8]]},"assertion":[{"value":"2025-05-23","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}