{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T01:00:43Z","timestamp":1764723643182,"version":"3.46.0"},"reference-count":23,"publisher":"Springer Science and Business Media LLC","issue":"16","license":[{"start":{"date-parts":[[2025,11,20]],"date-time":"2025-11-20T00:00:00Z","timestamp":1763596800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,11,20]],"date-time":"2025-11-20T00:00:00Z","timestamp":1763596800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61971253"],"award-info":[{"award-number":["61971253"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"National College Student Innovation and Entrepreneurship Training Program","award":["202410426014"],"award-info":[{"award-number":["202410426014"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1007\/s11760-025-04966-z","type":"journal-article","created":{"date-parts":[[2025,11,20]],"date-time":"2025-11-20T01:43:02Z","timestamp":1763602982000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["SAD-GCN: Semantic-aware deformable graph convolutional networks for human abnormal behavior recognition"],"prefix":"10.1007","volume":"19","author":[{"given":"Jiakang","family":"Dai","sequence":"first","affiliation":[]},{"given":"Yuping","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Yongping","family":"Zhu","sequence":"additional","affiliation":[]},{"given":"Mingliang","family":"Huo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,20]]},"reference":[{"issue":"10s","key":"4966_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3505244","volume":"54","author":"S Khan","year":"2022","unstructured":"Khan, S., Naseer, M., Hayat, M., Zamir, S.W., Khan, F.S., Shah, M.: Transformers in vision: A survey. ACM Comput. Surveys. 54(10s), 1\u201341 (2022). https:\/\/doi.org\/10.1145\/3505244","journal-title":"ACM Comput. Surveys"},{"key":"4966_CR2","unstructured":"Liu, H., Han, X., Jin, C., Qian, L., Wei, H., Lin, Z., et al.: Human motionformer: Transferring human motions with vision transformers, arXiv preprint arXiv:2302.11306, (2023)"},{"key":"4966_CR3","doi-asserted-by":"publisher","unstructured":"Patrick, M., Campbell, D., Asano, Y., et al.: Keeping your eye on the ball: Trajectory attention in video transformers, in Advances in Neural Information Processing Systems, vol. 34, pp. 12493\u2009\u2013\u200912406. (2021). https:\/\/doi.org\/10.48550\/arXiv.2106.05392","DOI":"10.48550\/arXiv.2106.05392"},{"key":"4966_CR4","doi-asserted-by":"publisher","unstructured":"Vemulapalli, R., Arrate, F., Chellappa, R.: Human Action Recognition by Representing 3D Skeletons as Points in a Lie Group, in: 2014 IEEE Conference on Computer Vision and Pattern Recognition, 2014, pp. 588\u2013595. https:\/\/doi.org\/10.1109\/CVPR.2014.82","DOI":"10.1109\/CVPR.2014.82"},{"key":"4966_CR5","doi-asserted-by":"publisher","first-page":"24","DOI":"10.1016\/j.jvcir.2013.04.007","volume":"25","author":"F Ofli","year":"2014","unstructured":"Ofli, F., Chaudhry, R., Kurillo, G., Vidal, R., Bajcsy, R.: Sequence of the most informative joints (SMIJ): A new representation for human skeletal action recognition. J. Vis. Commun. Image Represent. 25, 24\u201338 (2014). https:\/\/doi.org\/10.1016\/j.jvcir.2013.04.007","journal-title":"J. Vis. Commun. Image Represent."},{"key":"4966_CR6","doi-asserted-by":"publisher","unstructured":"Yong, D., Wang, W., Wang, L.: Hierarchical recurrent neural network for skeleton based action recognition, in: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1110\u20131118. (2015). https:\/\/doi.org\/10.1109\/CVPR.2015.7298714","DOI":"10.1109\/CVPR.2015.7298714"},{"key":"4966_CR7","doi-asserted-by":"publisher","first-page":"1963","DOI":"10.1109\/TPAMI.2019.2896631","volume":"41","author":"P Zhang","year":"2019","unstructured":"Zhang, P., Lan, C., Xing, J., Zeng, W., Xue, J., Zheng, N.: View adaptive neural networks for high performance Skeleton-Based human action recognition. IEEE Trans. Pattern Anal. Mach. Intell. 41, 1963\u20131978 (2019). https:\/\/doi.org\/10.1109\/TPAMI.2019.2896631","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"4966_CR8","doi-asserted-by":"publisher","unstructured":"Wang, H., Wang, L.: Modeling Temporal Dynamics and Spatial Configurations of Actions Using Two-Stream Recurrent Neural Networks, in: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017, pp. 3633\u20133642. https:\/\/doi.org\/10.48550\/arXiv.1704.02581","DOI":"10.48550\/arXiv.1704.02581"},{"key":"4966_CR9","doi-asserted-by":"publisher","unstructured":"Chao, L., Qiaoyong, Z., Di, X., Shiliang, P.: Skeleton-based action recognition with convolutional neural networks, in: IEEE International Conference on Multimedia & Expo Workshops (ICMEW), 2017, pp. 597\u2013600. (2017). https:\/\/doi.org\/10.1109\/LSP.2017.2678539","DOI":"10.1109\/LSP.2017.2678539"},{"key":"4966_CR10","doi-asserted-by":"publisher","unstructured":"Kim, T.S., Reiter, A.: Interpretable 3D Human Action Analysis with Temporal Convolutional Networks, in: 2017 IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1623\u20131631. (2017). https:\/\/doi.org\/10.48550\/arXiv.1704.04516","DOI":"10.48550\/arXiv.1704.04516"},{"key":"4966_CR11","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1705.08106","author":"H Liu","year":"2017","unstructured":"Liu, H., Tu, J., Liu, M.J.: Two-stream 3d convolutional neural network for skeleton-based action recognition. ArXiv Preprint arXiv:1705 08106. (2017). https:\/\/doi.org\/10.48550\/arXiv.1705.08106","journal-title":"ArXiv Preprint arXiv:1705 08106"},{"key":"4966_CR12","doi-asserted-by":"publisher","unstructured":"Ke, Q., Bennamoun, M., An, S., Sohel, F., Boussaid, F.: A New Representation of Skeleton Sequences for 3D Action Recognition, in: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017, pp. 4570\u20134579. (2017). https:\/\/doi.org\/10.1109\/CVPR.2017.486","DOI":"10.1109\/CVPR.2017.486"},{"key":"4966_CR13","doi-asserted-by":"publisher","first-page":"346","DOI":"10.1016\/j.patcog.2017.02.030","volume":"68","author":"M Liu","year":"2017","unstructured":"Liu, M., Liu, H., Chen, C.: Enhanced skeleton visualization for view invariant human action recognition. Pattern Recogn. 68, 346\u2013362 (2017). https:\/\/doi.org\/10.1016\/j.patcog.2017.02.030","journal-title":"Pattern Recogn."},{"key":"4966_CR14","doi-asserted-by":"publisher","unstructured":"Li, C., Zhong, Q., Xie, D., Pu, S.: Skeleton-based action recognition with convolutional neural networks, in: IEEE international conference on multimedia & expo workshops (ICMEW), IEEE, 2017, pp. 597\u2013600. (2017). https:\/\/doi.org\/10.1109\/LSP.2017.2678539","DOI":"10.1109\/LSP.2017.2678539"},{"key":"4966_CR15","doi-asserted-by":"publisher","unstructured":"Li, B., Dai, Y., Cheng, X., Chen, H., Lin, Y., He, M.: Skeleton based action recognition using translation-scale invariant image mapping and multi-scale deep CNN, in: IEEE International Conference on Multimedia & Expo Workshops (ICMEW), IEEE, 2017, pp. 601\u2013604. (2017). https:\/\/doi.org\/10.1109\/ICMEW.2017.8026282","DOI":"10.1109\/ICMEW.2017.8026282"},{"key":"4966_CR16","doi-asserted-by":"publisher","unstructured":"Xu, K., Ye, F., Zhong, Q., Xie, D.: Topology-aware convolutional neural network for efficient skeleton-based action recognition, in: Proceedings of the AAAI Conference on Artificial Intelligence, 2022, pp. 2866\u20132874. https:\/\/doi.org\/10.48550\/arXiv.2112.04178","DOI":"10.48550\/arXiv.2112.04178"},{"key":"4966_CR17","doi-asserted-by":"publisher","unstructured":"Yan, S., Xiong, Y., Lin, D.: Spatial temporal graph convolutional networks for skeleton-based action recognition, in: Proceedings of the AAAI conference on artificial intelligence, pp. 7444\u20137452. (2018). https:\/\/doi.org\/10.48550\/arXiv.1801.07455","DOI":"10.48550\/arXiv.1801.07455"},{"key":"4966_CR18","doi-asserted-by":"publisher","unstructured":"Chen, Y., Zhang, Z., Yuan, C., Li, B., Deng, Y., Hu, W.: Channel-wise Topology Refinement Graph Convolution for Skeleton-Based Action Recognition, in: 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 13339\u201313348. (2021). https:\/\/doi.org\/10.48550\/arXiv.2107.12213","DOI":"10.48550\/arXiv.2107.12213"},{"key":"4966_CR19","doi-asserted-by":"publisher","unstructured":"Crasto, N., Weinzaepfel, P., Alahari, K., Schmid, C.: MARS: Motion-Augmented RGB Stream for Action Recognition, in: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, Long Beach, CA, USA, pp. 7874\u20137883. (2019). https:\/\/doi.org\/10.1109\/CVPR.2019.00807","DOI":"10.1109\/CVPR.2019.00807"},{"key":"4966_CR20","doi-asserted-by":"publisher","unstructured":"Zhou, B., Wang, P., Wan, J., Liang, Y., Wang, F.: A unified multimodal de-and re-coupling framework for RGB-D motion recognition, in: Proceedings of the IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 11428\u201311442. (2023). https:\/\/doi.org\/10.48550\/arXiv.2211.09146","DOI":"10.48550\/arXiv.2211.09146"},{"key":"4966_CR21","doi-asserted-by":"publisher","unstructured":"Xie, J., Zhao, Y., Meng, Y., Zhao, H., Nguyen, A., Zheng, Y.: Are Spatial-Temporal Graph Convolution Networks for Human Action Recognition Over-Parameterized? In Proceedings of the Computer Vision and Pattern Recognition Conference (CVPR), pp. 24309\u201324319. (2025). https:\/\/doi.org\/10.48550\/arXiv.2505.10679","DOI":"10.48550\/arXiv.2505.10679"},{"key":"4966_CR22","doi-asserted-by":"publisher","unstructured":"Duan, H., Wang, J., Chen, K., Lin, D.: Pyskl: Towards good practices for skeleton action recognition, in: Proceedings of the 30th ACM International Conference on Multimedia, 2022, pp. 7351\u20137354. https:\/\/doi.org\/10.48550\/arXiv.2205.09443","DOI":"10.48550\/arXiv.2205.09443"},{"key":"4966_CR23","doi-asserted-by":"publisher","first-page":"10134","DOI":"10.1038\/s41598-024-60598-2","volume":"14","author":"B Wang","year":"2024","unstructured":"Wang, B., Cai, B., Sheng, J., Jiao, W.J.S.R.: AAGCN: A graph convolutional neural network with adaptive feature and topology learning. Sci. Rep. 14, 10134 (2024). https:\/\/doi.org\/10.1038\/s41598-024-60598-2","journal-title":"Sci. Rep."}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-04966-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-025-04966-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-04966-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T00:55:47Z","timestamp":1764723347000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-025-04966-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,20]]},"references-count":23,"journal-issue":{"issue":"16","published-print":{"date-parts":[[2025,12]]}},"alternative-id":["4966"],"URL":"https:\/\/doi.org\/10.1007\/s11760-025-04966-z","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"type":"print","value":"1863-1703"},{"type":"electronic","value":"1863-1711"}],"subject":[],"published":{"date-parts":[[2025,11,20]]},"assertion":[{"value":"2 July 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 October 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 November 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 November 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"1377"}}