{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,17]],"date-time":"2026-01-17T02:40:12Z","timestamp":1768617612328,"version":"3.49.0"},"reference-count":46,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T00:00:00Z","timestamp":1738886400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T00:00:00Z","timestamp":1738886400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1007\/s00371-025-03816-w","type":"journal-article","created":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T03:46:02Z","timestamp":1738899962000},"page":"7447-7458","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["STIG-Net: a spatial\u2013temporal interactive graph framework for recognizing violent behaviors in videos"],"prefix":"10.1007","volume":"41","author":[{"given":"Xinbiao","family":"Lu","sequence":"first","affiliation":[]},{"given":"Yisen","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Yudan","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Xing","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Tieliu","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Guiyun","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,7]]},"reference":[{"key":"3816_CR1","doi-asserted-by":"publisher","unstructured":"Islam, Z., Rukonuzzaman, M., Ahmed, R., Kabir, M., 
Farazi, M.: Efficient two-stream network for violence detection using separable convolutional LSTM. In: Proceedings of 2021 International Joint Conference on Neural Networks (IJCNN), Shenzhen, China, pp. 1\u20138, (2021) https:\/\/doi.org\/10.1109\/IJCNN52387.2021.9534280","DOI":"10.1109\/IJCNN52387.2021.9534280"},{"key":"3816_CR2","doi-asserted-by":"publisher","unstructured":"Joe, Y., Ng, Matthew, H., Sudheendra, V.: Beyond short snippets: deep networks for video classification. In: Proceedings of the IEEE Conference, pp. 4694\u20134702, (2015) https:\/\/doi.org\/10.1109\/CVPR.2015.7299101","DOI":"10.1109\/CVPR.2015.7299101"},{"issue":"63","key":"3816_CR3","first-page":"933","volume":"21","author":"AS Ke\u00e7eli","year":"2019","unstructured":"Ke\u00e7eli, A.S., Kaya, A.: Video g\u00f6r\u00fcnt\u00fclerinde \u015fiddet i\u00e7eren aktivitelerin lstm a\u011f\u0131 ile tespiti. Dokuz Eyl\u00fcl \u00dcniversitesi M\u00fchendislik Fak\u00fcltesi Fen ve M\u00fchendislik Dergisi 21(63), 933\u2013939 (2019)","journal-title":"Dokuz Eyl\u00fcl \u00dcniversitesi M\u00fchendislik Fak\u00fcltesi Fen ve M\u00fchendislik Dergisi"},{"key":"3816_CR4","unstructured":"Alexey, D., Lucas, B., Alexander, K., Dirk, W. et al.: An image is worth 16x16 words: Transformers for image recognition at scale, arXiv preprint arXiv:2010.11929, 2020."},{"key":"3816_CR5","unstructured":"Joao, C., Andrew, Z.: Quo vadis, action recognition? New model and the kinetics dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp.6299\u20136308 (2017)"},{"key":"3816_CR6","doi-asserted-by":"crossref","unstructured":"Du. T., Lubomir, B., Rob, F., Lorenzo, T., and Manohar, P.: Learning spatiotemporal features with 3d convolutional networks. In: Proceedings of the IEEE international conference on computer vision, pp. 
4489\u20134497 (2015)","DOI":"10.1109\/ICCV.2015.510"},{"key":"3816_CR7","unstructured":"Du, T., Heng, W., Lorenzo, T., Jamie, R., Yann, L., and Manohar, P.: A closer look at spatiotemporal convolutions for action recognition. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp. 6450\u20136459 (2018)"},{"key":"3816_CR8","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1007\/s11760-022-02213-3","volume":"17","author":"AS Keceli","year":"2023","unstructured":"Keceli, A.S., Kaya, A.: Violent activity classification with transferred deep features and 3d-Cnn. SIViP 17, 139\u2013146 (2023). https:\/\/doi.org\/10.1007\/s11760-022-02213-3","journal-title":"SIViP"},{"key":"3816_CR9","doi-asserted-by":"crossref","unstructured":"Giannakopoulos, T., Makris, A., Kosmopoulos, D., Perantonis, S.,Theodoridis, S.: Audio-visual fusion for detecting violent scenes in videos. In: Hellenic Conference on Artificial Intelligence, pp. 91\u2013100. Springer (2010)","DOI":"10.1007\/978-3-642-12842-4_13"},{"issue":"1","key":"3816_CR10","doi-asserted-by":"publisher","first-page":"48","DOI":"10.1186\/s40537-019-0212-5","volume":"6","author":"G Sreenu","year":"2019","unstructured":"Sreenu, G., Durai, M.S.: Intelligent video surveillance: a review through deep learning techniques for crowd analysis. J. Big Data 6(1), 48 (2019)","journal-title":"J. Big Data"},{"issue":"6","key":"3816_CR11","first-page":"113","volume":"7","author":"J Baek","year":"2017","unstructured":"Baek, J., Lee, D., Hong, C., Ahn, B.: Multimodal approach for blocking obscene and violent contents. J. Converg. Inf. Technol. 7(6), 113\u2013121 (2017)","journal-title":"J. Converg. Inf. Technol."},{"issue":"5","key":"3816_CR12","doi-asserted-by":"publisher","first-page":"1410","DOI":"10.1109\/TCSVT.2019.2902937","volume":"30","author":"B Sheng","year":"2020","unstructured":"Sheng, B., et al.: Illumination-invariant video cut-out using octagon sensitive optimization. IEEE Trans. 
Circuits Syst. Video Technol. 30(5), 1410\u20131422 (2020)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"7\u20138","key":"3816_CR13","first-page":"4661","volume":"79","author":"L Chao","year":"2020","unstructured":"Chao, L., et al.: Video flickering removal using temporal reconstruction optimization. Multim. Tools Appl. 79(7\u20138), 4661\u20134679 (2020)","journal-title":"Multim. Tools Appl."},{"key":"3816_CR14","doi-asserted-by":"crossref","unstructured":"Cui, Y., et al.: Selective frequency network for image restoration. International Conference on Learning Representations (2023)","DOI":"10.1109\/ICCV51070.2023.01195"},{"key":"3816_CR15","first-page":"6545","volume":"202","author":"Y Cui","year":"2023","unstructured":"Cui, Y., Ren, W., Yang, S., Cao, X., Knoll, A.: IRNeXt: rethinking convolutional network design for image restoration. Proc. Int. Conf. Mach. Learn. (ICML) 202, 6545\u20136564 (2023)","journal-title":"Proc. Int. Conf. Mach. Learn. (ICML)"},{"issue":"2","key":"3816_CR16","first-page":"1426","volume":"38","author":"Y Cui","year":"2024","unstructured":"Cui, Y., Ren, W., Knoll, A.: Omni-kernel network for image restoration. Proc AAAI Conf Artif Intell 38(2), 1426\u20131434 (2024)","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"3816_CR17","doi-asserted-by":"publisher","first-page":"1187","DOI":"10.1007\/s11760-021-02069-z","volume":"16","author":"X Lin","year":"2022","unstructured":"Lin, X., Li, Y., Zhou, Y., et al.: FSR: a feature self-regulation network for partially occluded hand pose estimation. SIViP 16, 1187\u20131195 (2022). https:\/\/doi.org\/10.1007\/s11760-021-02069-z","journal-title":"SIViP"},{"key":"3816_CR18","doi-asserted-by":"publisher","DOI":"10.1007\/s11760-023-02764-z","author":"F Aghabeigi","year":"2023","unstructured":"Aghabeigi, F., Nazari, S., Osati Eraghi, N.: An optimized facial emotion recognition architecture based on a deep convolutional neural network and genetic algorithm. SIViP (2023). 
https:\/\/doi.org\/10.1007\/s11760-023-02764-z","journal-title":"SIViP"},{"key":"3816_CR19","doi-asserted-by":"publisher","unstructured":"Su, Y., Lin, G., Zhu, J., Wu.: Human interaction learning on 3D skeleton point clouds for video violence recognition. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, JM. (eds). In: Proceedings of Computer Vision\u2013ECCV 2020. ECCV 2020. Lecture Notes in Computer Science, vol. 12349. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-030-58548-8_5","DOI":"10.1007\/978-3-030-58548-8_5"},{"key":"3816_CR20","unstructured":"Mathias, N., Mohamed, A., and Konstantin, K.: Learning convolutional neural networks for graphs. In: International conference on machine learning, pp. 2014\u20132023 (2016)"},{"key":"3816_CR21","unstructured":"Micha\u00ebl, D., Xavier, B., and Pierre, V.: Convolutional neural networks on graphs with fast localized spectral filtering. In: Advances in Neural Information Processing Systems, pp. 3844\u20133852 (2016)"},{"key":"3816_CR22","unstructured":"Petar, V., Guillem, C., Arantxa, C., Adriana, R., Pietro, L., and Yoshua, B.: Graph attention networks, arXiv preprintarXiv:1710.10903, (2017)"},{"key":"3816_CR23","doi-asserted-by":"publisher","unstructured":"Brody, S., Alon, U., Yahav, E.: How attentive are graph attention networks?. https:\/\/doi.org\/10.48550\/arXiv.2105.14491 (2021)","DOI":"10.48550\/arXiv.2105.14491"},{"key":"3816_CR24","unstructured":"Jonas, G., Michael, A., David, G., and Yann, N.: A convolutional encoder model for neural machine translation, arXiv preprint arXiv:1611.02344 (2016)"},{"key":"3816_CR25","doi-asserted-by":"crossref","unstructured":"Yan, S., Xiong, Y., & Lin, D.: Spatial temporal graph convolutional networks for skeleton-based action recognition. 
In: Proceedings of AAAI Conference on Artificial Intelligence (2018)","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"3816_CR26","doi-asserted-by":"crossref","unstructured":"Tang, Y., Tian, Y., Lu, J., et al.: Deep Progressive Reinforcement Learning for Skeleton Based Action Recognition. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition. Salt Lake City, UT, USA: IEEE, 5323\u20135332 (2018)","DOI":"10.1109\/CVPR.2018.00558"},{"issue":"6","key":"3816_CR27","doi-asserted-by":"publisher","first-page":"1165","DOI":"10.1587\/transinf.2022EDP7182","volume":"106","author":"J Xu","year":"2023","unstructured":"Xu, J., Komorita, S., Kawamura, K.: FSPose: a heterogeneous framework with fast and slow networks for human pose estimation in videos. IEICE Trans. Inf. Syst. 106(6), 1165\u20131174 (2023). https:\/\/doi.org\/10.1587\/transinf.2022EDP7182","journal-title":"IEICE Trans. Inf. Syst."},{"key":"3816_CR28","unstructured":"Ravanbakhsh, M., Mousavi, H., Rastegari, M., et al.: Action recognition with image based CNN features. (2015). arXiv preprint arXiv:1512.03980"},{"key":"3816_CR29","doi-asserted-by":"crossref","unstructured":"Dong, Z., Qin, J., Wang, Y.: Multi-stream deep networks for person to person violence detection in videos. In: Chinese Conference on Pattern Recognition. Springer Singapore, (2016)","DOI":"10.1007\/978-981-10-3002-4_43"},{"key":"3816_CR30","doi-asserted-by":"publisher","unstructured":"Pan, H., et al.: Fighting detection based on pedestrian pose estimation. In: 2018 11th International Congress on Image and Signal Processing, BioMedical Engineering and Informatics (CISP-BMEI), Beijing, China, pp. 1\u20135, (2018) https:\/\/doi.org\/10.1109\/CISP-BMEI.2018.8633057","DOI":"10.1109\/CISP-BMEI.2018.8633057"},{"key":"3816_CR31","doi-asserted-by":"crossref","unstructured":"Sijie, Y., Yuanjun, X., Dahua, L.: Spatial temporal graph convolutional networks for skeleton-based action recognition. Proc. AAAI Conf. Artif. 
Intell., 32 (2018)","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"3816_CR32","doi-asserted-by":"publisher","unstructured":"Yong, D., Wang, W., Wang, L.: Hierarchical recurrent neural network for skeleton based action recognition. In: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Boston, MA, USA, pp. 1110\u20131118, (2015) https:\/\/doi.org\/10.1109\/CVPR.2015.7298714","DOI":"10.1109\/CVPR.2015.7298714"},{"issue":"9","key":"3816_CR33","doi-asserted-by":"publisher","first-page":"12130","DOI":"10.1109\/TNNLS.2023.3252172","volume":"35","author":"X Gao","year":"2024","unstructured":"Gao, X., Yang, Y., Wu, Y., Du, S.: Glimpse and focus: global and local-scale graph convolution network for skeleton-based action recognition. IEEE Trans. Neural Netw. Learn. Syst. 35(9), 12130\u201312141 (2024)","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"issue":"6","key":"3816_CR34","doi-asserted-by":"publisher","first-page":"3316","DOI":"10.1109\/TPAMI.2021.3053765","volume":"44","author":"M Li","year":"2022","unstructured":"Li, M., Chen, S., Chen, X., Zhang, Y., Wang, Y., Tian, Q.: Symbiotic graph neural networks for 3D skeleton-based human action recognition and motion prediction. IEEE Trans. Pattern Anal. Mach. Intell. 44(6), 3316\u20133333 (2022)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"3816_CR35","doi-asserted-by":"crossref","unstructured":"Fang, H., Xie, S., Tai, Y.W., Lu, C.: RMPE: regional multi-person pose estimation. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2334\u20132343 (2017)","DOI":"10.1109\/ICCV.2017.256"},{"key":"3816_CR36","unstructured":"Enrique, B., Oscar, D., Gloria, B., and Rahul, S.: Hockey fight detection dataset. In: Computer Analysis of Images and Patterns. Springer, pp. 
332\u2013339 (2011)"},{"key":"3816_CR37","doi-asserted-by":"publisher","DOI":"10.1016\/j.dib.2020.106587","volume":"33","author":"B Miriana","year":"2020","unstructured":"Miriana, B., Nicola, F., Paolo, S., Selene, T., Paolo, C., Mara, L., Aldo, F.: A dataset for automatic violence detection in videos. Data Brief 33, 106587 (2020)","journal-title":"Data Brief"},{"key":"3816_CR38","doi-asserted-by":"crossref","unstructured":"Akt\u0131, \u015e., Tataro\u011flu, G., Ekenel, H.: Vision-based fight detection from surveillance cameras. In: 2019 9th International Conference on Image Processing Theory, Tools and Applications (IPTA). IEEE, 1\u20136, (2019)","DOI":"10.1109\/IPTA.2019.8936070"},{"key":"3816_CR39","doi-asserted-by":"publisher","unstructured":"Cheng, M., Cai, K., Li, M.: RWF-2000: An open large scale video database for violence detection. In: International Conference on Pattern Recognition. IEEE Computer Society, (2021). https:\/\/doi.org\/10.1109\/ICPR48806.2021.9412502.","DOI":"10.1109\/ICPR48806.2021.9412502"},{"key":"3816_CR40","doi-asserted-by":"crossref","unstructured":"Hassner, T., Itcher, Y., Kliper-Gross, O.: Violent flows: Real-time detection of violent crowd behavior. In: Computer Vision and Pattern Recognition Workshops (CVPRW), IEEE computer society conference on 2012, pp. 1\u20136. IEEE (2012)","DOI":"10.1109\/CVPRW.2012.6239348"},{"key":"3816_CR41","unstructured":"Mohammadreza, Z., Kamaljeet, S., and Thomas, B.: Eco: Efficient convolutional network for online video understanding. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 695\u2013712 (2018)"},{"key":"3816_CR42","doi-asserted-by":"publisher","unstructured":"Wang, W., Dong, S., Zou, K., et al.: A lightweight network for violence detection. In: Proceedings of the 5th International Conference on Image and Graphics Processing (ICIGP \u201822). Association for Computing Machinery, New York, NY, USA, 15\u201321, (2022). 
https:\/\/doi.org\/10.1145\/3512388.3512391","DOI":"10.1145\/3512388.3512391"},{"key":"3816_CR43","doi-asserted-by":"publisher","first-page":"103739","DOI":"10.1016\/j.cviu.2023.103739","volume":"233","author":"G Guillermo","year":"2023","unstructured":"Guillermo, G., SanMiguel, C.J.: Human skeletons and change detection for efficient violence detection in surveillance videos. Comput. Vis. Image Underst. 233, 103739 (2023). https:\/\/doi.org\/10.1016\/j.cviu.2023.103739","journal-title":"Comput. Vis. Image Underst."},{"key":"3816_CR44","doi-asserted-by":"publisher","DOI":"10.1145\/3326362","author":"Y Wang","year":"2019","unstructured":"Wang, Y., Sun, Y., Liu, Z., et al.: Dynamic graph CNN for learning on point clouds. Assoc. Comput. Mach. (ACM) (2019). https:\/\/doi.org\/10.1145\/3326362","journal-title":"Assoc. Comput. Mach. (ACM)"},{"key":"3816_CR45","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1706.02413","author":"C Qi","year":"2017","unstructured":"Qi, C., Yi, L., Su, H., et al.: PointNet++: deep hierarchical feature learning on point sets in a metric space. Adv Neural Inf Process Syst (2017). https:\/\/doi.org\/10.48550\/arXiv.1706.02413","journal-title":"Adv Neural Inf Process Syst"},{"key":"3816_CR46","doi-asserted-by":"publisher","unstructured":"Wu, W., Qi, Z., Fuxin, L.: PointConv: deep convolutional networks on 3D point clouds. 
(2018) https:\/\/doi.org\/10.48550\/arXiv.1811.07246.","DOI":"10.48550\/arXiv.1811.07246"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-03816-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-025-03816-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-03816-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T04:45:51Z","timestamp":1757133951000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-025-03816-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2,7]]},"references-count":46,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2025,8]]}},"alternative-id":["3816"],"URL":"https:\/\/doi.org\/10.1007\/s00371-025-03816-w","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"value":"0178-2789","type":"print"},{"value":"1432-2315","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,2,7]]},"assertion":[{"value":"17 January 2025","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 February 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}