{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,12]],"date-time":"2025-02-12T05:31:37Z","timestamp":1739338297177,"version":"3.37.0"},"reference-count":34,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2024,11,14]],"date-time":"2024-11-14T00:00:00Z","timestamp":1731542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,14]],"date-time":"2024-11-14T00:00:00Z","timestamp":1731542400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-024-20407-4","type":"journal-article","created":{"date-parts":[[2024,11,14]],"date-time":"2024-11-14T11:43:58Z","timestamp":1731584638000},"page":"2805-2832","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Harmonizing space\u2013time dynamics for precision in human action recognition"],"prefix":"10.1007","volume":"84","author":[{"given":"Abdul","family":"Majid","sequence":"first","affiliation":[]},{"given":"Yulin","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Anwar","family":"Ullah","sequence":"additional","affiliation":[]},{"given":"Fahim","family":"Naiz","sequence":"additional","affiliation":[]},{"given":"Muhammad","family":"Zarar","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,14]]},"reference":[{"doi-asserted-by":"crossref","unstructured":"Zhang R, Yan X (2024) Video-language graph convolutional network for human action recognition. In Proc. ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), IEEE, Seoul, Korea, Republic of, 18 March 2024","key":"20407_CR1","DOI":"10.1109\/ICASSP48485.2024.10445852"},{"doi-asserted-by":"crossref","unstructured":"Wang Y, He S. Wei X, George SA (2022) Research on an effective human action recognition model based on 3D CNN. In 2022 15th International Congress on Image and Signal Processing, BioMedical Engineering and Informatics (CISP-BMEI) (pp. 1-6). IEEE, Beijing, China, 21 December 2022","key":"20407_CR2","DOI":"10.1109\/CISP-BMEI56279.2022.9980092"},{"doi-asserted-by":"crossref","unstructured":"Min L, Yang B (2023) Skeleton-based human action recognition in low-resolution infrared images. In 2023 IEEE 18th Conference on Industrial Electronics and Applications (ICIEA) (pp. 1363-1368). IEEE, Ningbo, China, 11 September 2023","key":"20407_CR3","DOI":"10.1109\/ICIEA58696.2023.10241907"},{"doi-asserted-by":"crossref","unstructured":"Kim JS (2022) Efficient human action recognition with dual-action neural networks for virtual sports training. In 2022 IEEE International Conference on Consumer Electronics-Asia (ICCE-Asia) (pp. 1-3). IEEE, Yeosu, Korea, Republic of, 28 November 2022","key":"20407_CR4","DOI":"10.1109\/ICCE-Asia57006.2022.9954758"},{"doi-asserted-by":"crossref","unstructured":"Li Z, Guo H, Chau LP, Tan CH, Ma X, Lin D, Yap KH (2023) Object-augmented skeleton-based action recognition. In 2023 IEEE 5th International Conference on Artificial Intelligence Circuits and Systems (AICAS) (pp. 1-4). 
IEEE, Hangzhou, China, 07 July 2023","key":"20407_CR5","DOI":"10.1109\/AICAS57966.2023.10168565"},{"doi-asserted-by":"crossref","unstructured":"Tanigawa R, Ishii Y (2024) hear-your-action: human action recognition by ultrasound active sensing. In ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP) (pp. 7260-7264). IEEE, Seoul, Korea, Republic of, 18 March 2024","key":"20407_CR6","DOI":"10.1109\/ICASSP48485.2024.10447130"},{"doi-asserted-by":"crossref","unstructured":"Lee HW, Hsieh SL, Tsai MF (2023) Action recognition with multiple people using long short-term memory. In 2023 International Conference on Consumer Electronics-Taiwan (ICCE-Taiwan) (pp. 127-128). IEEE, PingTung, Taiwan, 31 August 2023","key":"20407_CR7","DOI":"10.1109\/ICCE-Taiwan58799.2023.10226656"},{"doi-asserted-by":"crossref","unstructured":"Nasaoui H, Bellamine I, Silkan H (2023) Improving human action recognition in videos with two-stream and self-attention module. In 2023 7th IEEE Congress on Information Science and Technology (CiSt) (pp. 215-220). IEEE, Agadir - Essaouira, Morocco, 05 February 2024","key":"20407_CR8","DOI":"10.1109\/CiSt56084.2023.10409877"},{"doi-asserted-by":"crossref","unstructured":"Le VD, Nghiem TL, Le TL (2023) Accurate continuous action and gesture recognition method based on skeleton and sliding windows techniques. In 2023 Asia Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC) (pp. 284-290). IEEE, Taipei, Taiwan, 20 November 2023","key":"20407_CR9","DOI":"10.1109\/APSIPAASC58517.2023.10317368"},{"doi-asserted-by":"crossref","unstructured":"Zhang Y, Hui J, Zhou T, Zhang K, Ding K, Wang W (2022) Efficient skeleton-based human assembly action recognition optimized by data augmentation. In 2022 5th World Conference on Mechanical Engineering and Intelligent Manufacturing (WCMEIM) (pp. 864-867). IEEE, Ma'anshan, China, 27 January 2023","key":"20407_CR10","DOI":"10.1109\/WCMEIM56910.2022.10021380"},{"doi-asserted-by":"crossref","unstructured":"Kilis N, Papaioannidis C, Mademlis I, Pitas I (2022) An efficient framework for human action recognition based on graph convolutional networks. In 2022 IEEE International Conference on Image Processing (ICIP) (pp. 1441-1445). IEEE, Bordeaux, France, 18 October 2022","key":"20407_CR11","DOI":"10.1109\/ICIP46576.2022.9897258"},{"doi-asserted-by":"crossref","unstructured":"Waheed S, Amin R, Iqbal J, Hussain M, Bashir MA (2023) An automated human action recognition and classification framework using deep learning. In 2023 4th International Conference on Computing, Mathematics and Engineering Technologies (iCoMET) (pp. 1-5). IEEE, Sukkur, Pakistan, 20 April 2023","key":"20407_CR12","DOI":"10.1109\/iCoMET57998.2023.10099190"},{"doi-asserted-by":"crossref","unstructured":"Park J, Kim J, Gil Y, Kim D (2024) DGU-HAO: A Dataset With Daily Life Objects for Comprehensive 3D Human Action Analysis. IEEE Access","key":"20407_CR13","DOI":"10.1109\/ACCESS.2024.3351888"},{"doi-asserted-by":"crossref","unstructured":"Li Y, Wang Y, Liang S, Yan S (2023) Human action recognition based on deep neural networks. In 2023 4th International Symposium on Computer Engineering and Intelligent Communications (ISCEIC) (pp. 207-210). 
IEEE, Nanjing, China, 09 October 2023","key":"20407_CR14","DOI":"10.1109\/ISCEIC59030.2023.10271183"},{"doi-asserted-by":"crossref","unstructured":"Benhamida L, Larabi S (2022) Human action recognition and coding based on skeleton data for visually impaired and blind people aid system. In 2022 First International Conference on Computer Communications and Intelligent Systems (I3CIS) (pp. 49-54). IEEE, Jijel, Algeria, 28 March 2023","key":"20407_CR15","DOI":"10.1109\/I3CIS56626.2022.10075662"},{"doi-asserted-by":"crossref","unstructured":"Zheng J, Xu Z, Li B (2022) An action recognition method based on the voxelization of point cloud from FMCW radar. In 2022 Cross Strait Radio Science & Wireless Technology Conference (CSRSWTC) (pp. 1-3). IEEE, Haidian, China, 17 April 2023","key":"20407_CR16","DOI":"10.1109\/CSRSWTC56224.2022.10098313"},{"doi-asserted-by":"crossref","unstructured":"Gupta D, Singh AK, Gupta N, Vishwakarma DK (2023) SDL-Net: a combined CNN & RNN human activity recognition model. In 2023 International Conference in Advances in Power, Signal, and Information Technology (APSIT) (pp. 1-5). IEEE, Bhubaneswar, India, 09 August 2023","key":"20407_CR17","DOI":"10.1109\/APSIT58554.2023.10201657"},{"doi-asserted-by":"crossref","unstructured":"Sathya R, Mythili M, Ananthi S, Asitha R, Vardhini VN, Shivaani M (2023) Intelligent video surveillance system for real time effective human action recognition using deep learning techniques. In 2023 2nd International Conference on Automation, Computing and Renewable Systems (ICACRS) (pp. 1826-1831). IEEE, Pudukkottai, India, 26 January 2024","key":"20407_CR18","DOI":"10.1109\/ICACRS58579.2023.10404670"},{"key":"20407_CR19","doi-asserted-by":"publisher","first-page":"42769","DOI":"10.1109\/ACCESS.2024.3378515","volume":"12","author":"M Karim","year":"2024","unstructured":"Karim M, Khalid S, Aleryani A, Tairan N, Ali Z, Ali F (2024) HADE: exploiting human action recognition through fine-tuned deep learning methods. IEEE Access 12:42769\u201342790","journal-title":"IEEE Access"},{"doi-asserted-by":"crossref","unstructured":"Jiang B, Tian Z, Li Z, Liu K (2023) A discriminative probability-based weighted algorithm for human action recognition. In 2023 IEEE 11th Asia-Pacific Conference on Antennas and Propagation (APCAP) (pp. 1-2). IEEE, Guangzhou, China, 21 March 2024","key":"20407_CR20","DOI":"10.1109\/APCAP59480.2023.10470028"},{"doi-asserted-by":"crossref","unstructured":"Wang J, Zhang T, Wu X, Zeng L (2023) A dataset and system for service robot action interaction based on skeleton action recognition. In 2023 8th International Conference on Signal and Image Processing (ICSIP) (pp. 41-48). IEEE, Wuxi, China, 09 October 2023","key":"20407_CR21","DOI":"10.1109\/ICSIP57908.2023.10271042"},{"key":"20407_CR22","doi-asserted-by":"publisher","first-page":"4989","DOI":"10.1109\/TIP.2023.3308750","volume":"32","author":"F Guo","year":"2023","unstructured":"Guo F, Jin T, Zhu S, Xi X, Wang W, Meng Q, Song W, Zhu J (2023) B2C-AFM: Bi-Directional Co-Temporal and cross-spatial attention fusion model for human action recognition. IEEE Trans Image Process 32:4989\u20135003","journal-title":"IEEE Trans Image Process"},{"key":"20407_CR23","doi-asserted-by":"publisher","first-page":"51930","DOI":"10.1109\/ACCESS.2023.3278974","volume":"11","author":"A Ghimire","year":"2023","unstructured":"Ghimire A, Kakani V, Kim H (2023) SSRT: a sequential skeleton RGB transformer to recognize fine-grained human-object interactions and action recognition. 
IEEE Access 11:51930\u201351948","journal-title":"IEEE Access"},{"doi-asserted-by":"crossref","unstructured":"Pei Y, Zhang X (2023) Research on human action recognition model based on computer laplacian matrix and convolutional neural network. In 2023 IEEE 6th International Conference on Information Systems and Computer Aided Education (ICISCAE) (pp. 1-6). IEEE, Dalian, China, 22 January 2024","key":"20407_CR24","DOI":"10.1109\/ICISCAE59047.2023.10392617"},{"doi-asserted-by":"crossref","unstructured":"Patil AA, Swaminathan A, Gayathri R (2022) Human action recognition using Skeleton features. In 2022 IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct) (pp. 289-296). IEEE, Singapore, Singapore, 15 December 2022","key":"20407_CR25","DOI":"10.1109\/ISMAR-Adjunct57072.2022.00066"},{"doi-asserted-by":"crossref","unstructured":"Choi B, An W, Kang H (2022) Human action recognition method using YOLO and OpenPose. In 2022 13th International Conference on Information and Communication Technology Convergence (ICTC) (pp. 1786-1788). IEEE, Jeju Island, Korea, Republic of, 25 November 2022","key":"20407_CR26","DOI":"10.1109\/ICTC55196.2022.9952808"},{"doi-asserted-by":"crossref","unstructured":"Zhang S, Jiang T, Ding X, Zhong Y, Jia H (2023) Federated learning-based framework for cross-environment human action recognition using Wi-Fi signal. In 2023 IEEE Globecom Workshops (GC Wkshps) (pp. 638-643). IEEE, Kuala Lumpur, Malaysia, 21 March 2024","key":"20407_CR27","DOI":"10.1109\/GCWkshps58843.2023.10465134"},{"doi-asserted-by":"crossref","unstructured":"Iodice F, De Momi E, Ajoudani A (2022) Hri30: an action recognition dataset for industrial human-robot interaction. In 2022 26th International Conference on Pattern Recognition (ICPR) (pp. 4941-4947). IEEE, Montreal, QC, Canada, 29 November 2022","key":"20407_CR28","DOI":"10.1109\/ICPR56361.2022.9956300"},{"doi-asserted-by":"crossref","unstructured":"Pandey S, Kumar N (2023) enhancing human action recognition in high-resolution videos using ConvLSTM and LRCN model. In 2023 4th International Conference on Communication, Computing and Industry 6.0 (C216) (pp. 1-5). IEEE, Bangalore, India, 19 February 2024","key":"20407_CR29","DOI":"10.1109\/C2I659362.2023.10430885"},{"doi-asserted-by":"crossref","unstructured":"Nair SAL, Megalingam RK (2022) Fusion of bag of visual words with neural network for human action recognition. In 2022 12th International Conference on Cloud Computing, Data Science & Engineering (Confluence) (pp. 14-19). IEEE, Noida, India, 21 March 2022","key":"20407_CR30","DOI":"10.1109\/Confluence52989.2022.9734221"},{"issue":"4","key":"20407_CR31","doi-asserted-by":"publisher","first-page":"1609","DOI":"10.1109\/TNNLS.2020.3043002","volume":"33","author":"J Liu","year":"2020","unstructured":"Liu J, Akhtar N, Mian A (2020) Adversarial attack on skeleton-based human action recognition. IEEE Trans Neural Netw Learn Syst 33(4):1609\u20131622","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"doi-asserted-by":"crossref","unstructured":"Liu Y, Cheng D, Zhang D, Xu S, Han J (2024) Capsule networks with residual pose routing.\u00a0IEEE Trans Neural Netw Learn Syst","key":"20407_CR32","DOI":"10.1109\/TNNLS.2023.3347722"},{"issue":"7","key":"20407_CR33","first-page":"3688","volume":"44","author":"Y Liu","year":"2021","unstructured":"Liu Y et al (2021) Part-object relational visual saliency. 
IEEE Trans Pattern Anal Mach Intell 44(7):3688\u20133704","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"doi-asserted-by":"crossref","unstructured":"Liu Y, Zhou L, Wu G, Xu S, Han J (2023) Tcgnet: Type-correlation guidance for salient object detection. IEEE Trans Intell Transp Syst","key":"20407_CR34","DOI":"10.1109\/TITS.2023.3342811"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-20407-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-024-20407-4\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-20407-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,11]],"date-time":"2025-02-11T21:31:03Z","timestamp":1739309463000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-024-20407-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,14]]},"references-count":34,"journal-issue":{"issue":"5","published-online":{"date-parts":[[2025,2]]}},"alternative-id":["20407"],"URL":"https:\/\/doi.org\/10.1007\/s11042-024-20407-4","relation":{},"ISSN":["1573-7721"],"issn-type":[{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2024,11,14]]},"assertion":[{"value":"8 June 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 August 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 October 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 November 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declaration"}},{"value":"The authors declare that they have no conflicts of interest to report regarding the present study.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}
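The record above is a single "work" object from the public Crossref REST API (https://api.crossref.org/works/{DOI}), wrapped in a {"status", "message-type", "message"} envelope. Below is a minimal Python sketch of fetching and summarizing such a record, using only the standard library; the User-Agent contact address is a placeholder and the summarize helper is illustrative, not part of the record or of any Crossref client library.

import json
import urllib.request

# DOI taken from the record above; the works endpoint returns the same envelope.
DOI = "10.1007/s11042-024-20407-4"
URL = "https://api.crossref.org/works/" + DOI

def fetch_work(url):
    # Crossref asks clients to identify themselves; the mailto below is a placeholder.
    req = urllib.request.Request(
        url, headers={"User-Agent": "metadata-example/0.1 (mailto:you@example.org)"}
    )
    with urllib.request.urlopen(req, timeout=30) as resp:
        envelope = json.load(resp)
    # Envelope shape mirrors the record: {"status": "ok", "message-type": "work", "message": {...}}
    if envelope.get("status") != "ok":
        raise RuntimeError("unexpected Crossref response status: %r" % envelope.get("status"))
    return envelope["message"]

def summarize(work):
    # "title" is a list; "issued" nests the date as "date-parts": [[year, month, day]].
    title = work.get("title", ["(untitled)"])[0]
    authors = ", ".join(
        ("%s %s" % (a.get("given", ""), a.get("family", ""))).strip()
        for a in work.get("author", [])
    )
    year = work.get("issued", {}).get("date-parts", [[None]])[0][0]
    return "%s (%s) %s. doi:%s, %s references" % (
        authors, year, title, work.get("DOI"), work.get("reference-count")
    )

if __name__ == "__main__":
    print(summarize(fetch_work(URL)))

Run against this DOI, the script would print the author list, year, title, DOI, and the reference count (34) parsed from the same fields shown in the record above.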