{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,14]],"date-time":"2026-02-14T07:32:42Z","timestamp":1771054362445,"version":"3.50.1"},"reference-count":75,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2025,2,6]],"date-time":"2025-02-06T00:00:00Z","timestamp":1738800000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,2,6]],"date-time":"2025-02-06T00:00:00Z","timestamp":1738800000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. Syst."],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1007\/s40747-024-01774-9","type":"journal-article","created":{"date-parts":[[2025,2,6]],"date-time":"2025-02-06T07:04:56Z","timestamp":1738825496000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Manet: motion-aware network for video action recognition"],"prefix":"10.1007","volume":"11","author":[{"given":"Xiaoyang","family":"Li","sequence":"first","affiliation":[]},{"given":"Wenzhu","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Kanglin","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Tiebiao","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Chen","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,6]]},"reference":[{"key":"1774_CR1","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2020.106605","volume":"212","author":"M Zhang","year":"2021","unstructured":"Zhang M, Tian G, Zhang Y, Duan P (2021) Service skill improvement for home robots: autonomous generation of action sequence based on reinforcement learning. Knowl-Based Syst 212:106605. https:\/\/doi.org\/10.1016\/j.knosys.2020.106605","journal-title":"Knowl-Based Syst"},{"key":"1774_CR2","doi-asserted-by":"publisher","unstructured":"Wang Z, She Q, Smolic A (2021) Action-net: Multipath excitation for action recognition. In: 2021 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 13209\u201313218. https:\/\/doi.org\/10.1109\/CVPR46437.2021.01301","DOI":"10.1109\/CVPR46437.2021.01301"},{"issue":"9","key":"1774_CR3","doi-asserted-by":"publisher","first-page":"3059","DOI":"10.1007\/s13042-023-01820-x","volume":"14","author":"Z Jiang","year":"2023","unstructured":"Jiang Z, Zhang Y, Hu S (2023) Esti: an action recognition network with enhanced spatio-temporal information. Int J Mach Learn Cybern 14(9):3059\u20133070. https:\/\/doi.org\/10.1007\/s13042-023-01820-x","journal-title":"Int J Mach Learn Cybern"},{"issue":"3","key":"1774_CR4","doi-asserted-by":"publisher","first-page":"1250","DOI":"10.1109\/TCSVT.2021.3077512","volume":"32","author":"H Wu","year":"2022","unstructured":"Wu H, Ma X, Li Y (2022) Spatiotemporal multimodal learning with 3d cnns for video action recognition. IEEE Trans Circuits Syst Video Technol 32(3):1250\u20131261. 
https:\/\/doi.org\/10.1109\/TCSVT.2021.3077512","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"issue":"3","key":"1774_CR5","doi-asserted-by":"publisher","first-page":"1498","DOI":"10.1109\/TCSVT.2021.3076165","volume":"32","author":"J Cheng","year":"2022","unstructured":"Cheng J, Ren Z, Zhang Q, Gao X, Hao F (2022) Cross-modality compensation convolutional neural networks for rgb-d action recognition. IEEE Trans Circuits Syst Video Technol 32(3):1498\u20131509. https:\/\/doi.org\/10.1109\/TCSVT.2021.3076165","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"1774_CR6","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1016\/j.patcog.2018.01.020","volume":"79","author":"Z Tu","year":"2018","unstructured":"Tu Z, Xie W, Qin Q, Poppe R, Veltkamp RC, Li B, Yuan J (2018) Multi-stream cnn: Learning representations based on human-related regions for action recognition. Pattern Recogn 79:32\u201343. https:\/\/doi.org\/10.1016\/j.patcog.2018.01.020","journal-title":"Pattern Recogn"},{"key":"1774_CR7","doi-asserted-by":"publisher","unstructured":"Karpathy A, Toderici G, Shetty S, Leung T, Sukthankar R, Fei-Fei L (2014) Large-scale video classification with convolutional neural networks. In: 2014 IEEE conference on computer vision and pattern recognition, pp 1725\u20131732. https:\/\/doi.org\/10.1109\/CVPR.2014.223","DOI":"10.1109\/CVPR.2014.223"},{"issue":"3","key":"1774_CR8","doi-asserted-by":"publisher","first-page":"748","DOI":"10.1109\/TCSVT.2019.2896029","volume":"30","author":"X Song","year":"2020","unstructured":"Song X, Lan C, Zeng W, Xing J, Sun X, Yang J (2020) Temporal-spatial mapping for action recognition. IEEE Trans Circuits Syst Video Technol 30(3):748\u2013759. https:\/\/doi.org\/10.1109\/TCSVT.2019.2896029","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"issue":"4","key":"1774_CR9","doi-asserted-by":"publisher","first-page":"2943","DOI":"10.1609\/aaai.v35i4.16401","volume":"35","author":"W Wu","year":"2021","unstructured":"Wu W, He D, Lin T, Li F, Gan C, Ding E (2021) Mvfnet: multi-view fusion network for efficient video recognition. Proc AAAI Conf Artif Intell 35(4):2943\u20132951. https:\/\/doi.org\/10.1609\/aaai.v35i4.16401","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"1774_CR10","doi-asserted-by":"publisher","unstructured":"Hao Y, Zhang H, Ngo C-W, He X (2022) Group contextualization for video recognition. In: 2022 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 918\u2013928. https:\/\/doi.org\/10.1109\/CVPR52688.2022.00100","DOI":"10.1109\/CVPR52688.2022.00100"},{"issue":"5","key":"1774_CR11","doi-asserted-by":"publisher","first-page":"3073","DOI":"10.1109\/TCSVT.2021.3100842","volume":"32","author":"H Luo","year":"2022","unstructured":"Luo H, Lin G, Yao Y, Tang Z, Wu Q, Hua X (2022) Dense semantics-assisted networks for video action recognition. IEEE Trans Circuits Syst Video Technol 32(5):3073\u20133084. https:\/\/doi.org\/10.1109\/TCSVT.2021.3100842","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"issue":"5","key":"1774_CR12","doi-asserted-by":"publisher","first-page":"3050","DOI":"10.1109\/TCSVT.2021.3098839","volume":"32","author":"H Wang","year":"2022","unstructured":"Wang H, Yu B, Li J, Zhang L, Chen D (2022) Multi-stream interaction networks for human action recognition. IEEE Trans Circuits Syst Video Technol 32(5):3050\u20133060. 
https:\/\/doi.org\/10.1109\/TCSVT.2021.3098839","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"1774_CR13","doi-asserted-by":"publisher","unstructured":"Feichtenhofer C, Fan H, Malik J, He K (2019) Slowfast networks for video recognition. In: 2019 IEEE\/CVF international conference on computer vision (ICCV), pp 6201\u20136210. https:\/\/doi.org\/10.1109\/ICCV.2019.00630","DOI":"10.1109\/ICCV.2019.00630"},{"key":"1774_CR14","doi-asserted-by":"publisher","unstructured":"Feichtenhofer C (2020) X3d: Expanding architectures for efficient video recognition. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 200\u2013210. https:\/\/doi.org\/10.1109\/CVPR42600.2020.00028","DOI":"10.1109\/CVPR42600.2020.00028"},{"key":"1774_CR15","doi-asserted-by":"publisher","unstructured":"Li M, Xu X, Fan H, Zhou P, Liu J, Liu J-W, Li J, Keppo J, Shou MZ, Yan S (2023) Stprivacy: Spatio-temporal privacy-preserving action recognition. In: 2023 IEEE\/CVF international conference on computer vision (ICCV), pp 5083\u20135092. https:\/\/doi.org\/10.1109\/ICCV51070.2023.00471","DOI":"10.1109\/ICCV51070.2023.00471"},{"key":"1774_CR16","doi-asserted-by":"publisher","unstructured":"Wasim ST, Khattak MU, Naseer M, Khan S, Shah M, Khan FS (2023) Video-focalnets: Spatio-temporal focal modulation for video action recognition. In: 2023 IEEE\/cvf international conference on computer vision (ICCV), pp 13732\u201313743. https:\/\/doi.org\/10.1109\/ICCV51070.2023.01267","DOI":"10.1109\/ICCV51070.2023.01267"},{"key":"1774_CR17","doi-asserted-by":"publisher","unstructured":"Lin J, Gan C, Han S (2019) Tsm: Temporal shift module for efficient video understanding. In: 2019 IEEE\/CVF international conference on computer vision (ICCV), pp 7082\u20137092. https:\/\/doi.org\/10.1109\/ICCV.2019.00718","DOI":"10.1109\/ICCV.2019.00718"},{"key":"1774_CR18","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.120683","volume":"232","author":"L Hu","year":"2023","unstructured":"Hu L, Liu S, Feng W (2023) Skeleton-based action recognition with local dynamic spatial-temporal aggregation. Expert Syst Appl 232:120683. https:\/\/doi.org\/10.1016\/j.eswa.2023.120683","journal-title":"Expert Syst Appl"},{"key":"1774_CR19","doi-asserted-by":"publisher","unstructured":"Wang L, Tong Z, Ji B, Wu G (2021) Tdn: Temporal difference networks for efficient action recognition. In: 2021 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 1895\u20131904. https:\/\/doi.org\/10.1109\/CVPR46437.2021.00193","DOI":"10.1109\/CVPR46437.2021.00193"},{"key":"1774_CR20","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.111852","volume":"296","author":"X Gao","year":"2024","unstructured":"Gao X, Chang Z, Ran X, Lu Y (2024) Canet: Comprehensive attention network for video-based action recognition. Knowl-Based Syst 296:111852. https:\/\/doi.org\/10.1016\/j.knosys.2024.111852","journal-title":"Knowl-Based Syst"},{"issue":"8","key":"1774_CR21","doi-asserted-by":"publisher","first-page":"3912","DOI":"10.1109\/TCSVT.2023.3235522","volume":"33","author":"Y Chen","year":"2023","unstructured":"Chen Y, Ge H, Liu Y, Cai X, Sun L (2023) Agpn: Action granularity pyramid network for video action recognition. IEEE Trans Circuits Syst Video Technol 33(8):3912\u20133923. 
https:\/\/doi.org\/10.1109\/TCSVT.2023.3235522","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"issue":"12","key":"1774_CR22","doi-asserted-by":"publisher","first-page":"18731","DOI":"10.1109\/TNNLS.2023.3321141","volume":"35","author":"Q Wang","year":"2024","unstructured":"Wang Q, Hu Q, Gao Z, Li P, Hu Q (2024) Ams-net: Modeling adaptive multi-granularity spatio-temporal cues for video action recognition. IEEE Trans Neural Netw Learn Syst 35(12):18731\u201318745. https:\/\/doi.org\/10.1109\/TNNLS.2023.3321141","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"10","key":"1774_CR23","doi-asserted-by":"publisher","first-page":"1943","DOI":"10.1177\/01423312231225782","volume":"46","author":"Y Tao","year":"2024","unstructured":"Tao Y, Tao H, Zhuang Z, Stojanovic V, Paszke W (2024) Quantized iterative learning control of communication-constrained systems with encoding and decoding mechanism. Trans Inst Meas Control 46(10):1943\u20131954. https:\/\/doi.org\/10.1177\/01423312231225782","journal-title":"Trans Inst Meas Control"},{"key":"1774_CR24","doi-asserted-by":"publisher","DOI":"10.1016\/j.cnsns.2024.107945","volume":"132","author":"X Song","year":"2024","unstructured":"Song X, Peng Z, Song S, Stojanovic V (2024) Anti-disturbance state estimation for pdt-switched rdnns utilizing time-sampling and space-splitting measurements. Commun Nonlinear Sci Numer Simul 132:107945. https:\/\/doi.org\/10.1016\/j.cnsns.2024.107945","journal-title":"Commun Nonlinear Sci Numer Simul"},{"issue":"6","key":"1774_CR25","doi-asserted-by":"publisher","first-page":"7451","DOI":"10.1007\/s40747-023-01135-y","volume":"9","author":"Z Peng","year":"2023","unstructured":"Peng Z, Song X, Song S, Stojanovic V (2023) Hysteresis quantified control for switched reaction-diffusion systems and its application. Compl Intell Syst 9(6):7451\u20137460. https:\/\/doi.org\/10.1007\/s40747-023-01135-y","journal-title":"Compl Intell Syst"},{"key":"1774_CR26","doi-asserted-by":"publisher","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. In: 2018 IEEE\/CVF conference on computer vision and pattern recognition, pp 7132\u20137141. https:\/\/doi.org\/10.1109\/CVPR.2018.00745","DOI":"10.1109\/CVPR.2018.00745"},{"key":"1774_CR27","doi-asserted-by":"publisher","unstructured":"Wang Q, Wu B, Zhu P, Li P, Zuo W, Hu Q (2020) Eca-net: Efficient channel attention for deep convolutional neural networks. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 11531\u201311539. https:\/\/doi.org\/10.1109\/CVPR42600.2020.01155","DOI":"10.1109\/CVPR42600.2020.01155"},{"key":"1774_CR28","doi-asserted-by":"publisher","unstructured":"Qin Z, Zhang P, Wu F, Li X (2021) Fcanet: Frequency channel attention networks. In: 2021 IEEE\/CVF international conference on computer vision (ICCV), pp 763\u2013772. https:\/\/doi.org\/10.1109\/ICCV48922.2021.00082","DOI":"10.1109\/ICCV48922.2021.00082"},{"issue":"9","key":"1774_CR29","doi-asserted-by":"publisher","first-page":"5174","DOI":"10.1109\/TCSVT.2023.3250646","volume":"33","author":"Z Li","year":"2023","unstructured":"Li Z, Li J, Ma Y, Wang R, Shi Z, Ding Y, Liu X (2023) Spatio-temporal adaptive network with bidirectional temporal difference for action recognition. IEEE Trans Circuits Syst Video Technol 33(9):5174\u20135185. 
https:\/\/doi.org\/10.1109\/TCSVT.2023.3250646","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"1774_CR30","doi-asserted-by":"publisher","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, Weissenborn D, Zhai X, Unterthiner T, Dehghani M, Minderer M, Heigold G, Gelly S, Uszkoreit J, Houlsby N (2021) An image is worth 16x16 words: Transformers for image recognition at scale. In: International conference on learning representations. https:\/\/doi.org\/10.48550\/arXiv.2010.11929","DOI":"10.48550\/arXiv.2010.11929"},{"key":"1774_CR31","doi-asserted-by":"publisher","unstructured":"Liu Z, Lin Y, Cao Y, Hu H, Wei Y, Zhang Z, Lin S, Guo B (2021) Swin transformer: Hierarchical vision transformer using shifted windows. In: 2021 IEEE\/CVF international conference on computer vision (ICCV), pp 9992\u201310002. https:\/\/doi.org\/10.1109\/ICCV48922.2021.00986","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1774_CR32","doi-asserted-by":"publisher","first-page":"1091361","DOI":"10.3389\/fnbot.2022.1091361","volume":"16","author":"H Yang","year":"2022","unstructured":"Yang H, Ren Z, Yuan H, Wei W, Zhang Q, Zhang Z (2022) Multi-scale and attention enhanced graph convolution network for skeleton-based violence action recognition. Front Neurorobot 16:1091361. https:\/\/doi.org\/10.3389\/fnbot.2022.1091361","journal-title":"Front Neurorobot"},{"key":"1774_CR33","doi-asserted-by":"publisher","unstructured":"Shu Y, Li W, Li D, Gao K, Jie B (2024) Multi-scale dilated attention graph convolutional network for skeleton-based action recognition. In: Pattern recognition and computer vision, pp 16\u201328. Springer, Singapore. https:\/\/doi.org\/10.1007\/978-981-99-8429-9_2","DOI":"10.1007\/978-981-99-8429-9_2"},{"key":"1774_CR34","doi-asserted-by":"publisher","unstructured":"Tu Z, Talebi H, Zhang H, Yang F, Milanfar P, Bovik A, Li Y (2022) Maxvit: Multi-axis vision transformer. In: Computer vision \u2013 ECCV 2022, pp 459\u2013479. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-031-20053-3_27","DOI":"10.1007\/978-3-031-20053-3_27"},{"key":"1774_CR35","doi-asserted-by":"publisher","unstructured":"Lee Y, Kim J, Willette J, Hwang SJ (2022) Mpvit: Multi-path vision transformer for dense prediction. In: 2022 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 7277\u20137286. https:\/\/doi.org\/10.1109\/CVPR52688.2022.00714","DOI":"10.1109\/CVPR52688.2022.00714"},{"key":"1774_CR36","doi-asserted-by":"publisher","unstructured":"Plizzari C, Cannici M, Matteucci M (2021) Spatial temporal transformer network for skeleton-based action recognition. In: Pattern recognition. ICPR international workshops and challenges, pp 694\u2013701. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-030-68796-0_50","DOI":"10.1007\/978-3-030-68796-0_50"},{"key":"1774_CR37","doi-asserted-by":"publisher","unstructured":"Shi L, Zhang Y, Cheng J, Lu H (2021) Decoupled spatial-temporal attention network for skeleton-based action-gesture recognition. In: Computer vision \u2013 ACCV 2020, pp 38\u201353. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-030-69541-5_3","DOI":"10.1007\/978-3-030-69541-5_3"},{"issue":"8","key":"1774_CR38","doi-asserted-by":"publisher","first-page":"4137","DOI":"10.1109\/TCSVT.2023.3240472","volume":"33","author":"H Liu","year":"2023","unstructured":"Liu H, Liu Y, Chen Y, Yuan C, Li B, Hu W (2023) Transkeleton: Hierarchical spatial-temporal transformer for skeleton-based action recognition. IEEE Trans Circuits Syst Video Technol 33(8):4137\u20134148. 
https:\/\/doi.org\/10.1109\/TCSVT.2023.3240472","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"1774_CR39","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2024.123776","volume":"249","author":"H Nie","year":"2024","unstructured":"Nie H, Lu S (2024) Fedcrmw: Federated model ownership verification with compression-resistant model watermarking. Expert Syst Appl 249:123776. https:\/\/doi.org\/10.1016\/j.eswa.2024.123776","journal-title":"Expert Syst Appl"},{"issue":"7","key":"1774_CR40","doi-asserted-by":"publisher","first-page":"3362","DOI":"10.1109\/TAI.2024.3351116","volume":"5","author":"H Nie","year":"2024","unstructured":"Nie H, Lu S, Wu J, Zhu J (2024) Deep model intellectual property protection with compression-resistant model watermarking. IEEE Trans Artif Intell 5(7):3362\u20133373. https:\/\/doi.org\/10.1109\/TAI.2024.3351116","journal-title":"IEEE Trans Artif Intell"},{"key":"1774_CR41","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.111675","volume":"293","author":"H Nie","year":"2024","unstructured":"Nie H, Lu S (2024) Persistverify: Federated model ownership verification with spatial attention and boundary sampling. Knowl-Based Syst 293:111675. https:\/\/doi.org\/10.1016\/j.knosys.2024.111675","journal-title":"Knowl-Based Syst"},{"issue":"21","key":"1774_CR42","doi-asserted-by":"publisher","first-page":"10455","DOI":"10.1007\/s10489-024-05746-x","volume":"54","author":"H Nie","year":"2024","unstructured":"Nie H, Lu S (2024) Securing ip in edge ai: neural network watermarking for multimodal models. Appl Intell 54(21):10455\u201310472. https:\/\/doi.org\/10.1007\/s10489-024-05746-x","journal-title":"Appl Intell"},{"key":"1774_CR43","doi-asserted-by":"publisher","unstructured":"Nie H, Lu S, Wang M, Xiao J, Lu Z, Yi Z (2024) Verichroma: Ownership verification for federated models via rgb filters. In: Euro-Par 2024: Parallel processing, pp 332\u2013345. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-031-69766-1_23","DOI":"10.1007\/978-3-031-69766-1_23"},{"key":"1774_CR44","doi-asserted-by":"publisher","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: 2016 IEEE conference on computer vision and pattern recognition (CVPR), pp 770\u2013778. https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"1774_CR45","doi-asserted-by":"publisher","unstructured":"Zhang Z, Liniger A, Sakaridis C, Yu F, Gool LV (2023) Real-time motion prediction via heterogeneous polyline transformer with relative pose encoding. In: Thirty-seventh conference on neural information processing systems. https:\/\/doi.org\/10.48550\/arXiv.2310.12970","DOI":"10.48550\/arXiv.2310.12970"},{"key":"1774_CR46","doi-asserted-by":"publisher","unstructured":"Selvaraju RR, Cogswell M, Das A, Vedantam R, Parikh D, Batra D (2017) Grad-cam: Visual explanations from deep networks via gradient-based localization. In: 2017 IEEE international conference on computer vision (ICCV), pp. 618\u2013626. https:\/\/doi.org\/10.1109\/ICCV.2017.74","DOI":"10.1109\/ICCV.2017.74"},{"key":"1774_CR47","doi-asserted-by":"publisher","unstructured":"Goyal R, Kahou SE, Michalski V, Materzynska J, Westphal S, Kim H, Haenel V, Fruend I, Yianilos P, Mueller-Freitag M, Hoppe F, Thurau C, Bax I, Memisevic R (2017) The \u201csomething something\u201d video database for learning and evaluating visual common sense. In: 2017 IEEE international conference on computer vision (ICCV), pp 5843\u20135851. 
https:\/\/doi.org\/10.1109\/ICCV.2017.622","DOI":"10.1109\/ICCV.2017.622"},{"key":"1774_CR48","doi-asserted-by":"publisher","unstructured":"Mahdisoltani F, Berger G, Gharbieh W, Fleet D, Memisevic R (2019) On the effectiveness of task granularity for transfer learning. https:\/\/doi.org\/10.48550\/arXiv.1804.09235","DOI":"10.48550\/arXiv.1804.09235"},{"key":"1774_CR49","doi-asserted-by":"publisher","unstructured":"Materzynska J, Berger G, Bax I, Memisevic R (2019) The jester dataset: A large-scale video dataset of human gestures. In: 2019 IEEE\/CVF international conference on computer vision workshop (ICCVW), pp 2874\u20132882. https:\/\/doi.org\/10.1109\/ICCVW.2019.00349","DOI":"10.1109\/ICCVW.2019.00349"},{"key":"1774_CR50","doi-asserted-by":"publisher","unstructured":"Li Y, Li Y, Vasconcelos N (2018) Resound: towards action recognition without representation bias. In: Computer Vision \u2013 ECCV 2018, pp 520\u2013535. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-030-01231-1_32","DOI":"10.1007\/978-3-030-01231-1_32"},{"key":"1774_CR51","doi-asserted-by":"publisher","unstructured":"Soomro K, Zamir A, Shah M (2012) Ucf101: A dataset of 101 human actions classes from videos in the wild. ArXiv: 1212.0402, https:\/\/doi.org\/10.48550\/arXiv.1212.0402","DOI":"10.48550\/arXiv.1212.0402"},{"key":"1774_CR52","doi-asserted-by":"publisher","unstructured":"Carreira J, Zisserman A (2017) Quo vadis, action recognition? a new model and the kinetics dataset. In: 2017 IEEE conference on computer vision and pattern recognition (CVPR), pp 4724\u20134733. https:\/\/doi.org\/10.1109\/CVPR.2017.502","DOI":"10.1109\/CVPR.2017.502"},{"key":"1774_CR53","doi-asserted-by":"publisher","unstructured":"Li X, Wang Y, Zhou Z, Qiao Y (2020) Smallbignet: Integrating core and contextual views for video classification. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 1089\u20131098. https:\/\/doi.org\/10.1109\/CVPR42600.2020.00117","DOI":"10.1109\/CVPR42600.2020.00117"},{"key":"1774_CR54","doi-asserted-by":"publisher","unstructured":"Liu Z, Wang L, Wu W, Qian C, Lu T (2021) Tam: Temporal adaptive module for video recognition. In: 2021 IEEE\/CVF international conference on computer vision (ICCV), pp 13688\u201313698. https:\/\/doi.org\/10.1109\/ICCV48922.2021.01345","DOI":"10.1109\/ICCV48922.2021.01345"},{"issue":"07","key":"1774_CR55","doi-asserted-by":"publisher","first-page":"11669","DOI":"10.1609\/aaai.v34i07.6836","volume":"34","author":"Z Liu","year":"2020","unstructured":"Liu Z, Luo D, Wang Y, Wang L, Tai Y, Wang C, Li J, Huang F, Lu T (2020) Teinet: Towards an efficient architecture for video recognition. Proc AAAI Conf Artif Intell 34(07):11669\u201311676. https:\/\/doi.org\/10.1609\/aaai.v34i07.6836","journal-title":"Proc AAAI Conf Artif Intell"},{"key":"1774_CR56","doi-asserted-by":"publisher","unstructured":"Jiang B, Wang M, Gan W, Wu W, Yan J (2019) Stm: Spatiotemporal and motion encoding for action recognition. In: 2019 IEEE\/CVF international conference on computer vision (ICCV), pp 2000\u20132009. https:\/\/doi.org\/10.1109\/ICCV.2019.00209","DOI":"10.1109\/ICCV.2019.00209"},{"key":"1774_CR57","doi-asserted-by":"publisher","unstructured":"Li Y, Ji B, Shi X, Zhang J, Kang B, Wang L (2020) Tea: Temporal excitation and aggregation for action recognition. In: 2020 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 906\u2013915. 
https:\/\/doi.org\/10.1109\/CVPR42600.2020.00099","DOI":"10.1109\/CVPR42600.2020.00099"},{"key":"1774_CR58","doi-asserted-by":"publisher","unstructured":"Weng J, Luo D, Wang Y, Tai Y, Wang C, Li J, Huang F, Jiang X, Yuan J (2020) Temporal distinct representation learning for action recognition. In: Computer vision \u2013 ECCV 2020, pp 363\u2013378. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-030-58571-6_22","DOI":"10.1007\/978-3-030-58571-6_22"},{"key":"1774_CR59","doi-asserted-by":"publisher","unstructured":"Deng J, Dong W, Socher R, Li L-J, Li K, Fei-Fei L (2009) Imagenet: A large-scale hierarchical image database. In: 2009 IEEE conference on computer vision and pattern recognition, pp 248\u2013255. https:\/\/doi.org\/10.1109\/CVPR.2009.5206848","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"1774_CR60","doi-asserted-by":"publisher","unstructured":"Wang L, Xiong Y, Wang Z, Qiao Y, Lin D, Tang X, Van\u00a0Gool L (2016) Temporal segment networks: Towards good practices for deep action recognition. In: Computer Vision \u2013 ECCV 2016, pp 20\u201336. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-319-46484-8_2","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"1774_CR61","doi-asserted-by":"publisher","unstructured":"Fan L, Buch S, Wang G, Cao R, Zhu Y, Niebles JC, Fei-Fei L (2020) Rubiksnet: Learnable 3d-shift for efficient video action recognition. In: Computer vision \u2013 ECCV 2020, pp 505\u2013521. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-030-58529-7_30","DOI":"10.1007\/978-3-030-58529-7_30"},{"key":"1774_CR62","doi-asserted-by":"publisher","unstructured":"Lee S, Hong S (2023) D-tsm: Discriminative temporal shift module for action recognition. In: 2023 20th international conference on ubiquitous robots (UR), pp 133\u2013136. https:\/\/doi.org\/10.1109\/UR57808.2023.10202338","DOI":"10.1109\/UR57808.2023.10202338"},{"key":"1774_CR63","doi-asserted-by":"publisher","unstructured":"Bertasius G, Wang H, Torresani L (2021) Is space-time attention all you need for video understanding? In: Proceedings of the 38th international conference on machine learning. Proceedings of machine learning research, 139, 813\u2013824. https:\/\/doi.org\/10.48550\/arXiv.2102.05095","DOI":"10.48550\/arXiv.2102.05095"},{"key":"1774_CR64","doi-asserted-by":"publisher","unstructured":"Zhi Y, Tong Z, Wang L, Wu G (2021) Mgsampler: An explainable sampling strategy for video action recognition. In: 2021 IEEE\/CVF international conference on computer vision (ICCV), pp 1493\u20131502. https:\/\/doi.org\/10.1109\/ICCV48922.2021.00154","DOI":"10.1109\/ICCV48922.2021.00154"},{"key":"1774_CR65","doi-asserted-by":"publisher","unstructured":"Xian R, Wang X, Kothandaraman D, Manocha D (2024) Pmi sampler: Patch similarity guided frame selection for aerial action recognition. In: 2024 IEEE\/CVF winter conference on applications of computer vision (WACV), pp 6967\u20136976. https:\/\/doi.org\/10.1109\/WACV57701.2024.00683","DOI":"10.1109\/WACV57701.2024.00683"},{"key":"1774_CR66","doi-asserted-by":"publisher","first-page":"26839","DOI":"10.48550\/arXiv.2206.06346","volume":"35","author":"E Ben Avraham","year":"2022","unstructured":"Ben Avraham E, Herzig R, Mangalam K, Bar A, Rohrbach A, Karlinsky L, Darrell T, Globerson A (2022) Bringing image scene structure to video via frame-clip consistency of object tokens. Adv Neural Inf Process Syst 35:26839\u201326855. 
https:\/\/doi.org\/10.48550\/arXiv.2206.06346","journal-title":"Adv Neural Inf Process Syst"},{"key":"1774_CR67","doi-asserted-by":"publisher","unstructured":"Turkoglu MO, D\u2019Aronco S, Wegner JD, Schindler K (2022) Gating revisited: deep multi-layer rnns that can be trained. IEEE Trans Pattern Anal Mach Intell 44(8):4081\u20134092. https:\/\/doi.org\/10.1109\/TPAMI.2021.3064878","DOI":"10.1109\/TPAMI.2021.3064878"},{"key":"1774_CR68","doi-asserted-by":"publisher","unstructured":"Zhou B, Andonian A, Oliva A, Torralba A (2018) Temporal relational reasoning in videos. In: Computer Vision \u2013 ECCV 2018, pp 831\u2013846. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-030-01246-5_49","DOI":"10.1007\/978-3-030-01246-5_49"},{"key":"1774_CR69","doi-asserted-by":"publisher","unstructured":"Izutov E (2021) LIGAR: Lightweight general-purpose action recognition. https:\/\/doi.org\/10.48550\/arXiv.2108.13153","DOI":"10.48550\/arXiv.2108.13153"},{"key":"1774_CR70","doi-asserted-by":"publisher","unstructured":"Arnab A, Dehghani M, Heigold G, Sun C, Lu\u010di\u0107 M, Schmid C (2021) Vivit: A video vision transformer. In: 2021 IEEE\/CVF international conference on computer vision (ICCV), pp 6816\u20136826. https:\/\/doi.org\/10.1109\/ICCV48922.2021.00676","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"1774_CR71","doi-asserted-by":"publisher","unstructured":"Sudhakaran S, Lanz O (2018) Top-down attention recurrent vlad encoding for action recognition in videos. In: AI*IA 2018 \u2013 advances in artificial intelligence, pp 375\u2013386. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-030-03840-3_28","DOI":"10.1007\/978-3-030-03840-3_28"},{"key":"1774_CR72","doi-asserted-by":"publisher","unstructured":"Srivastava N, Mansimov E, Salakhutdinov R (2015) Unsupervised learning of video representations using lstms. In: Proceedings of the 32nd international conference on international conference on machine learning - 37, 843\u2013852. https:\/\/doi.org\/10.48550\/arXiv.1502.04681","DOI":"10.48550\/arXiv.1502.04681"},{"key":"1774_CR73","doi-asserted-by":"publisher","unstructured":"Donahue J, Hendricks LA, Guadarrama S, Rohrbach M, Venugopalan S, Darrell T, Saenko K (2015) Long-term recurrent convolutional networks for visual recognition and description. In: 2015 IEEE conference on computer vision and pattern recognition (CVPR), pp 2625\u20132634. https:\/\/doi.org\/10.1109\/CVPR.2015.7298878","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"1774_CR74","doi-asserted-by":"publisher","unstructured":"Ma N, Zhang X, Zheng H-T, Sun J (2018) Shufflenet v2: Practical guidelines for efficient cnn architecture design. In: Computer Vision \u2013 ECCV 2018, pp 122\u2013138. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-030-01264-9_8","DOI":"10.1007\/978-3-030-01264-9_8"},{"key":"1774_CR75","doi-asserted-by":"publisher","unstructured":"Sandler M, Howard A, Zhu M, Zhmoginov A, Chen L-C (2018) Mobilenetv2: Inverted residuals and linear bottlenecks. In: 2018 IEEE\/CVF conference on computer vision and pattern recognition, pp 4510\u20134520. 
https:\/\/doi.org\/10.1109\/CVPR.2018.00474","DOI":"10.1109\/CVPR.2018.00474"}],"container-title":["Complex &amp; Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01774-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-024-01774-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01774-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,4]],"date-time":"2025-03-04T12:07:32Z","timestamp":1741090052000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-024-01774-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2,6]]},"references-count":75,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2025,3]]}},"alternative-id":["1774"],"URL":"https:\/\/doi.org\/10.1007\/s40747-024-01774-9","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"value":"2199-4536","type":"print"},{"value":"2198-6053","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,2,6]]},"assertion":[{"value":"8 July 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 December 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 February 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Statements and Declarations"}}],"article-number":"167"}}