{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,8]],"date-time":"2026-02-08T03:53:29Z","timestamp":1770522809651,"version":"3.49.0"},"reference-count":41,"publisher":"Springer Science and Business Media LLC","issue":"20","license":[{"start":{"date-parts":[[2023,7,5]],"date-time":"2023-07-05T00:00:00Z","timestamp":1688515200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,7,5]],"date-time":"2023-07-05T00:00:00Z","timestamp":1688515200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62172267"],"award-info":[{"award-number":["62172267"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2023,10]]},"DOI":"10.1007\/s10489-023-04756-5","type":"journal-article","created":{"date-parts":[[2023,7,4]],"date-time":"2023-07-04T23:02:19Z","timestamp":1688511739000},"page":"23039-23048","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":7,"title":["Space or time for video classification transformers"],"prefix":"10.1007","volume":"53","author":[{"given":"Xing","family":"Wu","sequence":"first","affiliation":[]},{"given":"Chenjie","family":"Tao","sequence":"additional","affiliation":[]},{"given":"Jian","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Qun","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Jianjia","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Weimin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yue","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Yike","family":"Guo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,7,5]]},"reference":[{"key":"4756_CR1","doi-asserted-by":"crossref","unstructured":"Wu, X., Tang, B., Zhao, M., Wang, J., Guo, Y.: Str transformer: A cross-domain transformer for scene text recognition. Applied Intelligence, 1\u201315 (2022)","DOI":"10.1007\/s10489-022-03728-5"},{"key":"4756_CR2","doi-asserted-by":"crossref","unstructured":"Wu, X., Zhang, Y., Li, Q., Qi, Y., Wang, J., Guo, Y.: Face aging with pixel-level alignment gan. Applied Intelligence, 1\u201314 (2022)","DOI":"10.1007\/s10489-022-03541-0"},{"issue":"5","key":"4756_CR3","doi-asserted-by":"publisher","first-page":"1366","DOI":"10.1007\/s11263-022-01594-9","volume":"130","author":"Y Kong","year":"2022","unstructured":"Kong Y, Fu Y (2022) Human action recognition and prediction: A survey. International Journal of Computer Vision 130(5):1366\u20131401","journal-title":"International Journal of Computer Vision"},{"key":"4756_CR4","doi-asserted-by":"crossref","unstructured":"Islam, M.M., Nooruddin, S., Karray, F., Muhammad, G.: Human activity recognition using tools of convolutional neural networks: A state of the art review, data sets, challenges, and future prospects. 
Computers in Biology and Medicine, 106060 (2022)","DOI":"10.1016\/j.compbiomed.2022.106060"},{"key":"4756_CR5","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. Advances in neural information processing systems 30 (2017)"},{"key":"4756_CR6","doi-asserted-by":"crossref","unstructured":"Islam, M.M., Bertasius, G.: Long movie clip classification with state-space video models. In: Computer Vision\u2013ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XXXV, pp. 87\u2013104 (2022). Springer","DOI":"10.1007\/978-3-031-19833-5_6"},{"key":"4756_CR7","doi-asserted-by":"crossref","unstructured":"Wang, X., Girshick, R., Gupta, A., He, K.: Non-local neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7794\u20137803 (2018)","DOI":"10.1109\/CVPR.2018.00813"},{"key":"4756_CR8","doi-asserted-by":"crossref","unstructured":"Wang, X., Xiong, X., Neumann, M., Piergiovanni, A., Ryoo, M.S., Angelova, A., Kitani, K.M., Hua, W.: Attentionnas: Spatiotemporal attention cell search for video classification. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part VIII 16, pp. 449\u2013465 (2020). Springer","DOI":"10.1007\/978-3-030-58598-3_27"},{"key":"4756_CR9","unstructured":"Kenton, J.D.M.-W.C., Toutanova, L.K.: Bert: Pre-training of deep bidirectional transformers for language understanding. In: Proceedings of NAACL-HLT, pp. 4171\u20134186 (2019)"},{"key":"4756_CR10","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown T, Mann B, Ryder N, Subbiah M, Kaplan JD, Dhariwal P, Neelakantan A, Shyam P, Sastry G, Askell A et al (2020) Language models are few-shot learners. Advances in neural information processing systems 33:1877\u20131901","journal-title":"Advances in neural information processing systems"},{"key":"4756_CR11","unstructured":"Fedus, W., Zoph, B., Shazeer, N.: Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity (2021)"},{"key":"4756_CR12","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2020.101913","volume":"68","author":"X Wu","year":"2021","unstructured":"Wu X, Chen C, Zhong M, Wang J, Shi J (2021) Covid-al: The diagnosis of covid-19 with deep active learning. Medical Image Analysis 68:101913","journal-title":"Medical Image Analysis"},{"key":"4756_CR13","unstructured":"Wu, X., Ji, S., Wang, J., Guo, Y.: Speech synthesis with face embeddings. Applied Intelligence, 1\u201314 (2022)"},{"issue":"5","key":"4756_CR14","doi-asserted-by":"publisher","first-page":"5817","DOI":"10.1007\/s10489-021-02687-7","volume":"52","author":"X Lan","year":"2022","unstructured":"Lan X, Gu X, Gu X (2022) Mmnet: Multi-modal multi-stage network for rgb-t image semantic segmentation. Applied Intelligence 52(5):5817\u20135829","journal-title":"Applied Intelligence"},{"key":"4756_CR15","doi-asserted-by":"publisher","DOI":"10.1016\/j.jvcir.2021.103344","volume":"81","author":"C Leng","year":"2021","unstructured":"Leng C, Ding Q, Wu C, Chen A (2021) Augmented two stream network for robust action recognition adaptive to various action videos. 
Journal of Visual Communication and Image Representation 81:103344","journal-title":"Journal of Visual Communication and Image Representation"},{"issue":"7","key":"4756_CR16","doi-asserted-by":"publisher","first-page":"1821","DOI":"10.1007\/s00371-020-01940-3","volume":"37","author":"A Abdelbaky","year":"2021","unstructured":"Abdelbaky A, Aly S (2021) Two-stream spatiotemporal feature fusion for human action recognition. The Visual Computer 37(7):1821\u20131835","journal-title":"The Visual Computer"},{"key":"4756_CR17","doi-asserted-by":"publisher","first-page":"304","DOI":"10.1016\/j.neucom.2020.06.032","volume":"410","author":"Z Zhang","year":"2020","unstructured":"Zhang Z, Lv Z, Gan C, Zhu Q (2020) Human action recognition using convolutional lstm and fully-connected lstm with different attentions. Neurocomputing 410:304\u2013316","journal-title":"Neurocomputing"},{"key":"4756_CR18","doi-asserted-by":"publisher","first-page":"276","DOI":"10.1016\/j.neucom.2022.07.040","volume":"505","author":"B Zhang","year":"2022","unstructured":"Zhang B, Wang Q, Gao Z, Zeng R, Li P (2022) Temporal grafter network: Rethinking lstm for effective video recognition. Neurocomputing 505:276\u2013288","journal-title":"Neurocomputing"},{"key":"4756_CR19","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2022.105803","volume":"147","author":"Q Liu","year":"2022","unstructured":"Liu Q, Cai M, Liu D, Ma S, Zhang Q, Liu Z, Yang J (2022) Two stream non-local cnn-lstm network for the auxiliary assessment of mental retardation. Computers in Biology and Medicine 147:105803","journal-title":"Computers in Biology and Medicine"},{"key":"4756_CR20","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2021.106995","volume":"222","author":"T \u00d6zyer","year":"2021","unstructured":"\u00d6zyer T, Ak DS, Alhajj R (2021) Human action recognition approaches with video datasets-a survey. Knowledge-Based Systems 222:106995","journal-title":"Knowledge-Based Systems"},{"key":"4756_CR21","doi-asserted-by":"publisher","first-page":"385","DOI":"10.1016\/j.ins.2022.02.006","volume":"593","author":"X Wu","year":"2022","unstructured":"Wu X, Chen C, Li P, Zhong M, Wang J, Qian Q, Ding P, Yao J, Guo Y (2022) Ftap: Feature transferring autonomous machine learning pipeline. Information Sciences 593:385\u2013397","journal-title":"Information Sciences"},{"issue":"2","key":"4756_CR22","doi-asserted-by":"publisher","first-page":"931","DOI":"10.3390\/app12020931","volume":"12","author":"R Vrskova","year":"2022","unstructured":"Vrskova R, Hudec R, Kamencay P, Sykora P (2022) Human activity classification using the 3dcnn architecture. Applied Sciences 12(2):931","journal-title":"Applied Sciences"},{"key":"4756_CR23","doi-asserted-by":"publisher","first-page":"1261","DOI":"10.1007\/s00371-019-01733-3","volume":"36","author":"J Cai","year":"2020","unstructured":"Cai J, Hu J (2020) 3d rans: 3d residual attention networks for action recognition. The Visual Computer 36:1261\u20131270","journal-title":"The Visual Computer"},{"key":"4756_CR24","doi-asserted-by":"publisher","first-page":"362","DOI":"10.1016\/j.neucom.2021.03.120","volume":"450","author":"Y Ming","year":"2021","unstructured":"Ming Y, Feng F, Li C, Xue J-H (2021) 3d-tdc: A 3d temporal dilation convolution framework for video action recognition. Neurocomputing 450:362\u2013371","journal-title":"Neurocomputing"},{"key":"4756_CR25","doi-asserted-by":"crossref","unstructured":"Yang, C., Xu, Y., Shi, J., Dai, B., Zhou, B.: Temporal pyramid network for action recognition. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 591\u2013600 (2020)","DOI":"10.1109\/CVPR42600.2020.00067"},{"key":"4756_CR26","doi-asserted-by":"crossref","unstructured":"Hara, K., Kataoka, H., Satoh, Y.: Can spatiotemporal 3d cnns retrace the history of 2d cnns and imagenet? In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6546\u20136555 (2018)","DOI":"10.1109\/CVPR.2018.00685"},{"key":"4756_CR27","doi-asserted-by":"crossref","unstructured":"Neimark, D., Bar, O., Zohar, M., Asselmann, D.: Video transformer network. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3163\u20133172 (2021)","DOI":"10.1109\/ICCVW54120.2021.00355"},{"key":"4756_CR28","unstructured":"Bertasius, G., Wang, H., Torresani, L.: Is space-time attention all you need for video understanding? In: ICML, vol. 2, p. 4 (2021)"},{"key":"4756_CR29","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lu\u010di\u0107, M., Schmid, C.: Vivit: A video vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6836\u20136846 (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"4756_CR30","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Li, X., Liu, C., Shuai, B., Zhu, Y., Brattoli, B., Chen, H., Marsic, I., Tighe, J.: Vidtr: Video transformer without convolutions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 13577\u201313587 (2021)","DOI":"10.1109\/ICCV48922.2021.01332"},{"key":"4756_CR31","doi-asserted-by":"crossref","unstructured":"Fan, H., Xiong, B., Mangalam, K., Li, Y., Yan, Z., Malik, J., Feichtenhofer, C.: Multiscale vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6824\u20136835 (2021)","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"4756_CR32","doi-asserted-by":"crossref","unstructured":"Li, L., Zhuang, L.: Mevit: Motion enhanced video transformer for video classification. In: International Conference on Multimedia Modeling, pp. 419\u2013430 (2022). Springer","DOI":"10.1007\/978-3-030-98355-0_35"},{"key":"4756_CR33","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2021.108487","volume":"124","author":"V Mazzia","year":"2022","unstructured":"Mazzia V, Angarano S, Salvetti F, Angelini F, Chiaberge M (2022) Action transformer: A self-attention model for short-time pose-based human action recognition. Pattern Recognition 124:108487","journal-title":"Pattern Recognition"},{"key":"4756_CR34","doi-asserted-by":"crossref","unstructured":"Girdhar, R., Grauman, K.: Anticipative video transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 13505\u201313515 (2021)","DOI":"10.1109\/ICCV48922.2021.01325"},{"issue":"1","key":"4756_CR35","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1038\/s41597-020-00622-y","volume":"7","author":"H Borgli","year":"2020","unstructured":"Borgli H, Thambawita V, Smedsrud PH, Hicks S, Jha D, Eskeland SL, Randel KR, Pogorelov K, Lux M, Nguyen DTD et al (2020) Hyperkvasir, a comprehensive multi-class image and video dataset for gastrointestinal endoscopy. Scientific data 7(1):1\u201314","journal-title":"Scientific data"},{"key":"4756_CR36","unstructured":"Fan, Q., Chen, C.-F.R., Kuehne, H., Pistoia, M., Cox, D.: More is less: Learning efficient video representations by big-little network and depthwise temporal aggregation. 
Advances in Neural Information Processing Systems 32 (2019)"},{"key":"4756_CR37","doi-asserted-by":"crossref","unstructured":"Jiang, B., Wang, M., Gan, W., Wu, W., Yan, J.: Stm: Spatiotemporal and motion encoding for action recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2000\u20132009 (2019)","DOI":"10.1109\/ICCV.2019.00209"},{"key":"4756_CR38","doi-asserted-by":"crossref","unstructured":"Lin, J., Gan, C., Han, S.: Tsm: Temporal shift module for efficient video understanding. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7083\u20137093 (2019)","DOI":"10.1109\/ICCV.2019.00718"},{"key":"4756_CR39","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Fan, H., Malik, J., He, K.: Slowfast networks for video recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6202\u20136211 (2019)","DOI":"10.1109\/ICCV.2019.00630"},{"key":"4756_CR40","doi-asserted-by":"crossref","unstructured":"Chen, Y., Fan, H., Xu, B., Yan, Z., Kalantidis, Y., Rohrbach, M., Yan, S., Feng, J.: Drop an octave: Reducing spatial redundancy in convolutional neural networks with octave convolution. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3435\u20133444 (2019)","DOI":"10.1109\/ICCV.2019.00353"},{"key":"4756_CR41","doi-asserted-by":"crossref","unstructured":"Li, Y., Ji, B., Shi, X., Zhang, J., Kang, B., Wang, L.: Tea: Temporal excitation and aggregation for action recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 909\u2013918 (2020)","DOI":"10.1109\/CVPR42600.2020.00099"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-023-04756-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-023-04756-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-023-04756-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,21]],"date-time":"2023-10-21T16:05:37Z","timestamp":1697904337000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-023-04756-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,7,5]]},"references-count":41,"journal-issue":{"issue":"20","published-print":{"date-parts":[[2023,10]]}},"alternative-id":["4756"],"URL":"https:\/\/doi.org\/10.1007\/s10489-023-04756-5","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,7,5]]},"assertion":[{"value":"2 June 2023","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 July 2023","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that there is no conflict of interests with anybody or any institution regarding the publication of this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of 
Interests"}}]}}