{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,5]],"date-time":"2026-02-05T00:12:53Z","timestamp":1770250373018,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":27,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819601219","type":"print"},{"value":"9789819601226","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,12]],"date-time":"2024-11-12T00:00:00Z","timestamp":1731369600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,12]],"date-time":"2024-11-12T00:00:00Z","timestamp":1731369600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-96-0122-6_18","type":"book-chapter","created":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T18:24:30Z","timestamp":1731781470000},"page":"195-207","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Action Recognition Based on\u00a0Multi-perspective Feature Excitation"],"prefix":"10.1007","author":[{"given":"Xiaoyang","family":"Li","sequence":"first","affiliation":[]},{"given":"Wenzhu","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Zhenchao","family":"Cui","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,12]]},"reference":[{"key":"18_CR1","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? a new model and the kinetics dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6299\u20136308 (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"18_CR2","unstructured":"Chen, Y., Kalantidis, Y., Li, J., Yan, S., Feng, J.: A$$^2$$-nets: double attention networks. Adv. Neural Inf. Process. Syst. 31 (2018)"},{"key":"18_CR3","doi-asserted-by":"publisher","unstructured":"Fan, L., et al.: RubiksNet: learnable 3D-shift for efficient video action recognition. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12364, pp. 505\u2013521. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58529-7_30","DOI":"10.1007\/978-3-030-58529-7_30"},{"key":"18_CR4","doi-asserted-by":"crossref","unstructured":"Goyal, R., et al.: The \u201csomething something\u201d video database for learning and evaluating visual common sense. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 5842\u20135850 (2017)","DOI":"10.1109\/ICCV.2017.622"},{"key":"18_CR5","doi-asserted-by":"crossref","unstructured":"Hao, Y., Zhang, H., Ngo, C.W., He, X.: Group contextualization for video recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 928\u2013938 (2022)","DOI":"10.1109\/CVPR52688.2022.00100"},{"key":"18_CR6","doi-asserted-by":"crossref","unstructured":"He, D., et al.: Stnet: local and global spatial-temporal modeling for action recognition. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a033, pp. 
8401\u20138408 (2019)","DOI":"10.1609\/aaai.v33i01.33018401"},{"key":"18_CR7","doi-asserted-by":"crossref","unstructured":"Jiang, B., Wang, M., Gan, W., Wu, W., Yan, J.: STM: spatiotemporal and motion encoding for action recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2000\u20132009 (2019)","DOI":"10.1109\/ICCV.2019.00209"},{"issue":"9","key":"18_CR8","doi-asserted-by":"publisher","first-page":"3059","DOI":"10.1007\/s13042-023-01820-x","volume":"14","author":"Z Jiang","year":"2023","unstructured":"Jiang, Z., Zhang, Y., Hu, S.: ESTI: an action recognition network with enhanced spatio-temporal information. Int. J. Mach. Learn. Cybern. 14(9), 3059\u20133070 (2023)","journal-title":"Int. J. Mach. Learn. Cybern."},{"key":"18_CR9","doi-asserted-by":"crossref","unstructured":"Li, X., Wang, Y., Zhou, Z., Qiao, Y.: Smallbignet: integrating core and contextual views for video classification. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1092\u20131101 (2020)","DOI":"10.1109\/CVPR42600.2020.00117"},{"issue":"6","key":"18_CR10","doi-asserted-by":"publisher","first-page":"1059","DOI":"10.1049\/iet-ipr.2019.0963","volume":"14","author":"X Li","year":"2020","unstructured":"Li, X., Xie, M., Zhang, Y., Ding, G., Tong, W.: Dual attention convolutional network for action recognition. IET Image Proc. 14(6), 1059\u20131065 (2020)","journal-title":"IET Image Proc."},{"key":"18_CR11","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"275","DOI":"10.1007\/978-3-030-58539-6_17","volume-title":"Computer Vision \u2013 ECCV 2020","author":"X Li","year":"2020","unstructured":"Li, X., Shuai, B., Tighe, J.: Directional temporal modeling for action recognition. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12351, pp. 275\u2013291. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58539-6_17"},{"key":"18_CR12","doi-asserted-by":"crossref","unstructured":"Li, Y., Ji, B., Shi, X., Zhang, J., Kang, B., Wang, L.: Tea: temporal excitation and aggregation for action recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 909\u2013918 (2020)","DOI":"10.1109\/CVPR42600.2020.00099"},{"key":"18_CR13","doi-asserted-by":"crossref","unstructured":"Lin, J., Gan, C., Han, S.: TSM: temporal shift module for efficient video understanding. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7083\u20137093 (2019)","DOI":"10.1109\/ICCV.2019.00718"},{"key":"18_CR14","doi-asserted-by":"publisher","first-page":"4104","DOI":"10.1109\/TIP.2022.3180585","volume":"31","author":"Y Liu","year":"2022","unstructured":"Liu, Y., Yuan, J., Tu, Z.: Motion-driven visual tempo learning for video-based action recognition. IEEE Trans. Image Process. 31, 4104\u20134116 (2022)","journal-title":"IEEE Trans. Image Process."},{"key":"18_CR15","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Teinet: towards an efficient architecture for video recognition. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a034, pp. 11669\u201311676 (2020)","DOI":"10.1609\/aaai.v34i07.6836"},{"key":"18_CR16","doi-asserted-by":"crossref","unstructured":"Liu, Z., Wang, L., Wu, W., Qian, C., Lu, T.: Tam: temporal adaptive module for video recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
13708\u201313718 (2021)","DOI":"10.1109\/ICCV48922.2021.01345"},{"key":"18_CR17","doi-asserted-by":"crossref","unstructured":"Luo, C., Yuille, A.L.: Grouped spatial-temporal aggregation for efficient action recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5512\u20135521 (2019)","DOI":"10.1109\/ICCV.2019.00561"},{"key":"18_CR18","unstructured":"Mahdisoltani, F., Berger, G., Gharbieh, W., Fleet, D., Memisevic, R.: On the effectiveness of task granularity for transfer learning. arXiv preprint arXiv:1804.09235 (2018)"},{"key":"18_CR19","doi-asserted-by":"crossref","unstructured":"Russakovsky, O., et al.: Imagenet large scale visual recognition challenge. Int. J. Comput. Vision 115, 211\u2013252 (2015)","DOI":"10.1007\/s11263-015-0816-y"},{"key":"18_CR20","doi-asserted-by":"crossref","unstructured":"Ryu, S., Hong, S., Lee, S.: Making TSM Better: Preserving Foundational Philosophy for Efficient Action Recognition. ICT Express (2023)","DOI":"10.1016\/j.icte.2023.12.004"},{"key":"18_CR21","doi-asserted-by":"crossref","unstructured":"Selvaraju, R.R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., Batra, D.: Grad-cam: visual explanations from deep networks via gradient-based localization. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 618\u2013626 (2017)","DOI":"10.1109\/ICCV.2017.74"},{"key":"18_CR22","unstructured":"Soomro, K., Zamir, A.R., Shah, M.: Ucf101: a dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)"},{"key":"18_CR23","doi-asserted-by":"crossref","unstructured":"Wang, H., Tran, D., Torresani, L., Feiszli, M.: Video modeling with correlation networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 352\u2013361 (2020)","DOI":"10.1109\/CVPR42600.2020.00043"},{"key":"18_CR24","doi-asserted-by":"publisher","unstructured":"Wang, L., et al.: Temporal segment networks: towards good practices for deep action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9912, pp. 20\u201336. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46484-8_2","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"18_CR25","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"413","DOI":"10.1007\/978-3-030-01228-1_25","volume-title":"Computer Vision \u2013 ECCV 2018","author":"X Wang","year":"2018","unstructured":"Wang, X., Gupta, A.: Videos as space-time region graphs. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11209, pp. 413\u2013431. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01228-1_25"},{"key":"18_CR26","doi-asserted-by":"crossref","unstructured":"Wang, Z., She, Q., Smolic, A.: Action-net: multipath excitation for action recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13214\u201313223 (2021)","DOI":"10.1109\/CVPR46437.2021.01301"},{"key":"18_CR27","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"318","DOI":"10.1007\/978-3-030-01267-0_19","volume-title":"Computer Vision \u2013 ECCV 2018","author":"S Xie","year":"2018","unstructured":"Xie, S., Sun, C., Huang, J., Tu, Z., Murphy, K.: Rethinking spatiotemporal feature learning: speed-accuracy trade-offs in video classification. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11219, pp. 318\u2013335. 
Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01267-0_19"}],"container-title":["Lecture Notes in Computer Science","PRICAI 2024: Trends in Artificial Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-0122-6_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T19:18:53Z","timestamp":1731784733000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-0122-6_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,12]]},"ISBN":["9789819601219","9789819601226"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-0122-6_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,12]]},"assertion":[{"value":"12 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRICAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Pacific Rim International Conference on Artificial Intelligence","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kyoto","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Japan","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"19 November 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25 November 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"pricai2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.pricai.org\/2024\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
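
For reference, a minimal sketch (in Python, assuming the third-party requests package) of how a Crossref work record like the one above can be retrieved from the public REST API at https://api.crossref.org/works/{DOI} and a few of its fields read out. The field paths follow the "message" object shown above; the client name and mailto address in the User-Agent are placeholders, not real contacts.

import requests

# DOI of the chapter described by the record above.
DOI = "10.1007/978-981-96-0122-6_18"

# Crossref's public REST API serves this record at /works/{doi}.
# Supplying a mailto in the User-Agent is the "polite pool" convention;
# the address below is a placeholder (assumption), substitute your own.
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "example-client/0.1 (mailto:you@example.org)"},
    timeout=10,
)
resp.raise_for_status()
work = resp.json()["message"]  # same "message" object as shown above

# Pull out a few fields present in the record: title and authors.
title = work["title"][0]
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in work.get("author", [])
)
print(title)
print(authors)
print(f"{work['references-count']} references, "
      f"cited {work['is-referenced-by-count']} time(s)")

# Deposited references mix structured fields with free text; fall back
# to the "unstructured" string when no DOI was supplied for an entry.
for ref in work.get("reference", [])[:5]:
    print("-", ref.get("DOI") or ref.get("unstructured", ""))

Note that per-entry keys such as "DOI", "unstructured", "volume", or "journal-title" are optional in deposited references (compare 18_CR2 with 18_CR8 above), so defensive .get() access is used throughout.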