{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:13:48Z","timestamp":1777655628455,"version":"3.51.4"},"publisher-location":"Cham","reference-count":49,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726835","type":"print"},{"value":"9783031726842","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72684-2_26","type":"book-chapter","created":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:08:59Z","timestamp":1730574539000},"page":"457-474","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Optimizing Factorized Encoder Models: Time and\u00a0Memory Reduction for\u00a0Scalable and\u00a0Efficient Action Recognition"],"prefix":"10.1007","author":[{"given":"Shreyank N.","family":"Gowda","sequence":"first","affiliation":[]},{"given":"Anurag","family":"Arnab","sequence":"additional","affiliation":[]},{"given":"Jonathan","family":"Huang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,3]]},"reference":[{"key":"26_CR1","doi-asserted-by":"crossref","unstructured":"Arnab, A., Dehghani, M., Heigold, G., Sun, C., Lu\u010di\u0107, M., Schmid, C.: Vivit: a video vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6836\u20136846 (2021)","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"26_CR2","unstructured":"Bertasius, G., Wang, H., Torresani, L.: Is space-time attention all you need for video understanding? In: ICML, vol.\u00a02, p.\u00a04 (2021)"},{"key":"26_CR3","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020, Part I. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"26_CR4","unstructured":"Carreira, J., Noland, E., Banki-Horvath, A., Hillier, C., Zisserman, A.: A short note about kinetics-600. arXiv preprint arXiv:1808.01340 (2018)"},{"key":"26_CR5","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? a new model and the kinetics dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6299\u20136308 (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"26_CR6","doi-asserted-by":"crossref","unstructured":"Chen, J., Ho, C.M.: Mm-vit: multi-modal video transformer for compressed video action recognition. 
In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 1910\u20131921 (2022)","DOI":"10.1109\/WACV51458.2022.00086"},{"key":"26_CR7","doi-asserted-by":"crossref","unstructured":"Cheng, F., et al.: Stochastic backpropagation: a memory efficient strategy for training video models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8301\u20138310 (2022)","DOI":"10.1109\/CVPR52688.2022.00812"},{"key":"26_CR8","doi-asserted-by":"crossref","unstructured":"Damen, D., et\u00a0al.: Scaling egocentric vision: the epic-kitchens dataset. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 720\u2013736 (2018)","DOI":"10.1007\/978-3-030-01225-0_44"},{"key":"26_CR9","unstructured":"Dehghani, M., Arnab, A., Beyer, L., Vaswani, A., Tay, Y.: The efficiency misnomer. arXiv preprint arXiv:2110.12894 (2021)"},{"key":"26_CR10","unstructured":"Dosovitskiy, A., Beyer, L., et\u00a0al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"26_CR11","doi-asserted-by":"crossref","unstructured":"Fan, H., et al.: Multiscale vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6824\u20136835 (2021)","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"26_CR12","doi-asserted-by":"crossref","unstructured":"Foteinopoulou, N.M., Patras, I.: Emoclip: a vision-language method for zero-shot video facial expression recognition. arXiv preprint arXiv:2310.16640 (2023)","DOI":"10.1109\/FG59268.2024.10581982"},{"key":"26_CR13","doi-asserted-by":"crossref","unstructured":"Gowda, S.N.: Human activity recognition using combinatorial deep belief networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp.\u00a01\u20136 (2017)","DOI":"10.1109\/CVPRW.2017.203"},{"key":"26_CR14","doi-asserted-by":"crossref","unstructured":"Gowda, S.N., Gao, B., Clifton, D.: Fe-adapter: adapting image-based emotion classifiers to videos (2024)","DOI":"10.1109\/FG59268.2024.10581905"},{"key":"26_CR15","unstructured":"Gowda, S.N., Hao, X., Li, G., Sevilla-Lara, L., Gowda, S.N.: Watt for what: rethinking deep learning\u2019s energy-performance relationship. arXiv preprint arXiv:2310.06522 (2023)"},{"key":"26_CR16","doi-asserted-by":"publisher","unstructured":"Gowda, S.N., Rohrbach, M., Keller, F., Sevilla-Lara, L.: Learn2Augment: learning to composite videos for data augmentation in action recognition. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision - ECCV 2022, ECCV 2022, LNCS, Part XXXI, vol. 13691, pp. 242\u2013259. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19821-2_14","DOI":"10.1007\/978-3-031-19821-2_14"},{"key":"26_CR17","doi-asserted-by":"crossref","unstructured":"Gowda, S.N., Rohrbach, M., Sevilla-Lara, L.: Smart frame selection for action recognition. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a035, pp. 1451\u20131459 (2021)","DOI":"10.1609\/aaai.v35i2.16235"},{"key":"26_CR18","doi-asserted-by":"crossref","unstructured":"Goyal, R., et\u00a0al.: The \u201csomething something\u201d video database for learning and evaluating visual common sense. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 
5842\u20135850 (2017)","DOI":"10.1109\/ICCV.2017.622"},{"key":"26_CR19","doi-asserted-by":"crossref","unstructured":"Gritsenko, A., et al.: End-to-end spatio-temporal action localisation with video transformers. arXiv preprint arXiv:2304.12160 (2023)","DOI":"10.1109\/CVPR52733.2024.01739"},{"key":"26_CR20","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. arXiv preprint arXiv:1503.02531 (2015)"},{"issue":"1","key":"26_CR21","doi-asserted-by":"publisher","first-page":"221","DOI":"10.1109\/TPAMI.2012.59","volume":"35","author":"S Ji","year":"2012","unstructured":"Ji, S., Xu, W., Yang, M., Yu, K.: 3d convolutional neural networks for human action recognition. IEEE Trans. Pattern Anal. Mach. Intell. 35(1), 221\u2013231 (2012)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"26_CR22","unstructured":"Kim, K., Gowda, S.N., Mac\u00a0Aodha, O., Sevilla-Lara, L.: Capturing temporal information in a single frame: channel sampling strategies for action recognition. arXiv preprint arXiv:2201.10394 (2022)"},{"key":"26_CR23","doi-asserted-by":"crossref","unstructured":"Kuehne, H., Jhuang, H., Garrote, E., Poggio, T., Serre, T.: Hmdb: a large video database for human motion recognition. In: 2011 International Conference on Computer Vision, pp. 2556\u20132563. IEEE (2011)","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"26_CR24","doi-asserted-by":"publisher","first-page":"107","DOI":"10.1007\/s11263-005-1838-7","volume":"64","author":"I Laptev","year":"2005","unstructured":"Laptev, I.: On space-time interest points. Int. J. Comput. Vision 64, 107\u2013123 (2005)","journal-title":"Int. J. Comput. Vision"},{"key":"26_CR25","unstructured":"Li, K., Wang, Y., Gao, P., Song, G., Liu, Y., Li, H., Qiao, Y.: Uniformer: unified transformer for efficient spatiotemporal representation learning. arXiv preprint arXiv:2201.04676 (2022)"},{"key":"26_CR26","doi-asserted-by":"publisher","unstructured":"Liang, Y., Zhou, P., Zimmermann, R., Yan, S.: DualFormer: local-global stratified transformer for efficient video recognition. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision - ECCV 2022, ECCV 2022, LNCS, vol. 13694, pp 577\u2013595. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19830-4_33","DOI":"10.1007\/978-3-031-19830-4_33"},{"key":"26_CR27","doi-asserted-by":"crossref","unstructured":"Lin, J., Gan, C., Han, S.: TSM: temporal shift module for efficient video understanding. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7083\u20137093 (2019)","DOI":"10.1109\/ICCV.2019.00718"},{"key":"26_CR28","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"26_CR29","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Video swin transformer. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3202\u20133211 (2022)","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"26_CR30","doi-asserted-by":"crossref","unstructured":"Materzynska, J., Xiao, T., Herzig, R., Xu, H., Wang, X., Darrell, T.: Something-else: Compositional action recognition with spatial-temporal interaction networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
1049\u20131059 (2020)","DOI":"10.1109\/CVPR42600.2020.00113"},{"issue":"2","key":"26_CR31","doi-asserted-by":"publisher","first-page":"502","DOI":"10.1109\/TPAMI.2019.2901464","volume":"42","author":"M Monfort","year":"2019","unstructured":"Monfort, M., et al.: Moments in time dataset: one million videos for event understanding. IEEE Trans. Pattern Anal. Mach. Intell. 42(2), 502\u2013508 (2019)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"26_CR32","first-page":"26462","volume":"35","author":"J Pan","year":"2022","unstructured":"Pan, J., Lin, Z., Zhu, X., Shao, J., Li, H.: St-adapter: parameter-efficient image-to-video transfer learning. Adv. Neural. Inf. Process. Syst. 35, 26462\u201326477 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"26_CR33","doi-asserted-by":"crossref","unstructured":"Piergiovanni, A., Kuo, W., Angelova, A.: Rethinking video vits: sparse video tubes for joint image and video learning. arXiv preprint arXiv:2212.03229 (2022)","DOI":"10.1109\/CVPR52729.2023.00220"},{"key":"26_CR34","unstructured":"Ridnik, T., Ben-Baruch, E., Noy, A., Zelnik-Manor, L.: Imagenet-21k pretraining for the masses. arXiv preprint arXiv:2104.10972 (2021)"},{"key":"26_CR35","first-page":"12786","volume":"34","author":"M Ryoo","year":"2021","unstructured":"Ryoo, M., Piergiovanni, A., Arnab, A., Dehghani, M., Angelova, A.: Tokenlearner: Adaptive space-time tokenization for videos. Adv. Neural. Inf. Process. Syst. 34, 12786\u201312797 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"26_CR36","unstructured":"Simonyan, K., Zisserman, A.: Two-stream convolutional networks for action recognition in videos. In: Advances in Neural Information Processing Systems, vol. 27 (2014)"},{"key":"26_CR37","unstructured":"Soomro, K., Zamir, A.R., Shah, M.: Ucf101: a dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)"},{"key":"26_CR38","unstructured":"Tong, Z., Song, Y., Wang, J., Wang, L.: Videomae: masked autoencoders are data-efficient learners for self-supervised video pre-training. arXiv preprint arXiv:2203.12602 (2022)"},{"key":"26_CR39","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., Paluri, M.: Learning spatiotemporal features with 3d convolutional networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4489\u20134497 (2015)","DOI":"10.1109\/ICCV.2015.510"},{"key":"26_CR40","unstructured":"Vaswani, A., et al.: Attention is all you need. IN: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"26_CR41","doi-asserted-by":"publisher","first-page":"60","DOI":"10.1007\/s11263-012-0594-8","volume":"103","author":"H Wang","year":"2013","unstructured":"Wang, H., Kl\u00e4ser, A., Schmid, C., Liu, C.L.: Dense trajectories and motion boundary descriptors for action recognition. Int. J. Comput. Vision 103, 60\u201379 (2013)","journal-title":"Int. J. Comput. Vision"},{"key":"26_CR42","unstructured":"Wang, J., et al.: Git: a generative image-to-text transformer for vision and language. Transactions of Machine Learning Research (2022)"},{"key":"26_CR43","doi-asserted-by":"publisher","unstructured":"Wang, J., Yang, X., Li, H., Liu, L., Wu, Z., Jiang, Y.G.: Efficient video transformers with spatial-temporal token selection. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision - ECCV 2022, ECCV 2022, Part XXXV, LNCS, vol. 13695. Springer, Cham (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-19833-5_5","DOI":"10.1007\/978-3-031-19833-5_5"},{"key":"26_CR44","doi-asserted-by":"publisher","unstructured":"Wang, L., et al.: Temporal segment networks: towards good practices for deep action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) Computer Vision - ECCV 2016, ECCV 2016, LNCS, vol. 9912, pp. 20\u201336. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46484-8_2","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"26_CR45","doi-asserted-by":"crossref","unstructured":"Wasim, S.T., Khattak, M.U., Naseer, M., Khan, S., Shah, M., Khan, F.S.: Video-focalnets: spatio-temporal focal modulation for video action recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 13778\u201313789 (2023)","DOI":"10.1109\/ICCV51070.2023.01267"},{"key":"26_CR46","unstructured":"Xiong, X., Arnab, A., Nagrani, A., Schmid, C.: M&M mix: a multimodal multiview transformer ensemble. arXiv preprint arXiv:2206.09852 (2022)"},{"key":"26_CR47","doi-asserted-by":"crossref","unstructured":"Yan, S., et al.: Multiview transformers for video recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3333\u20133343 (2022)","DOI":"10.1109\/CVPR52688.2022.00333"},{"key":"26_CR48","doi-asserted-by":"crossref","unstructured":"Yang, A., et al.: Vid2seq: large-scale pretraining of a visual language model for dense video captioning. In: CVPR 2023-IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2023)","DOI":"10.1109\/CVPR52729.2023.01032"},{"key":"26_CR49","doi-asserted-by":"crossref","unstructured":"Zhang, H., Hao, Y., Ngo, C.W.: Token shift transformer for video classification. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 
917\u2013925 (2021)","DOI":"10.1145\/3474085.3475272"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72684-2_26","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:11:56Z","timestamp":1730574716000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72684-2_26"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,3]]},"ISBN":["9783031726835","9783031726842"],"references-count":49,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72684-2_26","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,3]]},"assertion":[{"value":"3 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}