{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,17]],"date-time":"2026-04-17T16:45:56Z","timestamp":1776444356811,"version":"3.51.2"},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,9,27]],"date-time":"2023-09-27T00:00:00Z","timestamp":1695772800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,9,27]],"date-time":"2023-09-27T00:00:00Z","timestamp":1695772800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,9,27]]},"DOI":"10.1109\/mmsp59012.2023.10337696","type":"proceedings-article","created":{"date-parts":[[2023,12,8]],"date-time":"2023-12-08T19:10:51Z","timestamp":1702062651000},"page":"1-6","source":"Crossref","is-referenced-by-count":4,"title":["ConViViT - A Deep Neural Network Combining Convolutions and Factorized Self-Attention for Human Activity Recognition"],"prefix":"10.1109","author":[{"given":"Dokkar Rachid","family":"Reda","sequence":"first","affiliation":[{"name":"Paris Panth&#x00E9;on-Assas University,Efrei Research Lab,Paris,France"}]},{"given":"Faten","family":"Chaieb","sequence":"additional","affiliation":[{"name":"Paris Panth&#x00E9;on-Assas University,Efrei Research Lab,Paris,France"}]},{"given":"Hassen","family":"Drira","sequence":"additional","affiliation":[{"name":"Universit&#x00E9; de Strasbourg,ICube UMR 7357, CNRS,Strasbourg,France"}]},{"given":"Arezki","family":"Aberkane","sequence":"additional","affiliation":[{"name":"Caplogy SAS,V&#x00E9;lizy-Villacoubay,France"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Uniformer: Unified transformer for efficient spatiotemporal representation learning","volume":"abs\/2201.04676","author":"Li","year":"2022","journal-title":"CoRR"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"ref4","article-title":"UCF101: A dataset of 101 human actions classes from videos in the wild","volume":"abs\/1212.0402","author":"Soomro","year":"2012","journal-title":"CoRR"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341160"},{"key":"ref6","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","volume":"abs\/2010.11929","author":"Dosovitskiy","year":"2020","journal-title":"CoRR"},{"key":"ref7","article-title":"A closer look at spatiotemporal convolutions for action recognition","volume":"abs\/1711.11248","author":"Tran","year":"2017","journal-title":"CoRR"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.590"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00565"},{"key":"ref11","article-title":"Slowfast networks for video recognition","volume":"abs\/1812.03982","author":"Feichtenhofer","year":"2018","journal-title":"CoRR"},{"key":"ref12","article-title":"Video modeling with correlation networks","volume":"abs\/1906.03349","author":"Wang","year":"2019","journal-title":"CoRR"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00117"},{"key":"ref14","article-title":"TDN: temporal difference networks for efficient action recognition","volume":"abs\/2012.10071","author":"Wang","year":"2020","journal-title":"CoRR"},{"key":"ref15","article-title":"Is space-time attention all you need for video understanding?","volume":"abs\/2102.05095","author":"Bertasius","year":"2021","journal-title":"CoRR"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00355"},{"key":"ref17","article-title":"An image is worth 16\u00d716 words, what is a video worth?","volume":"abs\/2103.13915","author":"Sharir","year":"2021","journal-title":"CoRR"},{"key":"ref18","article-title":"Shifted chunk transformer for spatio-temporal representational learning","volume":"abs\/2108.11575","author":"Zha","year":"2021","journal-title":"CoRR"},{"key":"ref19","first-page":"813","article-title":"Is space-time attention all you need for video understanding?","volume-title":"Proceedings of the 38th International Conference on Machine Learning, ser. Proceedings of Machine Learning Research","volume":"139","author":"Bertasius"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref21","article-title":"Hallucinating statistical moment and subspace descriptors from object and saliency detectors for action recognition","volume":"abs\/2001.04627","author":"Wang","year":"2020","journal-title":"CoRR"},{"key":"ref22","article-title":"High-order tensor pooling with attention for action recognition","volume":"abs\/2110.05216","author":"Koniusz","year":"2021","journal-title":"CoRR"},{"key":"ref23","article-title":"Tensor representations for action recognition","volume":"abs\/2012.14371","author":"Koniusz","year":"2020","journal-title":"CoRR"},{"key":"ref24","article-title":"High-order tensor pooling with attention for action recognition","volume":"abs\/2110.05216","author":"Koniusz","year":"2021","journal-title":"CoRR"},{"key":"ref25","article-title":"Pose and joint-aware action recognition","volume":"abs\/2010.08164","author":"Shah","year":"2020","journal-title":"CoRR"},{"key":"ref26","article-title":"SMART frame selection for action recognition","volume":"abs\/2012.10671","author":"Gowda","year":"2020","journal-title":"CoRR"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2022.104465"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58555-6_40"},{"key":"ref29","article-title":"Perf-net: Pose empowered rgb-flow net","volume":"abs\/2009.13087","author":"Li","year":"2020","journal-title":"CoRR"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01233"}],"event":{"name":"2023 IEEE 25th International Workshop on Multimedia Signal Processing (MMSP)","location":"Poitiers, France","start":{"date-parts":[[2023,9,27]]},"end":{"date-parts":[[2023,9,29]]}},"container-title":["2023 IEEE 25th International Workshop on Multimedia Signal Processing (MMSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10337652\/10337633\/10337696.pdf?arnumber=10337696","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,19]],"date-time":"2023-12-19T23:46:15Z","timestamp":1703029575000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10337696\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,9,27]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/mmsp59012.2023.10337696","relation":{},"subject":[],"published":{"date-parts":[[2023,9,27]]}}}