{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T12:01:18Z","timestamp":1775563278781,"version":"3.50.1"},"reference-count":22,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T00:00:00Z","timestamp":1772841600000},"content-version":"vor","delay-in-days":65,"URL":"http:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Procedia Computer Science"],"published-print":{"date-parts":[[2026]]},"DOI":"10.1016\/j.procs.2026.03.113","type":"journal-article","created":{"date-parts":[[2026,3,24]],"date-time":"2026-03-24T12:39:40Z","timestamp":1774355980000},"page":"1299-1306","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Memory vs. Attention: Investigating LSTM and Transformer Models in Human Action Recognition"],"prefix":"10.1016","volume":"278","author":[{"given":"Isaac","family":"Van-Deste","sequence":"first","affiliation":[]},{"given":"J\u00falio Castro","family":"Lopes","sequence":"additional","affiliation":[]},{"given":"Rui Pedro","family":"Lopes","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"#cr-split#-10.1016\/j.procs.2026.03.113_bib1.1","doi-asserted-by":"crossref","unstructured":"Y. Zhang, Y. Wang, A comprehensive survey on rgb-d-based human action recognition: algorithms, datasets, and popular applications, Journal of Image and Video Processing 2025","DOI":"10.1186\/s13640-025-00677-0"},{"key":"#cr-split#-10.1016\/j.procs.2026.03.113_bib1.2","doi-asserted-by":"crossref","unstructured":"(15) (2025). doi: 10.1186\/s13640-025-00677-0.","DOI":"10.1186\/s13640-025-00677-0"},{"issue":"5","key":"10.1016\/j.procs.2026.03.113_bib2","doi-asserted-by":"crossref","first-page":"e2293","DOI":"10.1002\/cav.2293","article-title":"Human action recognition in immersive virtual reality based on multi-scale spatio-temporal attention network","volume":"35","author":"Xiao","year":"2024","journal-title":"Computer Animation and Virtual Worlds"},{"key":"10.1016\/j.procs.2026.03.113_bib3","doi-asserted-by":"crossref","unstructured":"I. Vernikos, E. Spyrou, Skeleton reconstruction using generative adversarial networks for human activity recognition under occlusion, Sensors 25 (5) (2025). doi: 10.3390\/s25051567.","DOI":"10.3390\/s25051567"},{"key":"10.1016\/j.procs.2026.03.113_bib4","doi-asserted-by":"crossref","first-page":"103089","DOI":"10.1016\/j.rcim.2025.103089","article-title":"Enhancing industrial human action recognition framework integrating skeleton data acquisition, data repair and optimized graph convolutional networks","volume":"97","author":"Liu","year":"2026","journal-title":"Robotics and Computer-Integrated Manufacturing"},{"key":"10.1016\/j.procs.2026.03.113_bib5","doi-asserted-by":"crossref","first-page":"16481","DOI":"10.1007\/s11042-024-19611-z","article-title":"Insights into aerial intelligence: assessing cnn-based algorithms for human action recognition and object detection in diverse environments","volume":"84","author":"Maheriya","year":"2025","journal-title":"Multimedia Tools and Applications"},{"key":"10.1016\/j.procs.2026.03.113_bib6","doi-asserted-by":"crossref","first-page":"111710","DOI":"10.1016\/j.engappai.2025.111710","article-title":"Graph-based framework for temporal human action recognition and segmentation in industrial context","volume":"159","author":"Benmessabih","year":"2025","journal-title":"Engineering Applications of Artificial Intelligence"},{"key":"10.1016\/j.procs.2026.03.113_bib7","first-page":"1","article-title":"An Architecture for Capturing and Synchronizing Heart Rate and Body Motion for Stress Inference","author":"Lopes","year":"2023","journal-title":"2023 IEEE 11th International Conference on Serious Games and Applications for Health (SeGAH)"},{"issue":"6","key":"10.1016\/j.procs.2026.03.113_bib8","doi-asserted-by":"crossref","first-page":"1195","DOI":"10.1093\/schbul\/sby058","article-title":"Global epidemiology and burden of schizophrenia: Findings from the global burden of disease study 2016","volume":"44","author":"Charlson","year":"2018","journal-title":"Schizophrenia Bulletin"},{"issue":"18","key":"10.1016\/j.procs.2026.03.113_bib9","doi-asserted-by":"crossref","first-page":"2260","DOI":"10.3390\/electronics10182260","article-title":"Digital Technologies for Innovative Mental Health Rehabilitation","volume":"10","author":"Lopes","year":"2021","journal-title":"Electronics"},{"key":"10.1016\/j.procs.2026.03.113_bib10","first-page":"804","article-title":"Classification of facial expressions under partial occlusion for VR games","author":"Rodrigues","year":"2023","journal-title":"in: Optimization, Learning Algorithms and Applications, Springer"},{"key":"10.1016\/j.procs.2026.03.113_bib11","unstructured":"M. T. Truong, V. D. Hoang, T. M. C. Le, Skeleton-based posture estimation for human action recognition using deep learning, in: Y. P. Huang."},{"key":"10.1016\/j.procs.2026.03.113_bib12","doi-asserted-by":"crossref","unstructured":"W. J. Wang, H. G. Le, A. Q. Hoang (Eds.), Computational Intelligence Methods for Green Technology and Sustainable Development, Vol. 1195 of Lecture Notes in Networks and Systems, Springer, Cham, 2024. doi: 10.1007\/978-3-031-76197-3_8. URL https:\/\/doi.org\/10.1007\/978-3-031-76197-3_8","DOI":"10.1007\/978-3-031-76197-3_8"},{"key":"10.1016\/j.procs.2026.03.113_bib13","doi-asserted-by":"crossref","first-page":"36372","DOI":"10.1109\/ACCESS.2024.3373199","article-title":"Human action recognition systems: A review of the trends and state-of-the-art","volume":"12","author":"Karim","year":"2024","journal-title":"IEEE Access"},{"issue":"5","key":"10.1016\/j.procs.2026.03.113_bib14","doi-asserted-by":"crossref","first-page":"2745","DOI":"10.3390\/s23052745","article-title":"Multi-View Human Action Recognition Using Skeleton Based-FineKNN with Extraneous Frame Scrapping Technique","volume":"23","author":"Malik","year":"2023","journal-title":"Sensors"},{"key":"10.1016\/j.procs.2026.03.113_bib15","doi-asserted-by":"crossref","unstructured":"J. Do, M. Kim, Skateformer: Skeletal-temporal transformer for human action recognition (2024). arXiv:2403.09508.","DOI":"10.1007\/978-3-031-72940-9_23"},{"key":"10.1016\/j.procs.2026.03.113_bib16","unstructured":"Y. Peng, A. Yilmaz, Cascadeformer: A family of two-stage cascading transformers for skeleton-based human action recognition (2025). arXiv:2509.00692."},{"key":"10.1016\/j.procs.2026.03.113_bib17","doi-asserted-by":"crossref","first-page":"101840","DOI":"10.1016\/j.rasd.2021.101840","article-title":"Automated and scalable Computerized Assessment of Motor Imitation (CAMI) in children with Autism Spectrum Disorder using a single 2D camera: A pilot study","volume":"87","author":"Lidstone","year":"2021","journal-title":"Research in Autism Spectrum Disorders"},{"key":"10.1016\/j.procs.2026.03.113_bib18","doi-asserted-by":"crossref","unstructured":"D. Maji, S. Nagori, M. Mathew, D. Poddar, YOLO-Pose: Enhancing YOLO for Multi Person Pose Estimation Using Object Keypoint Similarity Loss, arXiv:2204.06806 [cs] (Apr. 2022).","DOI":"10.1109\/CVPRW56347.2022.00297"},{"key":"10.1016\/j.procs.2026.03.113_bib19","first-page":"1","article-title":"Exploring human action recognition for rehabilitation game application","author":"Lopes","year":"2024","journal-title":"2024 IEEE 12th International Conference on Serious Games and Applications for Health (SeGAH)"},{"key":"10.1016\/j.procs.2026.03.113_bib20","unstructured":"A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, I. Polosukhin, Attention Is All You Need, arXiv:1706.03762 [cs] (Aug. 2023). doi: 10.48550\/arXiv.1706.03762."},{"key":"10.1016\/j.procs.2026.03.113_bib21","doi-asserted-by":"crossref","unstructured":"J. Wang, X. Nie, Y. Xia, Y. Wu, S.-C. Zhu, Cross-view action modeling, learning and recognition, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014.","DOI":"10.1109\/CVPR.2014.339"}],"container-title":["Procedia Computer Science"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1877050926007088?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1877050926007088?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T11:15:25Z","timestamp":1775560525000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1877050926007088"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":22,"alternative-id":["S1877050926007088"],"URL":"https:\/\/doi.org\/10.1016\/j.procs.2026.03.113","relation":{},"ISSN":["1877-0509"],"issn-type":[{"value":"1877-0509","type":"print"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Memory vs. Attention: Investigating LSTM and Transformer Models in Human Action Recognition","name":"articletitle","label":"Article Title"},{"value":"Procedia Computer Science","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.procs.2026.03.113","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 The Author(s). Published by Elsevier B.V.","name":"copyright","label":"Copyright"}]}}