{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,16]],"date-time":"2025-07-16T11:52:12Z","timestamp":1752666732884,"version":"3.37.3"},"reference-count":46,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"},{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"am","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61936003","61673182","61771199"],"award-info":[{"award-number":["61936003","61673182","61771199"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"crossref","award":["2016YFB1001405"],"award-info":[{"award-number":["2016YFB1001405"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100003453","name":"Natural Science Foundation of Guangdong Province","doi-asserted-by":"publisher","award":["2017A030312006"],"award-info":[{"award-number":["2017A030312006"]}],"id":[{"id":"10.13039\/501100003453","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Foundation of Guangdong Science and Technology Department (GDSTP)","award":["2017A010101027"],"award-info":[{"award-number":["2017A010101027"]}]},{"DOI":"10.13039\/501100010843","name":"Guangzhou Science, Technology and Innovation Commission","doi-asserted-by":"publisher","award":["201704020134"],"award-info":[{"award-number":["201704020134"]}],"id":[{"id":"10.13039\/501100010843","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Chinese Scholarship Council (CSC)"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2019]]},"DOI":"10.1109\/access.2019.2937344","type":"journal-article","created":{"date-parts":[[2019,8,26]],"date-time":"2019-08-26T23:49:37Z","timestamp":1566863377000},"page":"121212-121222","source":"Crossref","is-referenced-by-count":8,"title":["Human Action Recognition in Unconstrained Trimmed Videos Using Residual Attention Network and Joints Path Signature"],"prefix":"10.1109","volume":"7","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8108-7915","authenticated-orcid":false,"given":"Tasweer","family":"Ahmad","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5456-0957","authenticated-orcid":false,"given":"Lianwen","family":"Jin","sequence":"additional","affiliation":[]},{"given":"Jialuo","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Guozhi","family":"Tang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.84"},{"journal-title":"The Iisignature Package","year":"2018","author":"benjamin","key":"ref38"},{"key":"ref33","article-title":"Sparse arrays of signatures for online character recognition","author":"graham","year":"2013","journal-title":"arXiv 1308 0371 [cs]"},{"key":"ref32","first-page":"223","article-title":"Sound compression: A rough path approach","author":"lyons","year":"2005","journal-title":"Proc Int Symp Commun Inf Technol"},{"key":"ref31","article-title":"Calculation of iterated-integral signatures and log signatures","author":"reizenstein","year":"2017","journal-title":"arXiv 1712 02757"},{"key":"ref30","article-title":"Interpretable spatio-temporal attention for video action recognition","author":"meng","year":"2018","journal-title":"arXiv 1810 04511"},{"key":"ref37","article-title":"Leveraging the path signature for skeleton-based human action recognition","author":"yang","year":"2017","journal-title":"arXiv 1707 03993"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2015.7333821"},{"key":"ref35","article-title":"Methods of combining multiple classifiers based on different representations for pen-based handwritten digit recognition","author":"alimoglu","year":"1996","journal-title":"Proc 15th Turkish Artif Intell Artif Neural Netw Symp (TAINN)"},{"key":"ref34","article-title":"Rotation invariants of two dimensional curves based on iterated integrals","author":"diehl","year":"2013","journal-title":"arXiv 1305 6883"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2712608"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00151"},{"key":"ref11","first-page":"4247","article-title":"Title learning latent subevents in activity videos using temporal attention filters","author":"piergiovanni","year":"2017","journal-title":"Proc 31st AAAI Conf Artif Intell"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.122"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126386"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"ref15","article-title":"UCF101: A dataset of 101 human actions classes from videos in the wild","author":"soomro","year":"2012","journal-title":"arXiv preprint arXiv 1212 0402"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2013.436"},{"key":"ref17","article-title":"The kinetics human action video dataset","author":"kay","year":"2017","journal-title":"arXiv 1705 06950"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298878"},{"key":"ref28","first-page":"34","article-title":"Attentional pooling for action recognition","author":"girdhar","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N16-1174"},{"key":"ref3","first-page":"568","article-title":"Two-stream convolutional networks for action recognition in videos","author":"simonyan","year":"2014","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00710"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/WACV.2016.7477591"},{"key":"ref8","article-title":"Very deep convolutional networks for large-scale image recognition","author":"simonyan","year":"2014","journal-title":"arXiv 1409 1556"},{"key":"ref7","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"ioffe","year":"2015","journal-title":"arXiv 1502 03167"},{"key":"ref2","article-title":"Towards good practices for very deep two-stream convnets","author":"wang","year":"2015","journal-title":"ArXiv 1507 02159"},{"key":"ref9","first-page":"4278","article-title":"Inception-v4, inception-resnet and the impact of residual connections on learning","author":"szegedy","year":"2017","journal-title":"Proc 31st AAAI Conf Artif Intell"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref46","first-page":"3218","article-title":"P-CNN: Pose-based CNN features for action recognition","author":"cheron","year":"2015","journal-title":"Proc IEEE Int Conf Comput Vis (ICCV)"},{"key":"ref20","article-title":"Temporal 3D ConvNets: New architecture and transfer learning for video classification","author":"diba","year":"2017","journal-title":"arXiv 1711 08200"},{"journal-title":"Computational Rough Paths Software library","year":"2019","key":"ref45"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00054"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.236"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46466-4_3"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00631"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.683"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00983"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.396"},{"key":"ref26","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref43","first-page":"4898","article-title":"Understanding the effective receptive field in deep convolutional neural networks","author":"luo","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst (NIPS)"},{"key":"ref25","first-page":"2048","article-title":"Show, attend and tell: Neural image caption generation with visual attention","author":"xu","year":"2017","journal-title":"Proc Int Conf Mach Learn"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"https:\/\/ieeexplore.ieee.org\/ielaam\/6287639\/8600701\/8812659-aam.pdf","content-type":"application\/pdf","content-version":"am","intended-application":"syndication"},{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8600701\/08812659.pdf?arnumber=8812659","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,8]],"date-time":"2022-04-08T18:53:50Z","timestamp":1649444030000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8812659\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/access.2019.2937344","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2019]]}}}