{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T14:06:02Z","timestamp":1774533962642,"version":"3.50.1"},"reference-count":30,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2019,1,1]],"date-time":"2019-01-01T00:00:00Z","timestamp":1546300800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Major Project of Philosophy and Social Science Research in Colleges and Universities of Jiangsu Province: Research on Design of Human-Machine Interaction Under the Application of AI Technology","award":["2019SJZDA118"],"award-info":[{"award-number":["2019SJZDA118"]}]},{"name":"Innovation and Development of Industrial Design Driven by Big Data","award":["2018SJZDA015"],"award-info":[{"award-number":["2018SJZDA015"]}]},{"DOI":"10.13039\/501100004028","name":"Jiangnan University","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004028","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002663","name":"Northwestern Polytechnical University","doi-asserted-by":"publisher","award":["2019JDZD02"],"award-info":[{"award-number":["2019JDZD02"]}],"id":[{"id":"10.13039\/501100002663","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2019]]},"DOI":"10.1109\/access.2019.2952432","type":"journal-article","created":{"date-parts":[[2019,11,8]],"date-time":"2019-11-08T21:45:48Z","timestamp":1573249548000},"page":"163806-163813","source":"Crossref","is-referenced-by-count":12,"title":["Motion Recognition Algorithm Based on Deep Edge-Aware Pyramid Pooling Network in Human\u2013Computer Interaction"],"prefix":"10.1109","volume":"7","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1311-2170","authenticated-orcid":false,"given":"Lijun","family":"Xu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7942-7650","authenticated-orcid":false,"given":"Shengzan","family":"Yan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5682-9879","authenticated-orcid":false,"given":"Xiang","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6871-9834","authenticated-orcid":false,"given":"Peng","family":"Wang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-019-7356-3"},{"key":"ref10","first-page":"568","article-title":"Two-stream convolutional networks for action recognition in videos","author":"simonyan","year":"2014","journal-title":"Proc Conf Neural Inf Process Syst"},{"key":"ref11","first-page":"4694","article-title":"Beyond short snippets: Deep networks for video classification","author":"ng","year":"2015","journal-title":"Proc IEEE Conf Comput Vis Pattern Recognit"},{"key":"ref12","first-page":"22","article-title":"Temporal segment networks: Towards good practices for deep action recognition","author":"wang","year":"2016","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299059"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref15","first-page":"12","author":"murphy","year":"2012","journal-title":"Machine Learning A Probabilistic 
Perspective"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/0004-3702(81)90024-2"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1088\/1742-6596\/787\/1\/012008"},{"key":"ref18","first-page":"12","article-title":"Competition track evaluation setup","author":"jiang","year":"2018","journal-title":"Proc 1st Int Workshop Action Recognit Large Number Classes"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s12221-018-8019-0"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2018.08.002"},{"key":"ref4","first-page":"3551","article-title":"Action recognition with improved trajectories","author":"wang","year":"2014","journal-title":"Proc IEEE Int Conf Comput Vis"},{"key":"ref27","first-page":"123","article-title":"Learning to recognise 3D human action from a new skeleton-based representation using deep convolutional neural networks","volume":"24","author":"pham","year":"2018","journal-title":"IET Comput Vis"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.83"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0859-0"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1049\/iet-bmt.2017.0134"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"109","DOI":"10.1016\/j.cviu.2016.03.013","article-title":"Bag of visual words and fusion methods for action recognition: Comprehensive study and good practice","volume":"150","author":"peng","year":"2016","journal-title":"Comput Vis Image Understand"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.223"},{"key":"ref2","first-page":"1","author":"forsyth","year":"2011","journal-title":"Computer Vision A Modern Approach"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2712608"},{"key":"ref1","article-title":"Unsupervised object-level video summarization with online motion auto-encoder","author":"zhang","year":"0","journal-title":"Pattern Recognit Lett"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.3390\/s18041061"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2018.02.028"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2017.2705227"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2018.2856094"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2791180"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1021\/acsami.7b15386"},{"key":"ref25","first-page":"1387","article-title":"Gesture recognition based on Kinect v2 and leap motion data fusion","volume":"77","author":"li","year":"2018","journal-title":"Int J Pattern Recognit Artif Intell"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/8600701\/08894402.pdf?arnumber=8894402","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,27]],"date-time":"2022-01-27T00:35:36Z","timestamp":1643243736000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8894402\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/access.2019.2952432","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2019]]}}}