{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,12]],"date-time":"2025-07-12T01:13:13Z","timestamp":1752282793596,"version":"3.37.3"},"reference-count":43,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003213","name":"Beijing Municipal Education Commission","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003213","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100011347","name":"State Key Laboratory of Software Development Environment","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100011347","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/icpr56361.2022.9956581","type":"proceedings-article","created":{"date-parts":[[2022,11,29]],"date-time":"2022-11-29T19:34:13Z","timestamp":1669750453000},"page":"2964-2970","source":"Crossref","is-referenced-by-count":2,"title":["TMN: Temporal-guided Multiattention Network for Action Recognition"],"prefix":"10.1109","author":[{"given":"Yongkang","family":"Zhang","sequence":"first","affiliation":[{"name":"Capital Normal University,Information Engineering College,Beijing,China,100048"}]},{"given":"Han","family":"Zhang","sequence":"additional","affiliation":[{"name":"Capital Normal University,Information Engineering College,Beijing,China,100048"}]},{"given":"Guoming","family":"Wu","sequence":"additional","affiliation":[{"name":"Capital Normal University,Information Engineering College,Beijing,China,100048"}]},{"given":"Yangfan","family":"Xu","sequence":"additional","affiliation":[{"name":"Capital Normal University,Information Engineering College,Beijing,China,100048"}]},{"given":"Zhiping","family":"Shi","sequence":"additional","affiliation":[{"name":"Capital Normal University,Information Engineering College,Beijing,China,100048"}]},{"given":"Jun","family":"Li","sequence":"additional","affiliation":[{"name":"Capital Normal University,Information Engineering College,Beijing,China,100048"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref39","DOI":"10.1007\/978-3-030-89029-2_20"},{"doi-asserted-by":"publisher","key":"ref38","DOI":"10.1109\/ICCV.2017.622"},{"key":"ref33","article-title":"Video modeling with correlation networks","author":"wang","year":"2019","journal-title":"CVPR"},{"doi-asserted-by":"publisher","key":"ref32","DOI":"10.1109\/ICCV.2019.00561"},{"year":"2019","author":"fan","article-title":"More is less: Learning efficient video representations by big-little network and depthwise temporal aggregation","key":"ref31"},{"key":"ref30","article-title":"Eco: Efficient convolutional network for online video understanding","author":"zolfaghari","year":"2018","journal-title":"ECCV"},{"year":"2012","author":"soomro","article-title":"Ucf101: A dataset of 101 human actions classes from videos in the wild","key":"ref37"},{"doi-asserted-by":"publisher","key":"ref36","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"ref35","article-title":"Tam:Temporal adaptive module for video recognition","author":"liu","year":"2021","journal-title":"ICCV"},{"doi-asserted-by":"publisher","key":"ref34","DOI":"10.1109\/CVPR42600.2020.00117"},{"key":"ref10","article-title":"Object detection networks on convolutional feature maps","author":"ren","year":"2016","journal-title":"TPAMI"},{"doi-asserted-by":"publisher","key":"ref40","DOI":"10.1007\/978-3-030-98355-0_10"},{"doi-asserted-by":"publisher","key":"ref11","DOI":"10.1109\/CVPR.2016.396"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/CVPR.2017.601"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1109\/TMI.2019.2913184"},{"doi-asserted-by":"publisher","key":"ref14","DOI":"10.1109\/ITSC45078.2019.9086430"},{"key":"ref15","article-title":"Hypercolumns for object segmentation and fine-grained localization","author":"hariharan","year":"2015","journal-title":"CVPR"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1109\/CVPR.2017.106"},{"year":"2018","author":"hu","journal-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition","key":"ref17"},{"key":"ref18","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"NIPS"},{"year":"2020","author":"dosovitskiy","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","key":"ref19"},{"doi-asserted-by":"publisher","key":"ref28","DOI":"10.1609\/aaai.v33i01.33018401"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1109\/CVPR46437.2021.01301"},{"key":"ref27","article-title":"Spatiotemporal channel correlation networks for action classification","author":"diba","year":"2018","journal-title":"ECCV"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1109\/ICCV.2019.00718"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref29","article-title":"Temporal relational reasoning in videos","author":"zhou","year":"2018","journal-title":"ECCV"},{"year":"2014","author":"simonyan","article-title":"Two-stream convolutional networks for action recognition in videos","key":"ref5"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.1109\/CVPR.2019.00875"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref2","article-title":"Temporal segment networks for action recognition in videos","author":"wang","year":"2018","journal-title":"TPAMI"},{"doi-asserted-by":"publisher","key":"ref9","DOI":"10.1109\/CVPR42600.2020.00312"},{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1109\/CVPR46437.2021.00193"},{"key":"ref20","article-title":"Pyramid attention network for semantic segmentation","author":"li","year":"2018","journal-title":"BMVC"},{"key":"ref22","article-title":"Appearance-and-relation networks for video classification","author":"wang","year":"2017","journal-title":"CVPR"},{"doi-asserted-by":"publisher","key":"ref21","DOI":"10.1109\/CVPR.2019.00154"},{"doi-asserted-by":"publisher","key":"ref42","DOI":"10.1609\/aaai.v34i07.6836"},{"doi-asserted-by":"publisher","key":"ref24","DOI":"10.1109\/ICCV.2019.00209"},{"doi-asserted-by":"publisher","key":"ref41","DOI":"10.1109\/CVPR42600.2020.00067"},{"key":"ref23","article-title":"A closer look at spatiotemporal convolutions for action recognition","author":"du","year":"2018","journal-title":"CVPR"},{"key":"ref26","article-title":"Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification","author":"xie","year":"2017","journal-title":"ECCV"},{"doi-asserted-by":"publisher","key":"ref43","DOI":"10.1109\/ICME52920.2022.9859741"},{"doi-asserted-by":"publisher","key":"ref25","DOI":"10.1109\/CVPR42600.2020.00099"}],"event":{"name":"2022 26th International Conference on Pattern Recognition (ICPR)","start":{"date-parts":[[2022,8,21]]},"location":"Montreal, QC, Canada","end":{"date-parts":[[2022,8,25]]}},"container-title":["2022 26th International Conference on Pattern Recognition (ICPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9956007\/9955631\/09956581.pdf?arnumber=9956581","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,19]],"date-time":"2022-12-19T20:05:23Z","timestamp":1671480323000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9956581\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":43,"URL":"https:\/\/doi.org\/10.1109\/icpr56361.2022.9956581","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}