{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T17:51:23Z","timestamp":1771955483891,"version":"3.50.1"},"reference-count":17,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2018AAA0102200"],"award-info":[{"award-number":["2018AAA0102200"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62122018,61772116,61872064"],"award-info":[{"award-number":["62122018,61772116,61872064"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7,18]]},"DOI":"10.1109\/icme52920.2022.9859787","type":"proceedings-article","created":{"date-parts":[[2022,8,26]],"date-time":"2022-08-26T19:45:18Z","timestamp":1661543118000},"page":"01-06","source":"Crossref","is-referenced-by-count":17,"title":["MKE-GCN: Multi-Modal Knowledge Embedded Graph Convolutional Network for Skeleton-Based Action Recognition in the Wild"],"prefix":"10.1109","author":[{"given":"Sen","family":"Yang","sequence":"first","affiliation":[{"name":"University of Electronic Science and Technology of China,China"}]},{"given":"Xuanhan","family":"Wang","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China,China"}]},{"given":"Lianli","family":"Gao","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China,China"}]},{"given":"Jingkuan","family":"Song","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China,China"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01281"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.115"},{"key":"ref12","article-title":"Ntu rgb+ d 120: A large-scale benchmark for 3d human activity under-standing","author":"liu","year":"2019","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00810"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58621-8_25"},{"key":"ref15","article-title":"Decoupling gcn with drop-graph module for skeleton-based action recognition","author":"cheng","year":"2020","journal-title":"ECCV"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00022"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413941"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00026"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01311"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01600"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICME51207.2021.9428213"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01230"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"ref2","article-title":"Graph convolutional hour-glass networks for skeleton-based action recognition","author":"zhu","year":"2021","journal-title":"ICME"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00119"},{"key":"ref9","article-title":"Fitnets: Hints for thin deep nets","author":"romero","year":"0","journal-title":"ArXiv Preprint"}],"event":{"name":"2022 IEEE International Conference on Multimedia and Expo (ICME)","location":"Taipei, Taiwan","start":{"date-parts":[[2022,7,18]]},"end":{"date-parts":[[2022,7,22]]}},"container-title":["2022 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9859562\/9858923\/09859787.pdf?arnumber=9859787","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,19]],"date-time":"2022-09-19T20:25:20Z","timestamp":1663619120000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9859787\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,18]]},"references-count":17,"URL":"https:\/\/doi.org\/10.1109\/icme52920.2022.9859787","relation":{},"subject":[],"published":{"date-parts":[[2022,7,18]]}}}