{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,13]],"date-time":"2026-03-13T15:12:23Z","timestamp":1773414743726,"version":"3.50.1"},"reference-count":68,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,8,25]],"date-time":"2025-08-25T00:00:00Z","timestamp":1756080000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,8,25]],"date-time":"2025-08-25T00:00:00Z","timestamp":1756080000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100011993","name":"Robert Bosch","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100011993","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,8,25]]},"DOI":"10.1109\/ro-man63969.2025.11217528","type":"proceedings-article","created":{"date-parts":[[2025,11,3]],"date-time":"2025-11-03T18:42:29Z","timestamp":1762195349000},"page":"477-477","source":"Crossref","is-referenced-by-count":1,"title":["How do Foundation Models Compare to Skeleton-Based Approaches for Gesture Recognition in Human-Robot Interaction?"],"prefix":"10.1109","author":[{"given":"Stephanie","family":"K\u00e4s","sequence":"first","affiliation":[{"name":"RWTH Aachen University,Chair for Computer Vision,Germany"}]},{"given":"Anton","family":"Burenko","sequence":"additional","affiliation":[{"name":"RWTH Aachen University,Chair for Computer Vision,Germany"}]},{"given":"Louis","family":"Markert","sequence":"additional","affiliation":[{"name":"RWTH Aachen University,Chair for Computer Vision,Germany"}]},{"given":"Onur Alp","family":"\u00c7ulha","sequence":"additional","affiliation":[{"name":"RWTH Aachen University,Chair for Computer Vision,Germany"}]},{"given":"Dennis","family":"Mack","sequence":"additional","affiliation":[{"name":"Corporate Research & Bosch Center for AI,Robert Bosch GmbH,Renningen and Hildesheim,Germany"}]},{"given":"Timm","family":"Linder","sequence":"additional","affiliation":[{"name":"Corporate Research & Bosch Center for AI,Robert Bosch GmbH,Renningen and Hildesheim,Germany"}]},{"given":"Bastian","family":"Leibe","sequence":"additional","affiliation":[{"name":"RWTH Aachen University,Chair for Computer Vision,Germany"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/FG.2011.5771448"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2019.07.103"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.3390\/app112411951"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341214"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2017.8297033"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.3390\/s23125555"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/3DV50981.2020.00072"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CAC.2018.8623035"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3568294.3580126"},{"key":"ref10","article-title":"A fast-response dynamic-static parallel attention GCN network for body\u2013hand gesture recognition in HRI","author":"Guo","year":"2023","journal-title":"TIE"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TBIOM.2020.3037257"},{"key":"ref12","article-title":"Revisiting feature prediction for learning visual representations from video","author":"Bardes","year":"2024","journal-title":"Transactions on Machine Learning Research"},{"key":"ref13","article-title":"DINOv2: Learning robust visual features without supervision","author":"Oquab","year":"2024","journal-title":"TMLR"},{"key":"ref14","article-title":"The Gemini 2.0 model family"},{"key":"ref15","article-title":"GPT-4: OpenAI\u2019s language model","year":"2025"},{"key":"ref16","article-title":"Foundation models for video understanding: A survey","author":"Madan","year":"2024"},{"key":"ref17","article-title":"VideoGLUE: Video general understanding evaluation of foundation models","author":"Yuan","year":"2024","journal-title":"Transactions on Machine Learning Research"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00958"},{"key":"ref19","article-title":"Gemini: A family of highly capable multimodal models","year":"2023"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCC.2007.893280"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2025.3593428"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s40747-023-01173-6"},{"key":"ref23","article-title":"Robust dynamic gesture recognition at ultra-long distances","author":"Beeri","year":"2024"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-11012-3_9"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/AIRPHARO52252.2021.9571027"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160416"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1115\/1.4054297"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCAR.2019.8813509"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2011.6094592"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2013.6696416"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.3390\/app12010258"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/j.ergon.2017.02.004"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-024-20576-2"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2009.11.014"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1016\/j.vrih.2021.05.001"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3524497"},{"key":"ref37","article-title":"TSGCNeXt: Dynamic-static multigraph convolution for efficient skeleton-based action recognition with long-term learning potential","author":"Liu","year":"2023"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/3628797.3629008"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01645"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01254"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00516"},{"key":"ref42","article-title":"MotionGPT: Human motion as a foreign language","volume":"36","author":"Jiang","year":"2024","journal-title":"NeurIPS"},{"key":"ref43","article-title":"MotionGPT-2: A general-purpose motion-language model for motion generation and understanding","author":"Wang","year":"2024"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00297"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA55743.2025.11128613"},{"key":"ref47","article-title":"A proposed set of communicative gestures for human robot interaction and an RGB image-based gesture recognizer implemented in ROS","author":"Tan","year":"2022","journal-title":"ICRA"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-22216-0_3"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/iccv.2009.5459184"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR.2016.7899766"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/fgr.2006.8"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.18196\/jrc.v4i6.20399"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00944"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00942"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2021.01.036"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10888090"},{"key":"ref57","article-title":"Few-shot action recognition with captioning foundation models","author":"Wang","year":"2023"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1117\/12.3013530"},{"key":"ref59","article-title":"motpy - simple multi object tracking library","author":"Muro\u0144","year":"2021"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2916873"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3378886"},{"key":"ref62","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2021","journal-title":"ICLR"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.3390\/make5040083"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref65","article-title":"Revisiting feature prediction for learning visual representations from video","author":"Bardes","year":"2024","journal-title":"Transactions on Machine Learning Research"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02245"},{"key":"ref67","article-title":"Mistral 7B","author":"Jiang","year":"2023"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00794"}],"event":{"name":"2025 34th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)","location":"Eindhoven, Netherlands","start":{"date-parts":[[2025,8,25]]},"end":{"date-parts":[[2025,8,29]]}},"container-title":["2025 34th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11217544\/11217526\/11217528.pdf?arnumber=11217528","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,4]],"date-time":"2025-11-04T06:10:05Z","timestamp":1762236605000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11217528\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,25]]},"references-count":68,"URL":"https:\/\/doi.org\/10.1109\/ro-man63969.2025.11217528","relation":{},"subject":[],"published":{"date-parts":[[2025,8,25]]}}}