{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T02:30:46Z","timestamp":1730255446415,"version":"3.28.0"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T00:00:00Z","timestamp":1715558400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100006190","name":"Research and Development","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006190","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,5,13]]},"DOI":"10.1109\/icra57147.2024.10610601","type":"proceedings-article","created":{"date-parts":[[2024,8,8]],"date-time":"2024-08-08T17:51:05Z","timestamp":1723139465000},"page":"12397-12403","source":"Crossref","is-referenced-by-count":0,"title":["AnyOKP: One-Shot and Instance-Aware Object Keypoint Extraction with Pretrained ViT"],"prefix":"10.1109","author":[{"given":"Fangbo","family":"Qin","sequence":"first","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Automation,Beijing,China,100190"}]},{"given":"Taogang","family":"Hou","sequence":"additional","affiliation":[{"name":"Beijing Jiaotong University,School of Electronic and Information Engineering,Beijing,China,100044"}]},{"given":"Shan","family":"Lin","sequence":"additional","affiliation":[{"name":"University of California,Department of Electrical and Computer Engineering,San Diego, La Jolla,CA,USA,92093"}]},{"given":"Kaiyuan","family":"Wang","sequence":"additional","affiliation":[{"name":"University of California,Department of Electrical and Computer Engineering,San Diego, La Jolla,CA,USA,92093"}]},{"given":"Michael C.","family":"Yip","sequence":"additional","affiliation":[{"name":"University of California,Department of Electrical and Computer Engineering,San Diego, La Jolla,CA,USA,92093"}]},{"given":"Shan","family":"Yu","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,Institute of Automation,Beijing,China,100190"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.3026970"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2019.2909081"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196830"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3096156"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3062560"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.3026963"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196971"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9812299"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2021.3069998"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3151981"},{"key":"ref11","first-page":"373","article-title":"Dense Object Nets: Learning Dense Visual Object Descriptors By and For Robotic Manipulation","volume-title":"Proc. Conf. Robot Learn","author":"Florence"},{"key":"ref12","first-page":"550","article-title":"One-shot transfer of affordance regions? affcorrs!","volume-title":"Proc. Conf. Robot Learn","author":"Hadjivelichkov"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9340914"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2022.3181054"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00745"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00615"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00499"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i5.20482"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58536-5_42"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2018.00060"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561168"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2023.3248112"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00828"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00881"},{"issue":"3","key":"ref25","article-title":"Deep ViT features as dense visual descriptors","volume":"2","author":"Amir","year":"4"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/3197517.3201332"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_38"},{"article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Represent","author":"Dosovitskiy","key":"ref28"},{"key":"ref29","first-page":"23296","article-title":"Intriguing properties of vision transformers","volume-title":"Proc. Advances Neural Inf. Process. Syst","author":"Naseer"},{"key":"ref30","first-page":"12116","article-title":"Do vision transformers see like convolutional neural networks?","volume-title":"Proc. Advances Neural Inf. Process. Syst","author":"Raghu"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48891.2023.10160537"},{"key":"ref32","first-page":"416","article-title":"Real-world robot learning with masked visual pre-training","volume-title":"Proc. Conf. Robot Learn","author":"Radosavovic"},{"article-title":"Attention is all you need","volume-title":"Proc. Advances Neural Inf. Process. Syst","author":"Vaswani","key":"ref33"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"article-title":"Dinov2: Learning robust visual features without supervision","year":"2023","author":"Oquab","key":"ref36"}],"event":{"name":"2024 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2024,5,13]]},"location":"Yokohama, Japan","end":{"date-parts":[[2024,5,17]]}},"container-title":["2024 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10609961\/10609862\/10610601.pdf?arnumber=10610601","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,10]],"date-time":"2024-08-10T05:51:35Z","timestamp":1723269095000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10610601\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,13]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/icra57147.2024.10610601","relation":{},"subject":[],"published":{"date-parts":[[2024,5,13]]}}}