{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,28]],"date-time":"2026-01-28T08:26:50Z","timestamp":1769588810324,"version":"3.49.0"},"reference-count":78,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Centre for Perceptual and Interactive Intelligence"},{"DOI":"10.13039\/501100003452","name":"Innovation and Technology Commission","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003452","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Research Impact Fund","award":["R5001-18"],"award-info":[{"award-number":["R5001-18"]}]},{"name":"Hong Kong RGC. XiaogangWang is a PI of CPII"},{"name":"InnoHK"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1109\/tpami.2024.3425768","type":"journal-article","created":{"date-parts":[[2024,7,11]],"date-time":"2024-07-11T17:53:22Z","timestamp":1720720402000},"page":"9521-9535","source":"Crossref","is-referenced-by-count":11,"title":["TCFormer: Visual Recognition via Token Clustering Transformer"],"prefix":"10.1109","volume":"46","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1562-6332","authenticated-orcid":false,"given":"Wang","family":"Zeng","sequence":"first","affiliation":[{"name":"Chinese University of Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5736-7434","authenticated-orcid":false,"given":"Sheng","family":"Jin","sequence":"additional","affiliation":[{"name":"University of Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2125-2760","authenticated-orcid":false,"given":"Lumin","family":"Xu","sequence":"additional","affiliation":[{"name":"Chinese University of Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6587-9878","authenticated-orcid":false,"given":"Wentao","family":"Liu","sequence":"additional","affiliation":[{"name":"SenseTime Research, Shatin, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8761-5563","authenticated-orcid":false,"given":"Chen","family":"Qian","sequence":"additional","affiliation":[{"name":"SenseTime Research, Shatin, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9163-2761","authenticated-orcid":false,"given":"Wanli","family":"Ouyang","sequence":"additional","affiliation":[{"name":"University of Sydney, Camperdown, NSW, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6685-7950","authenticated-orcid":false,"given":"Ping","family":"Luo","sequence":"additional","affiliation":[{"name":"University of Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8402-7504","authenticated-orcid":false,"given":"Xiaogang","family":"Wang","sequence":"additional","affiliation":[{"name":"Chinese University of Hong Kong, Hong Kong"}]}],"member":"263","reference":[{"key":"ref1","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Dosovitskiy"},{"key":"ref2","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Touvron"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00010"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref6","article-title":"Deformable DETR: Deformable transformers for end-to-end object detection","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhu"},{"key":"ref7","article-title":"DINO: DETR with improved denoising anchor boxes for end-to-end object detection","author":"Zhang","year":"2022"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref9","first-page":"12077","article-title":"SegFormer: Simple and efficient design for semantic segmentation with transformers","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Xie"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00717"},{"key":"ref12","article-title":"Vision transformer adapter for dense predictions","author":"Chen","year":"2022"},{"key":"ref13","first-page":"7281","article-title":"HRFormer: High-resolution vision transformer for dense predict","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yuan"},{"key":"ref14","article-title":"ViTPose: Simple vision transformer baselines for human pose estimation","author":"Xu","year":"2022"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01082"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-022-0274-8"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01199"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00584"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.106"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref22","first-page":"13937","article-title":"DynamicViT: Efficient vision transformers with dynamic token sparsification","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Rao"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00462"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20202"},{"key":"ref25","first-page":"11960","article-title":"Not all images are worth 16 x 16 words: Dynamic transformers for efficient image recognition","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Wang"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00044"},{"key":"ref27","article-title":"Token merging: Your ViT but faster","author":"Bolya","year":"2022"},{"key":"ref28","first-page":"35462","article-title":"Expediting large-scale vision transformer for dense prediction without fine-tuning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Liang"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/iros55552.2023.10342025"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2016.02.001"},{"key":"ref31","article-title":"PyTorch image models","author":"Wightman","year":"2019"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.634"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01044"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"ref36","article-title":"Transformer in transformer","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Han"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00983"},{"key":"ref38","first-page":"9355","article-title":"Twins: Revisiting the design of spatial attention in vision transformers","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Chu"},{"key":"ref39","first-page":"23495","article-title":"Inception transformer","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Si"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.4324\/9781410605337-29"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref43","article-title":"mixup: Beyond empirical risk minimization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00612"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.7000"},{"key":"ref46","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov"},{"key":"ref47","article-title":"SGDR: Stochastic gradient descent with warm restarts","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov"},{"key":"ref48","article-title":"OpenMMLab pose estimation toolbox and benchmark","author":"Contributors","year":"2020"},{"key":"ref49","first-page":"6981","article-title":"Single-network whole-body pose estimation","volume-title":"Proc. IEEE Conf. Comput. Vis. Pattern Recognit.","author":"Hidalgo"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2929257"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.143"},{"key":"ref52","first-page":"2277","article-title":"Associative embedding: End-to-end learning for joint detection and grouping","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Newell"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00543"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01231-1_29"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58545-7_12"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3197352"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref58","article-title":"Adam: A method for stochastic optimization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kingma"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_29"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00742"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.214"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01159"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01084"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58571-6_42"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.395"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01231-1_33"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01112"},{"key":"ref69","article-title":"Objects as points","author":"Zhou","year":"2019"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58607-2_31"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00198"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.544"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00656"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2023.3303397"},{"key":"ref75","article-title":"Vision GNN: An image is worth graph of nodes","author":"Han","year":"2022"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.324"},{"key":"ref78","article-title":"MMDetection: Open MMLab detection toolbox and benchmark","author":"Chen","year":"2019"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/10746266\/10594800.pdf?arnumber=10594800","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T23:36:27Z","timestamp":1732664187000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10594800\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12]]},"references-count":78,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2024.3425768","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12]]}}}