{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,13]],"date-time":"2026-01-13T05:15:49Z","timestamp":1768281349945,"version":"3.49.0"},"reference-count":99,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62525103"],"award-info":[{"award-number":["62525103"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["624B2082"],"award-info":[{"award-number":["624B2082"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62271281"],"award-info":[{"award-number":["62271281"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62441235"],"award-info":[{"award-number":["62441235"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62571294"],"award-info":[{"award-number":["62571294"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. 
Intell."],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1109\/tpami.2025.3616854","type":"journal-article","created":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T17:38:56Z","timestamp":1759340336000},"page":"1373-1389","source":"Crossref","is-referenced-by-count":1,"title":["CAIT: Triple-Win\n                    <u>C<\/u>\n                    ompression Toward High\n                    <u>A<\/u>\n                    ccuracy, Fast\n                    <u>I<\/u>\n                    nference, and Favorable\n                    <u>T<\/u>\n                    ransferability for ViTs"],"prefix":"10.1109","volume":"48","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0030-551X","authenticated-orcid":false,"given":"Ao","family":"Wang","sequence":"first","affiliation":[{"name":"BNRist, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4180-5801","authenticated-orcid":false,"given":"Hui","family":"Chen","sequence":"additional","affiliation":[{"name":"BNRist, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1390-7424","authenticated-orcid":false,"given":"Zijia","family":"Lin","sequence":"additional","affiliation":[{"name":"School of Software, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5843-6411","authenticated-orcid":false,"given":"Sicheng","family":"Zhao","sequence":"additional","affiliation":[{"name":"BNRist, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4361-956X","authenticated-orcid":false,"given":"Jungong","family":"Han","sequence":"additional","affiliation":[{"name":"Department of Automation, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0137-9975","authenticated-orcid":false,"given":"Guiguang","family":"Ding","sequence":"additional","affiliation":[{"name":"BNRist, Tsinghua University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"An image is worth 16 \u00d7 16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Dosovitskiy","year":"2021"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3152247"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3268446"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3329173"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref6","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Touvron","year":"2021"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3330016"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3207091"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3202765"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2023.3256763"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3183612"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01212"},{"key":"ref13","article-title":"Emergent abilities of large language models","volume-title":"Trans. Mach. Learn. Res.","author":"Wei","year":"2022"},{"issue":"1","key":"ref14","article-title":"Palm: Scaling language modeling with pathways","volume":"24","author":"Chowdhery","year":"2023","journal-title":"J. Mach. Learn. 
Res."},{"key":"ref15","article-title":"GPT-4 technical report","author":"OpenAI","year":"2024"},{"key":"ref16","article-title":"Pali: A jointly-scaled multilingual language-image model","volume-title":"11th Int. Conf. Learn. Representations","author":"Chen","year":"2023"},{"key":"ref17","first-page":"8583","article-title":"Scaling vision with sparse mixture of experts","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Riquelme","year":"2021"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01179"},{"key":"ref19","first-page":"7480","article-title":"Scaling vision transformers to 22 billion parameters","volume-title":"Proc. 40th Int. Conf. Mach. Learn., ser.Proc. Mach. Learn. Res.","author":"Dehghani","year":"2023"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref21","first-page":"19974","article-title":"Chasing sparsity in vision transformers: An end-to-end exploration","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Chen","year":"2021"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20083-0_37"},{"key":"ref23","article-title":"Unified visual transformer compression","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Yu","year":"2022"},{"key":"ref24","first-page":"9010","article-title":"Savit: Structure-aware vision transformer pruning via collaborative optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Zheng","year":"2022"},{"key":"ref25","first-page":"13937","article-title":"Dynamicvit: Efficient vision transformers with dynamic token sparsification","volume":"34","author":"Rao","year":"2021","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref26","article-title":"Vision transformer pruning","author":"Zhu","year":"2021"},{"key":"ref27","first-page":"18 547","article-title":"Nvit: Vision transformer compression and parameter redistribution","volume-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","author":"Yang","year":"2023"},{"key":"ref28","first-page":"13974","article-title":"VTC-LFC: Vision transformer compression with low-frequency components","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Wang","year":"2022"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01185"},{"key":"ref30","article-title":"EVit: Expediting vision transformers via token reorganizations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Liang","year":"2022"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICME52920.2022.9859786"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref33","first-page":"24898","article-title":"IA-RED2: Interpretability-aware redundancy reduction for vision transformers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Pan","year":"2021"},{"key":"ref34","article-title":"Token merging: Your vit but faster","volume-title":"Proc. 11th Int. Conf. Learn. 
Representations","author":"Bolya","year":"2023"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00208"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01228-1_26"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2956516"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2019.00270"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20202"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00600"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3355890"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01186"},{"key":"ref46","article-title":"Beit: BERT pre-training of image transformers","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bao","year":"2022"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00041"},{"key":"ref49","first-page":"20014","article-title":"XCiT: Cross-covariance image transformers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Ali","year":"2021"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01204"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20053-3_17"},{"key":"ref52","article-title":"When vision transformers outperform resnets without pre-training or strong data augmentations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Chen","year":"2022"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-92659-5_34"},{"key":"ref54","article-title":"Deformable detr: Deformable transformers for end-to-end object detection","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhu","year":"2021"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00165"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00290"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01474"},{"key":"ref58","article-title":"Training vision transformers for image retrieval","author":"El-Nouby","year":"2021"},{"key":"ref59","first-page":"17864","article-title":"Per-pixel classification is not all you need for semantic segmentation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Cheng","year":"2021"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00863"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3168697"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00583"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00831"},{"key":"ref65","article-title":"Tokenlearner: What can 8 learned tokens do for images and videos?","author":"Ryoo","year":"2021","journal-title":"Adv. Neural Inf. Process. 
Syst."},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01183"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20083-0_26"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/309"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00447"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00447"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/336"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01197"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298681"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.5555\/3157096.3157329"},{"key":"ref75","article-title":"fvcore library","year":"2019"},{"key":"ref76","article-title":"Cp-vit: Cascade vision transformer pruning via progressive sparsity prediction","author":"Song","year":"2022"},{"key":"ref77","article-title":"PyTorch 2: Faster machine learning through dynamic Python bytecode transformation and graph compilation","volume-title":"Proc. 29th ACM Int. Conf. Architectural Support Program. Lang. Operating Syst.","volume":"2","author":"Ansel","year":"2024"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00656"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2017.544"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2104.10858"},{"key":"ref81","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov","year":"2019"},{"key":"ref82","article-title":"Mmsegmentation: Openmmlab semantic segmentation toolbox and benchmark","author":"Contributors","year":"2020"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref84","article-title":"Designing bert for convolutional networks: Sparse and hierarchical masked modeling","volume-title":"Proc. 11th Int. Conf. Learn. Representations","author":"Tian","year":"2023"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.278"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00616"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.7303\/SYN3193805"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2018.2837502"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1016\/j.compmedimag.2015.02.007"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-37734-2_37"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/TGRS.2022.3176603"},{"key":"ref94","article-title":"2D semantic labeling contest - Potsdam","year":"2021"},{"key":"ref95","first-page":"28","article-title":"iSAID: A large-scale dataset for instance segmentation in aerial images","volume-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit. 
Workshops","author":"Zamir","year":"2019"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01574"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01386"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/11345188\/11186163.pdf?arnumber=11186163","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,12]],"date-time":"2026-01-12T22:01:04Z","timestamp":1768255264000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11186163\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2]]},"references-count":99,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2025.3616854","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2]]}}}