{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,4]],"date-time":"2026-05-04T09:58:25Z","timestamp":1777888705253,"version":"3.51.4"},"reference-count":61,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100004826","name":"Beijing Natural Science Foundation","doi-asserted-by":"publisher","award":["4252054"],"award-info":[{"award-number":["4252054"]}],"id":[{"id":"10.13039\/501100004826","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004739","name":"Youth Innovation Promotion Association CAS","doi-asserted-by":"publisher","award":["2022132"],"award-info":[{"award-number":["2022132"]}],"id":[{"id":"10.13039\/501100004739","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005090","name":"Beijing Nova Program","doi-asserted-by":"publisher","award":["20230484276"],"award-info":[{"award-number":["20230484276"]}],"id":[{"id":"10.13039\/501100005090","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iccv51701.2025.00383","type":"proceedings-article","created":{"date-parts":[[2026,4,29]],"date-time":"2026-04-29T19:45:49Z","timestamp":1777491949000},"page":"4019-4028","source":"Crossref","is-referenced-by-count":0,"title":["Semantic Equitable Clustering: A Simple and Effective Strategy for Clustering Vision Tokens"],"prefix":"10.1109","author":[{"given":"Qihang","family":"Fan","sequence":"first","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences,MAIS &#x0026; NLPR,Beijing,China"}]},{"given":"Huaibo","family":"Huang","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences,MAIS &#x0026; NLPR,Beijing,China"}]},{"given":"Mingrui","family":"Chen","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences,MAIS &#x0026; NLPR,Beijing,China"}]},{"given":"Ran","family":"He","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences,MAIS &#x0026; NLPR,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","author":"Bai","year":"2023","journal-title":"Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00644"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01311"},{"key":"ref4","author":"Chen","year":"2019","journal-title":"MMDetection: Open mmlab detection toolbox and benchmark"},{"key":"ref5","author":"Chen","year":"2023","journal-title":"Shikra: Unleashing multimodal llm\u2019s referential dialogue magic"},{"key":"ref6","article-title":"Conditional positional encodings for vision transformers","author":"Chu","year":"2023","journal-title":"ICLR"},{"key":"ref7","year":"2020","journal-title":"Mmsegmentation, an open source semantic segmentation 
toolbox"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2142"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20053-3_5"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01181"},{"key":"ref11","article-title":"An image is worth $16 \\times 16$ words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2021","journal-title":"ICLR"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.52202\/075280-0670"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00539"},{"key":"ref14","author":"Gao","year":"2023","journal-title":"Llama-adapter v2: Parameter-efficient visual instruction model"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00552"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01186"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-023-0364-2"},{"key":"ref18","article-title":"Demystify mamba in vision: A linear attention perspective","author":"Han","year":"2024","journal-title":"NeurIPS"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00599"},{"key":"ref20","article-title":"Global context vision transformers","author":"Hatamizadeh","year":"2023","journal-title":"ICML"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref22","article-title":"Vision transformer with super token sampling","author":"Huang","year":"2023","journal-title":"CVPR"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1093\/oed\/2293015977"},{"key":"ref24","article-title":"All tokens matter: Token labeling for training better vision transformers","author":"Jiang","year":"2021","journal-title":"NeurIPS"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01792"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00656"},{"key":"ref27","article-title":"Obelics: An open web-scale filtered dataset of interleaved image-text documents","author":"Lauren\u00e7on","year":"2024","journal-title":"NeurIPS"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00714"},{"key":"ref29","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","author":"Li","year":"2023","journal-title":"ICML"},{"key":"ref30","author":"Li","year":"2022","journal-title":"Uniformer: Unified transformer for efficient spatiotemporal representation learning"},{"key":"ref31","article-title":"Moganet: Multi-order gated aggregation network","author":"Li","year":"2024","journal-title":"ICLR"},{"key":"ref32","article-title":"Not all patches are what you need: Expediting vision transformers via token reorganizations","volume-title":"International Conference on Learning Representations","author":"Liang","year":"2022"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.324"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00553"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02484"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02484"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/166"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref40","article-title":"Dynamicvit: Efficient vision transformers with 
{"key":"ref40","article-title":"Dynamicvit: Efficient vision transformers with dynamic token sparsification","author":"Rao","year":"2021","journal-title":"NeurIPS"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.52202\/068431-1707"},{"key":"ref42","article-title":"Quadtree attention for vision transformers","author":"Tang","year":"2022","journal-title":"ICLR"},{"key":"ref43","article-title":"Training data-efficient image transformers & distillation through attention","author":"Touvron","year":"2021","journal-title":"ICML"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20053-3_27"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-022-0274-8"},{"key":"ref47","article-title":"Crossformer: A versatile vision transformer hinging on cross-scale attention","author":"Wang","year":"2022","journal-title":"ICLR"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01385"},{"key":"ref49","article-title":"Not all images are worth $16\\times 16$ words: Dynamic vision transformers with adaptive sequence length","author":"Wang","year":"2021","journal-title":"NeurIPS"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01548"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00475"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01228-1_26"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00943"},{"key":"ref54","article-title":"Moat: Alternating mobile convolution and attention brings strong vision models","author":"Yang","year":"2023","journal-title":"ICLR"},{"key":"ref55","article-title":"Focal self-attention for local-global interactions in vision transformers","author":"Yang","year":"2021","journal-title":"NeurIPS"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20053-3_28"},{"key":"ref57","author":"Ye","year":"2024","journal-title":"mplug-owl: Modularization empowers large language models with multimodality"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00044"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01082"},{"key":"ref60","author":"Zhu","year":"2023","journal-title":"Minigpt-4: Enhancing vision-language understanding with advanced large language models"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00995"}],"event":{"name":"2025 IEEE\/CVF International Conference on Computer Vision (ICCV)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11443115\/11443287\/11445070.pdf?arnumber=11445070","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T04:55:17Z","timestamp":1777611317000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11445070\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":61,"URL":"https:\/\/doi.org\/10.1109\/iccv51701.2025.00383","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}
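
For working with this record programmatically, the following is a minimal sketch (Python, standard library only) that fetches the same work from Crossref's public REST endpoint and pulls out the citation fields visible above. The api.crossref.org/works/{DOI} route and the message/title/author/container-title/issued field names match this record, but this is an illustration under those assumptions, not a robust client: production code should guard against missing keys and identify itself per Crossref's API etiquette (e.g., a mailto in the User-Agent).

import json
import urllib.request

# Fetch the Crossref work record shown above (assumes network access;
# /works/{doi} is Crossref's standard REST lookup route).
DOI = "10.1109/iccv51701.2025.00383"
url = f"https://api.crossref.org/works/{DOI}"

with urllib.request.urlopen(url) as resp:
    work = json.load(resp)["message"]

# Extract the fields most often needed for a citation. Note that
# "title" and "container-title" are lists, and "issued" uses
# nested date-parts, exactly as in the record above.
title = work["title"][0]
authors = ", ".join(f"{a['given']} {a['family']}" for a in work["author"])
venue = work["container-title"][0]
year = work["issued"]["date-parts"][0][0]

print(f"{authors}. \"{title}\". {venue}, {year}. doi:{work['DOI']}")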