{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,24]],"date-time":"2026-01-24T07:45:09Z","timestamp":1769240709293,"version":"3.49.0"},"reference-count":81,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62125603"],"award-info":[{"award-number":["62125603"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62321005"],"award-info":[{"award-number":["62321005"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62336004"],"award-info":[{"award-number":["62336004"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of 
China","doi-asserted-by":"publisher","award":["62441616"],"award-info":[{"award-number":["62441616"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004826","name":"Beijing Natural Science Foundation","doi-asserted-by":"publisher","award":["L247009"],"award-info":[{"award-number":["L247009"]}],"id":[{"id":"10.13039\/501100004826","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2026,1]]},"DOI":"10.1109\/tpami.2025.3603181","type":"journal-article","created":{"date-parts":[[2025,8,28]],"date-time":"2025-08-28T18:09:25Z","timestamp":1756404565000},"page":"33-46","source":"Crossref","is-referenced-by-count":1,"title":["Efficient High-Order Spatial Interactions for Visual Perception"],"prefix":"10.1109","volume":"48","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-6943-3085","authenticated-orcid":false,"given":"Zuyan","family":"Liu","sequence":"first","affiliation":[{"name":"Department of Automation, Beijing National Research Center for Information Science and Technology (BNRist), State Key Lab of Intelligent Technologies and Systems, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3952-8753","authenticated-orcid":false,"given":"Yongming","family":"Rao","sequence":"additional","affiliation":[{"name":"Department of Automation, Beijing National Research Center for Information Science and Technology (BNRist), State Key Lab of Intelligent Technologies and Systems, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0920-1576","authenticated-orcid":false,"given":"Wenliang","family":"Zhao","sequence":"additional","affiliation":[{"name":"Department of Automation, Beijing National Research Center for Information Science and Technology (BNRist), State Key Lab of Intelligent Technologies and 
Systems, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7701-234X","authenticated-orcid":false,"given":"Jie","family":"Zhou","sequence":"additional","affiliation":[{"name":"Department of Automation, Beijing National Research Center for Information Science and Technology (BNRist), State Key Lab of Intelligent Technologies and Systems, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6121-5529","authenticated-orcid":false,"given":"Jiwen","family":"Lu","sequence":"additional","affiliation":[{"name":"Department of Automation, Beijing National Research Center for Information Science and Technology (BNRist), State Key Lab of Intelligent Technologies and Systems, Tsinghua University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.170"},{"key":"ref2","article-title":"Are we done with ImageNet?","author":"Beyer","year":"2020"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00644"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00511"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01104"},{"key":"ref6","first-page":"1","article-title":"Vision transformer adapter for dense predictions","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Chen","year":"2023"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"ref8","first-page":"17864","article-title":"Per-pixel classification is not all you need for semantic segmentation","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Cheng","year":"2021"},{"key":"ref9","first-page":"9355","article-title":"Twins: Revisiting the design of spatial attention in vision transformers","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Chu","year":"2021"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01324"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.261"},{"key":"ref12","first-page":"3965","article-title":"CoAtNet: Marrying convolution and attention for all data sizes","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Dai","year":"2021"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52688.2022.01166"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01181"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1088\/1742-5468\/ac9830"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-023-0364-2"},{"key":"ref20","article-title":"Demystifying local vision transformer: Sparse connectivity, weight sharing, and dynamic weight","author":"Han","year":"2021"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00823"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01501"},{"key":"ref25","article-title":"MobileNets: Efficient convolutional neural networks for mobile vision 
applications","author":"Howard","year":"2017"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00745"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01112"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00090"},{"key":"ref29","article-title":"OpenClip","author":"Ilharco","year":"2021"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.5555\/3045118.3045167"},{"key":"ref31","first-page":"667","article-title":"Dynamic filter networks","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Jia","year":"2016"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.2104.10858"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58558-7_29"},{"key":"ref34","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Krizhevsky","year":"2012"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1989.1.4.541"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00126"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3074057"},{"key":"ref38","first-page":"828","article-title":"PointCNN: Convolution on X-transformed points","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Li","year":"2018"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.106"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01170"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref44","first-page":"1","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Loshchilov","year":"2019"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00089"},{"key":"ref46","first-page":"652","article-title":"PointNet: Deep learning on point sets for 3D classification and segmentation","volume-title":"Proc. IEEE Conf. Comput. Vis. Pattern Recognit.","author":"Qi","year":"2017"},{"key":"ref47","first-page":"5099","article-title":"PointNet++: Deep hierarchical feature learning on point sets in a metric space","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Qi","year":"2017"},{"key":"ref48","first-page":"23192","article-title":"PointNeXt: Revisiting PointNet with improved training and scaling strategies","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Qian","year":"2022"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00180"},{"key":"ref50","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref51","first-page":"10353","article-title":"HorNet: Efficient high-order spatial interactions with recursive gated convolutions","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Rao","year":"2022"},{"key":"ref52","first-page":"980","article-title":"Global filter networks for image classification","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Rao","year":"2021"},{"key":"ref53","first-page":"5389","article-title":"Do ImageNet classifiers generalize to ImageNet?","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Recht","year":"2019"},{"key":"ref54","first-page":"8583","article-title":"Scaling vision with sparse mixture of experts","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Riquelme","year":"2021"},{"key":"ref55","first-page":"25278","article-title":"LAION-5B: An open large-scale dataset for training next generation image-text models","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Schuhmann","year":"2022"},{"key":"ref56","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.4324\/9781410605337-29"},{"key":"ref58","first-page":"6105","article-title":"EfficientNet: Rethinking model scaling for convolutional neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Tan","year":"2019"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00830"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00651"},{"key":"ref61","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"Int. Conf. Mach. Learn. Proc. Mach. Learn. Res.","author":"Touvron","year":"2021"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20053-3_27"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref64","first-page":"10506","article-title":"Learning robust global representations by penalizing local predictive power","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Wang","year":"2019"},{"key":"ref65","article-title":"Linformer: Self-attention with linear complexity","author":"Wang","year":"2020"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1145\/3326362"},{"key":"ref68","article-title":"PyTorch image models","author":"Wightman","year":"2019"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"ref70","first-page":"1","article-title":"CLIPSelf: Vision transformer distills itself for open-vocabulary dense prediction","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wu","year":"2024"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01228-1_26"},{"key":"ref72","first-page":"30392","article-title":"Early convolutions help transformers see better","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Xiao","year":"2021"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00333"},{"key":"ref74","first-page":"4203","article-title":"Focal modulation networks","volume":"35","author":"Yang","year":"2022","journal-title":"NeurIPS"},{"key":"ref75","first-page":"30008","article-title":"Focal attention for long-range interactions in vision transformers","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yang","year":"2021"},{"key":"ref76","first-page":"1","article-title":"Large batch optimization for deep learning: Training BERT in 76 minutes","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"You","year":"2020"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52688.2022.01055"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.5244\/C.30.87"},{"key":"ref80","first-page":"1","article-title":"DINO: DETR with improved denoising anchor boxes for end-to-end object detection","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang","year":"2023"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.544"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/11275622\/11142586.pdf?arnumber=11142586","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,4]],"date-time":"2025-12-04T07:58:39Z","timestamp":1764835119000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11142586\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1]]},"references-count":81,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2025.3603181","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,1]]}}}