{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T04:01:53Z","timestamp":1775016113501,"version":"3.50.1"},"reference-count":84,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T00:00:00Z","timestamp":1733011200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62225208"],"award-info":[{"award-number":["62225208"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62171431"],"award-info":[{"award-number":["62171431"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1109\/tpami.2024.3429508","type":"journal-article","created":{"date-parts":[[2024,7,24]],"date-time":"2024-07-24T18:29:44Z","timestamp":1721845784000},"page":"9766-9779","source":"Crossref","is-referenced-by-count":16,"title":["Fast-iTPN: Integrally Pre-Trained Transformer Pyramid Network With Token Migration"],"prefix":"10.1109","volume":"46","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5103-3748","authenticated-orcid":false,"given":"Yunjie","family":"Tian","sequence":"first","affiliation":[{"name":"School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4831-9451","authenticated-orcid":false,"given":"Lingxi","family":"Xie","sequence":"additional","affiliation":[{"name":"Huawei Inc., Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-4585-3386","authenticated-orcid":false,"given":"Jihao","family":"Qiu","sequence":"additional","affiliation":[{"name":"School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0454-3929","authenticated-orcid":false,"given":"Jianbin","family":"Jiao","sequence":"additional","affiliation":[{"name":"School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2197-9038","authenticated-orcid":false,"given":"Yaowei","family":"Wang","sequence":"additional","affiliation":[{"name":"Peng Cheng Laboratory, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7252-5047","authenticated-orcid":false,"given":"Qi","family":"Tian","sequence":"additional","affiliation":[{"name":"Huawei Inc., Shenzhen, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1215-6259","authenticated-orcid":false,"given":"Qixiang","family":"Ye","sequence":"additional","affiliation":[{"name":"School of Electronic, Electrical and Communication Engineering, University of Chinese Academy of Sciences, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"An image is worth 16 \u00d7 16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Dosovitskiy"},{"key":"ref2","article-title":"BEiT: BERT pre-training of image transformers","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bao"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00943"},{"key":"ref5","article-title":"ConvMAE: Masked convolution meets masked autoencoders","author":"Gao","year":"2022"},{"key":"ref6","first-page":"19997","article-title":"Green hierarchical vision transformer for masked image modeling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Huang"},{"key":"ref7","article-title":"HiViT: A simpler and more efficient design of hierarchical vision transformer","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang"},{"key":"ref8","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00852"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3065386"},{"key":"ref13","first-page":"6105","article-title":"EfficientNet: Rethinking model scaling for convolutional neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Tan"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"ref18","first-page":"16664","article-title":"Adaptformer: Adapting vision transformers for scalable visual recognition","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen"},{"key":"ref19","first-page":"12934","article-title":"EfficientFormer: Vision transformers at mobilenet speed","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00009"},{"key":"ref21","first-page":"3965","article-title":"CoatNet: Marrying convolution and attention for all data sizes","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Dai"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01785"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref24","first-page":"19160","article-title":"Container: Context aggregation networks","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","author":"Lu"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00476"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-022-0274-8"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01181"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3206108"},{"key":"ref29","article-title":"Focal self-attention for local-global interactions in vision transformers","author":"Yang","year":"2021"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20053-3_27"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.106"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00042"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.167"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46466-4_5"},{"key":"ref35","article-title":"Unsupervised representation learning by predicting image rotations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gidaris"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.278"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46487-9_40"},{"key":"ref38","article-title":"Beyond masking: Demystifying token-based pre-training for vision transformers","author":"Tian","year":"2022"},{"key":"ref39","article-title":"Semantic-aware generation for self-supervised visual representation learning","author":"Tian","year":"2021"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.5555\/3495724.3497510"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.5555\/3524938.3525087"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref44","first-page":"9912","article-title":"Unsupervised learning of visual features by contrasting cluster assignments","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Caron"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01641"},{"key":"ref46","first-page":"22682","article-title":"Aligning pretraining for detection via object-level contrastive learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Wei"},{"key":"ref47","first-page":"14290","article-title":"SemMAE: Semantic-guided masking for learning masked autoencoders","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20056-4_14"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19821-2_26"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20056-4_7"},{"key":"ref51","article-title":"CAE V2: Context autoencoder with clip target","author":"Zhang","year":"2022"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25252"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02178"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01426"},{"key":"ref55","article-title":"Efficient self-supervised vision pretraining with local masked reconstruction","author":"Chen","year":"2022"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-023-01852-4"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref58","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. 
Neural Inf. Process. Syst.","author":"Brown"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"ref60","article-title":"EVA-02: A visual representation for neon genesis","author":"Fang","year":"2023"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01385"},{"key":"ref62","article-title":"Axial attention in multidimensional transformers","author":"Ho","year":"2019"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00069"},{"key":"ref64","first-page":"9355","article-title":"Twins: Revisiting the design of spatial attention in vision transformers","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chu"},{"key":"ref65","first-page":"12992","article-title":"Glance-and-gaze vision transformer","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yu"},{"key":"ref66","article-title":"Distilling the knowledge in a neural network","author":"Hinton","year":"2015"},{"key":"ref67","first-page":"1195","article-title":"Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Tarvainen"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20056-4_20"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-023-01852-4"},{"key":"ref70","article-title":"Token merging: Your ViT but faster","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bolya"},{"key":"ref71","article-title":"Layer normalization","author":"Ba","year":"2016"},{"key":"ref72","article-title":"Searching for activation functions","author":"Ramachandran","year":"2017"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i1.25130"},{"key":"ref74","first-page":"1298","article-title":"data2vec: A general framework for self-supervised learning in speech, vision and language","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Baevski"},{"key":"ref75","article-title":"BEiT v2: Masked image modeling with vector-quantized visual tokenizers","author":"Peng","year":"2022"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.01838"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20077-9_17"},{"key":"ref78","article-title":"An empirical study of training self-supervised visual transformers","author":"Chen","year":"2021"},{"key":"ref79","article-title":"iBOT: Image BERT pre-training with online tokenizer","author":"Zhou","year":"2021"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01228-1_26"},{"key":"ref81","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00418"},{"key":"ref84","first-page":"91","article-title":"Faster R-CNN: Towards real-time object detection with region proposal networks","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","author":"Ren"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/10746266\/10609333.pdf?arnumber=10609333","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:54Z","timestamp":1732665654000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10609333\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12]]},"references-count":84,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2024.3429508","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12]]}}}