{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T09:48:50Z","timestamp":1742982530209,"version":"3.40.3"},"publisher-location":"Cham","reference-count":53,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031729393"},{"type":"electronic","value":"9783031729409"}],"license":[{"start":{"date-parts":[[2024,11,17]],"date-time":"2024-11-17T00:00:00Z","timestamp":1731801600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,17]],"date-time":"2024-11-17T00:00:00Z","timestamp":1731801600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72940-9_15","type":"book-chapter","created":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T20:42:21Z","timestamp":1731789741000},"page":"258-274","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Stitched ViTs are Flexible Vision Backbones"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1717-7844","authenticated-orcid":false,"given":"Zizheng","family":"Pan","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6745-3050","authenticated-orcid":false,"given":"Jing","family":"Liu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1505-0284","authenticated-orcid":false,"given":"Haoyu","family":"He","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9444-3763","authenticated-orcid":false,"given":"Jianfei","family":"Cai","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0074-0303","authenticated-orcid":false,"given":"Bohan","family":"Zhuang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,17]]},"reference":[{"key":"15_CR1","doi-asserted-by":"crossref","unstructured":"Bansal, Y., Nakkiran, P., Barak, B.: Revisiting model stitching to compare neural representations. In: NeurIPS, pp. 225\u2013236 (2021)","DOI":"10.1088\/1742-5468\/ac3a74"},{"key":"15_CR2","unstructured":"Bao, H., Dong, L., Piao, S., Wei, F.: Beit: BERT pre-training of image transformers. In: ICLR (2022)"},{"key":"15_CR3","unstructured":"Brown, T., et al.: Language models are few-shot learners. In: NeurIPS, vol. 33, pp. 1877\u20131901 (2020)"},{"key":"15_CR4","doi-asserted-by":"crossref","unstructured":"Caesar, H., Uijlings, J., Ferrari, V.: Coco-stuff: thing and stuff classes in context. In: CVPR, pp. 1209\u20131218 (2018)","DOI":"10.1109\/CVPR.2018.00132"},{"key":"15_CR5","doi-asserted-by":"crossref","unstructured":"Caron, M., et al.: Emerging properties in self-supervised vision transformers. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"15_CR6","doi-asserted-by":"crossref","unstructured":"Chen, G., Liu, F., Meng, Z., Liang, S.: Revisiting parameter-efficient tuning: are we really there yet? In: EMNLP (2022)","DOI":"10.18653\/v1\/2022.emnlp-main.168"},{"key":"15_CR7","unstructured":"Chen, S., et al.: Adaptformer: adapting vision transformers for scalable visual recognition. 
In: NeurIPS (2022)"},{"key":"15_CR8","doi-asserted-by":"crossref","unstructured":"Chen, X., Xie, S., He, K.: An empirical study of training self-supervised vision transformers. In: ICCV, pp. 9620\u20139629 (2021)","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"15_CR9","unstructured":"Chen, Z., et al.: Vision transformer adapter for dense predictions. In: ICLR (2023)"},{"key":"15_CR10","unstructured":"Csisz\u00e1rik, A., Kor\u00f6si-Szab\u00f3, P., Matszangosz, \u00c1.K., Papp, G., Varga, D.: Similarity and matching of neural network representations. In: NeurIPS, pp. 5656\u20135668 (2021)"},{"key":"15_CR11","unstructured":"Dehghani, M., et\u00a0al.: Scaling vision transformers to 22 billion parameters. arXiv (2023)"},{"key":"15_CR12","doi-asserted-by":"crossref","unstructured":"Dong, X., et al.: PeCo: perceptual codebook for BERT pre-training of vision transformers. In: AAAI (2023)","DOI":"10.1609\/aaai.v37i1.25130"},{"key":"15_CR13","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. ICLR (2021)"},{"key":"15_CR14","doi-asserted-by":"crossref","unstructured":"Fang, Y., et al.: Eva: exploring the limits of masked visual representation learning at scale. arXiv (2022)","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"15_CR15","doi-asserted-by":"crossref","unstructured":"Fang, Y., Yang, S., Wang, S., Ge, Y., Shan, Y., Wang, X.: Unleashing vanilla vision transformer with masked image modeling for object detection. arXiv (2022)","DOI":"10.1109\/ICCV51070.2023.00574"},{"key":"15_CR16","unstructured":"Fu, Z., Yang, H., So, A.M.C., Lam, W., Bing, L., Collier, N.: On the effectiveness of parameter-efficient fine-tuning. arXiv (2022)"},{"key":"15_CR17","unstructured":"Gao, P., Ma, T., Li, H., Lin, Z., Dai, J., Qiao, Y.: MCMAE: masked convolution meets masked autoencoders. In: NeurIPS (2022)"},{"key":"15_CR18","doi-asserted-by":"crossref","unstructured":"He, K., Chen, X., Xie, S., Li, Y., Doll\u00e1r, P., Girshick, R.B.: Masked autoencoders are scalable vision learners. In: CVPR, pp. 15979\u201315988 (2022)","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"15_CR19","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: ICCV, pp. 2961\u20132969 (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"15_CR20","unstructured":"Hu, E.J., et al.: LoRa: low-rank adaptation of large language models. In: ICLR. OpenReview.net (2022)"},{"key":"15_CR21","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"709","DOI":"10.1007\/978-3-031-19827-4_41","volume-title":"ECCV 2022","author":"M Jia","year":"2022","unstructured":"Jia, M., et al.: Visual prompt tuning. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13693, pp. 709\u2013727. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19827-4_41"},{"key":"15_CR22","unstructured":"Kaplan, J., et al.: Scaling laws for neural language models. arXiv (2020)"},{"key":"15_CR23","doi-asserted-by":"crossref","unstructured":"Kirillov, A., et al.: Segment anything. arXiv (2023)","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"15_CR24","doi-asserted-by":"crossref","unstructured":"Lenc, K., Vedaldi, A.: Understanding image representations by measuring their equivariance and equivalence. In: CVPR, pp. 
991\u2013999 (2015)","DOI":"10.1109\/CVPR.2015.7298701"},{"key":"15_CR25","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"280","DOI":"10.1007\/978-3-031-20077-9_17","volume-title":"ECCV 2022","author":"Y Li","year":"2022","unstructured":"Li, Y., Mao, H., Girshick, R.B., He, K.: Exploring plain vision transformer backbones for object detection. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13669, pp. 280\u2013296. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20077-9_17"},{"key":"15_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"15_CR27","unstructured":"Liu, J., Cai, J., Zhuang, B.: FocusFormer: focusing on what we need via architecture sampler. arXiv (2022)"},{"key":"15_CR28","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: ICCV, pp. 9992\u201310002 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"15_CR29","unstructured":"Mahabadi, R.K., Henderson, J., Ruder, S.: Compacter: efficient low-rank hypercomplex adapter layers. In: NeurIPS, pp. 1022\u20131035 (2021)"},{"key":"15_CR30","unstructured":"Oquab, M., et al.: DINOv2: learning robust visual features without supervision. arXiv (2023)"},{"key":"15_CR31","doi-asserted-by":"crossref","unstructured":"Pan, Z., Cai, J., Zhuang, B.: Stitchable neural networks. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01545"},{"key":"15_CR32","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763 (2021)"},{"issue":"8","key":"15_CR33","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019)","journal-title":"OpenAI blog"},{"key":"15_CR34","doi-asserted-by":"crossref","unstructured":"Ranftl, R., Bochkovskiy, A., Koltun, V.: Vision transformers for dense prediction. In: ICCV, pp. 12179\u201312188 (2021)","DOI":"10.1109\/ICCV48922.2021.01196"},{"key":"15_CR35","doi-asserted-by":"crossref","unstructured":"Russakovsky, O., et\u00a0al.: ImageNet large scale visual recognition challenge. IJCV, 211\u2013252 (2015)","DOI":"10.1007\/s11263-015-0816-y"},{"key":"15_CR36","unstructured":"Schuhmann, C., et\u00a0al.: LAION-5B: an open large-scale dataset for training next generation image-text models. In: NeurIPS (2022)"},{"key":"15_CR37","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"746","DOI":"10.1007\/978-3-642-33715-4_54","volume-title":"Computer Vision \u2013 ECCV 2012","author":"N Silberman","year":"2012","unstructured":"Silberman, N., Hoiem, D., Kohli, P., Fergus, R.: Indoor segmentation and support inference from RGBD images. In: Fitzgibbon, A., Lazebnik, S., Perona, P., Sato, Y., Schmid, C. (eds.) ECCV 2012. LNCS, vol. 7576, pp. 746\u2013760. Springer, Heidelberg (2012). 
https:\/\/doi.org\/10.1007\/978-3-642-33715-4_54"},{"key":"15_CR38","doi-asserted-by":"crossref","unstructured":"Strudel, R., Pinel, R.G., Laptev, I., Schmid, C.: Segmenter: transformer for semantic segmentation. In: ICCV, pp. 7242\u20137252 (2021)","DOI":"10.1109\/ICCV48922.2021.00717"},{"key":"15_CR39","doi-asserted-by":"crossref","unstructured":"Sun, C., Shrivastava, A., Singh, S., Gupta, A.: Revisiting unreasonable effectiveness of data in deep learning era. In: ICCV, pp. 843\u2013852 (2017)","DOI":"10.1109\/ICCV.2017.97"},{"key":"15_CR40","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: ICML, pp. 10347\u201310357 (2021)"},{"key":"15_CR41","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"516","DOI":"10.1007\/978-3-031-20053-3_30","volume-title":"ECCV 2022","author":"H Touvron","year":"2022","unstructured":"Touvron, H., Cord, M., J\u00e9gou, H.: DeiT III: revenge of the ViT. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13684, pp. 516\u2013533. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20053-3_30"},{"key":"15_CR42","doi-asserted-by":"crossref","unstructured":"Wang, D., Li, M., Gong, C., Chandra, V.: AttentiveNAS: improving neural architecture search via attentive sampling. In: CVPR, pp. 6418\u20136427 (2021)","DOI":"10.1109\/CVPR46437.2021.00635"},{"key":"15_CR43","doi-asserted-by":"crossref","unstructured":"Xie, Z., et al.: SimMIM: a simple framework for masked image modeling. In: CVPR, pp. 9653\u20139663 (2022)","DOI":"10.1109\/CVPR52688.2022.00943"},{"key":"15_CR44","unstructured":"Yang, X., Zhou, D., Liu, S., Ye, J., Wang, X.: Deep model reassembly. In: NeurIPS (2022)"},{"key":"15_CR45","doi-asserted-by":"crossref","unstructured":"Yu, J., Huang, T.S.: Universally slimmable networks and improved training techniques. In: ICCV, pp. 1803\u20131811 (2019)","DOI":"10.1109\/ICCV.2019.00189"},{"key":"15_CR46","unstructured":"Yu, J., Wang, Z., Vasudevan, V., Yeung, L., Seyedhosseini, M., Wu, Y.: CoCa: contrastive captioners are image-text foundation models. arXiv (2022)"},{"key":"15_CR47","unstructured":"Yu, J., Yang, L., Xu, N., Yang, J., Huang, T.S.: Slimmable neural networks. In: ICLR (2019)"},{"key":"15_CR48","doi-asserted-by":"crossref","unstructured":"Zhai, X., Kolesnikov, A., Houlsby, N., Beyer, L.: Scaling vision transformers. In: CVPR, pp. 12104\u201312113 (2022)","DOI":"10.1109\/CVPR52688.2022.01179"},{"key":"15_CR49","unstructured":"Zhang, B., et al.: SegViT: semantic segmentation with plain vision transformers. In: NeurIPS (2022)"},{"key":"15_CR50","unstructured":"Zhang, S., et\u00a0al.: OPT: open pre-trained transformer language models. Arxiv (2022)"},{"key":"15_CR51","doi-asserted-by":"crossref","unstructured":"Zheng, S., et\u00a0al.: Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In: CVPR, pp. 6881\u20136890 (2021)","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"15_CR52","doi-asserted-by":"crossref","unstructured":"Zhou, B., Zhao, H., Puig, X., Fidler, S., Barriuso, A., Torralba, A.: Scene parsing through ADE20K dataset. In: CVPR, pp. 633\u2013641 (2017)","DOI":"10.1109\/CVPR.2017.544"},{"key":"15_CR53","unstructured":"Zhou, J., et al.: Image BERT pre-training with online tokenizer. 
In: ICLR (2022)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72940-9_15","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T21:34:02Z","timestamp":1731792842000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72940-9_15"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,17]]},"ISBN":["9783031729393","9783031729409"],"references-count":53,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72940-9_15","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,11,17]]},"assertion":[{"value":"17 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}