{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,31]],"date-time":"2026-01-31T04:39:51Z","timestamp":1769834391332,"version":"3.49.0"},"publisher-location":"Cham","reference-count":35,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031919787","type":"print"},{"value":"9783031919794","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-91979-4_20","type":"book-chapter","created":{"date-parts":[[2025,5,31]],"date-time":"2025-05-31T19:06:45Z","timestamp":1748718405000},"page":"268-278","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Famba-V: Fast Vision Mamba with\u00a0Cross-Layer Token 
Fusion"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-1777-9027","authenticated-orcid":false,"given":"Hui","family":"Shen","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2790-0290","authenticated-orcid":false,"given":"Zhongwei","family":"Wan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0007-6483-9357","authenticated-orcid":false,"given":"Xin","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7002-6757","authenticated-orcid":false,"given":"Mi","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,12]]},"reference":[{"key":"20_CR1","unstructured":"Bolya, D., Fu, C.Y., Dai, X., Zhang, P., Feichtenhofer, C., Hoffman, J.: Token merging: your ViT but faster. In: The Eleventh International Conference on Learning Representations (2022)"},{"key":"20_CR2","doi-asserted-by":"crossref","unstructured":"Cao, Q., Paranjape, B., Hajishirzi, H.: PuMer: Pruning and merging tokens for efficient vision language models. arXiv preprint arXiv:2305.17530 (2023)","DOI":"10.18653\/v1\/2023.acl-long.721"},{"key":"20_CR3","unstructured":"Dao, T., Gu, A.: Transformers are SSMs: Generalized models and efficient algorithms through structured state space duality. arXiv preprint arXiv:2405.21060 (2024)"},{"key":"20_CR4","doi-asserted-by":"crossref","unstructured":"Ding, Y., et al.: Towards accurate post-training quantization for vision transformer. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 5380\u20135388 (2022)","DOI":"10.1145\/3503161.3547826"},{"key":"20_CR5","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (2020)"},{"key":"20_CR6","unstructured":"Gu, A., Dao, T.: Mamba: Linear-time sequence modeling with selective state spaces. 
arXiv preprint arXiv:2312.00752 (2023)"},{"key":"20_CR7","unstructured":"Gu, A., Goel, K., R\u00e9, C.: Efficiently modeling long sequences with structured state spaces. arXiv preprint arXiv:2111.00396 (2021)"},{"key":"20_CR8","first-page":"572","volume":"34","author":"A Gu","year":"2021","unstructured":"Gu, A., et al.: Combining recurrent, convolutional, and continuous-time models with linear state space layers. Adv. Neural. Inf. Process. Syst. 34, 572\u2013585 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"20_CR9","unstructured":"Gupta, A., Gu, A., Berant, J.: Diagonal state spaces are as effective as structured state spaces. Advances in Neural Information Processing Systems (2022)"},{"key":"20_CR10","doi-asserted-by":"crossref","unstructured":"Kong, Z., et\u00a0al.: SPViT: enabling faster vision transformers via latency-aware soft token pruning. In: European Conference on Computer Vision, pp. 620\u2013640. Springer (2022)","DOI":"10.1007\/978-3-031-20083-0_37"},{"key":"20_CR11","unstructured":"Krizhevsky, A., Hinton, G., et\u00a0al.: Learning multiple layers of features from tiny images. University of Toronto (2009)"},{"key":"20_CR12","doi-asserted-by":"crossref","unstructured":"Lei, X., Zhang, W., Cao, W.: DVMSR: Distillated vision mamba for efficient super-resolution. arXiv preprint arXiv:2405.03008 (2024)","DOI":"10.1109\/CVPRW63382.2024.00653"},{"key":"20_CR13","unstructured":"Liang, Y., Ge, C., Tong, Z., Song, Y., Wang, J., Xie, P.: Not all patches are what you need: Expediting vision transformers via token reorganizations. arXiv preprint arXiv:2202.07800 (2022)"},{"key":"20_CR14","doi-asserted-by":"crossref","unstructured":"Liu, Y., Yang, H., Dong, Z., Keutzer, K., Du, L., Zhang, S.: NoisyQuant: noisy bias-enhanced post-training activation quantization for vision transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
20321\u201320330 (2023)","DOI":"10.1109\/CVPR52729.2023.01946"},{"key":"20_CR15","unstructured":"Mehta, H., Gupta, A., Cutkosky, A., Neyshabur, B.: Long range language modeling via gated state spaces. In: International Conference on Learning Representations (2023)"},{"key":"20_CR16","doi-asserted-by":"crossref","unstructured":"Meng, L., et al.: AdaViT: adaptive vision transformers for efficient image recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12309\u201312318 (2022)","DOI":"10.1109\/CVPR52688.2022.01199"},{"key":"20_CR17","doi-asserted-by":"crossref","unstructured":"Pei, X., Huang, T., Xu, C.: EfficientVMamba: Atrous selective scan for light weight visual mamba. arXiv preprint arXiv:2403.09977 (2024)","DOI":"10.1609\/aaai.v39i6.32690"},{"key":"20_CR18","unstructured":"Qin, S., et al.: MambaVC: Learned visual compression with selective state spaces. arXiv preprint arXiv:2405.15413 (2024)"},{"key":"20_CR19","first-page":"13937","volume":"34","author":"Y Rao","year":"2021","unstructured":"Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., Hsieh, C.J.: DynamicViT: efficient vision transformers with dynamic token sparsification. Adv. Neural. Inf. Process. Syst. 34, 13937\u201313949 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"20_CR20","unstructured":"Ren, L., Liu, Y., Lu, Y., Shen, Y., Liang, C., Chen, W.: Samba: Simple hybrid state space models for efficient unlimited context language modeling. arXiv preprint arXiv:2406.07522 (2024)"},{"key":"20_CR21","doi-asserted-by":"crossref","unstructured":"Tang, Y., et al.: Patch slimming for efficient vision transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12165\u201312174 (2022)","DOI":"10.1109\/CVPR52688.2022.01185"},{"key":"20_CR22","unstructured":"Tao, C., et al.: Scaling laws with vocabulary: Larger models deserve larger vocabularies. 
arXiv preprint arXiv:2407.13623 (2024)"},{"key":"20_CR23","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"20_CR24","unstructured":"Wan, Z., et\u00a0al.: Efficient large language models: A survey. arXiv preprint arXiv:2312.03863 (2023)"},{"key":"20_CR25","unstructured":"Wan, Z., et al.: D2O: Dynamic discriminative operations for efficient generative inference of large language models. arXiv preprint arXiv:2406.13035 (2024)"},{"key":"20_CR26","doi-asserted-by":"crossref","unstructured":"Wan, Z., et al.: LOOK-M: Look-once optimization in KV cache for efficient multimodal long-context inference. arXiv preprint arXiv:2406.18139 (2024)","DOI":"10.18653\/v1\/2024.findings-emnlp.235"},{"key":"20_CR27","doi-asserted-by":"crossref","unstructured":"Wang, J., et al.: Selective structured state-spaces for long-form video understanding. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6387\u20136397 (2023)","DOI":"10.1109\/CVPR52729.2023.00618"},{"key":"20_CR28","unstructured":"Wang, X., Zheng, Y., Wan, Z., Zhang, M.: SVD-LLM: Truncation-aware singular value decomposition for large language model compression. arXiv preprint arXiv:2403.07378 (2024)"},{"key":"20_CR29","doi-asserted-by":"crossref","unstructured":"Yang, H., Yin, H., Shen, M., Molchanov, P., Li, H., Kautz, J.: Global vision transformer pruning with hessian-aware saliency. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18547\u201318557 (2023)","DOI":"10.1109\/CVPR52729.2023.01779"},{"key":"20_CR30","unstructured":"Yao, J., Hong, D., Li, C., Chanussot, J.: SpectralMamba: Efficient mamba for hyperspectral image classification. arXiv preprint arXiv:2404.08489 (2024)"},{"key":"20_CR31","doi-asserted-by":"crossref","unstructured":"Yin, H., Vahdat, A., Alvarez, J.M., Mallya, A., Kautz, J., Molchanov, P.: A-ViT: adaptive tokens for efficient vision transformer. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10809\u201310818 (2022)","DOI":"10.1109\/CVPR52688.2022.01054"},{"key":"20_CR32","doi-asserted-by":"crossref","unstructured":"Yuan, Z., Xue, C., Chen, Y., Wu, Q., Sun, G.: PTQ4ViT: post-training quantization for vision transformers with twin uniform quantization. In: European Conference on Computer Vision, pp. 191\u2013207. Springer (2022)","DOI":"10.1007\/978-3-031-19775-8_12"},{"key":"20_CR33","unstructured":"Zhang, Y., Liu, Y., Miao, D., Zhang, Q., Shi, Y., Hu, L.: MG-ViT: a multi-granularity method for compact and efficient vision transformers. Adv. Neural Inf. Process. Syst. 36 (2024)"},{"key":"20_CR34","unstructured":"Zhu, L., Liao, B., Zhang, Q., Wang, X., Liu, W., Wang, X.: Vision mamba: Efficient visual representation learning with bidirectional state space model. arXiv preprint arXiv:2401.09417 (2024)"},{"key":"20_CR35","unstructured":"Zhu, M., Tang, Y., Han, K.: Vision transformer pruning. 
arXiv preprint arXiv:2104.08500 (2021)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-91979-4_20","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,31]],"date-time":"2025-05-31T19:07:08Z","timestamp":1748718428000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-91979-4_20"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031919787","9783031919794"],"references-count":35,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-91979-4_20","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"12 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}