{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T04:57:14Z","timestamp":1772859434573,"version":"3.50.1"},"reference-count":54,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2025,3,6]],"date-time":"2025-03-06T00:00:00Z","timestamp":1741219200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,3,6]],"date-time":"2025-03-06T00:00:00Z","timestamp":1741219200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1007\/s11760-025-03907-0","type":"journal-article","created":{"date-parts":[[2025,3,6]],"date-time":"2025-03-06T06:03:55Z","timestamp":1741241035000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["TranMamba: a lightweight hybrid transformer-Mamba network for single image super-resolution"],"prefix":"10.1007","volume":"19","author":[{"given":"Long","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Yi","family":"Wan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,3,6]]},"reference":[{"issue":"2","key":"3907_CR1","doi-asserted-by":"publisher","first-page":"295","DOI":"10.1109\/TPAMI.2015.2439281","volume":"38","author":"C Dong","year":"2016","unstructured":"Dong, C., Loy, C.C., He, K., Tang, X.: Image super-resolution using deep convolutional networks. IEEE Trans. Pattern Anal. Mach. Intell. 38(2), 295\u2013307 (2016). https:\/\/doi.org\/10.1109\/TPAMI.2015.2439281","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"3907_CR2","doi-asserted-by":"crossref","unstructured":"Dong, C., Loy, C.C., Tang, X.: Accelerating the super-resolution convolutional neural network. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) Computer Vision - ECCV 2016, pp. 391\u2013407. Springer, Cham (2016)","DOI":"10.1007\/978-3-319-46475-6_25"},{"key":"3907_CR3","doi-asserted-by":"publisher","unstructured":"Shi, W., Caballero, J., Husz\u00e1r, F., Totz, J., Aitken, A.P., Bishop, R., Rueckert, D., Wang, Z.: Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1874\u20131883 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.207","DOI":"10.1109\/CVPR.2016.207"},{"key":"3907_CR4","doi-asserted-by":"crossref","unstructured":"Hui, Z., Gao, X., Yang, Y., Wang, X.: Lightweight image super-resolution with information multi-distillation network. In: Proceedings of the 27th ACM International Conference on Multimedia (ACM MM), pp. 2024\u20132032 (2019)","DOI":"10.1145\/3343031.3351084"},{"key":"3907_CR5","doi-asserted-by":"crossref","unstructured":"Liu, J., Tang, J., Wu, G.: Residual feature distillation network for lightweight image super-resolution. In: Bartoli, A., Fusiello, A. (eds.) Computer Vision - ECCV 2020 Workshops, pp. 41\u201355. Springer, Cham (2020)","DOI":"10.1007\/978-3-030-67070-2_2"},{"key":"3907_CR6","doi-asserted-by":"crossref","unstructured":"Li, Z., Liu, Y., Chen, X., Cai, H., Gu, J., Qiao, Y., Dong, C.: Blueprint separable residual network for efficient image super-resolution. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pp. 833\u2013843 (2022)","DOI":"10.1109\/CVPRW56347.2022.00099"},{"key":"3907_CR7","doi-asserted-by":"publisher","unstructured":"Liang, J., Cao, J., Sun, G., Zhang, K., Van\u00a0Gool, L., Timofte, R.: Swinir: Image restoration using swin transformer. 
In: 2021 IEEE\/CVF International Conference on Computer Vision Workshops (ICCVW), pp. 1833\u20131844 (2021). https:\/\/doi.org\/10.1109\/ICCVW54120.2021.00210","DOI":"10.1109\/ICCVW54120.2021.00210"},{"key":"3907_CR8","doi-asserted-by":"crossref","unstructured":"Choi, H., Lee, J.-S., Yang, J.: N-gram in swin transformers for efficient lightweight image super-resolution. 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2071\u20132081 (2023)","DOI":"10.1109\/CVPR52729.2023.00206"},{"key":"3907_CR9","doi-asserted-by":"publisher","unstructured":"Zhou, Y., Li, Z., Guo, C.-L., Bai, S., Cheng, M.-M., Hou, Q.: Srformer: Permuted self-attention for single image super-resolution. In: 2023 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 12734\u201312745 (2023). https:\/\/doi.org\/10.1109\/ICCV51070.2023.01174","DOI":"10.1109\/ICCV51070.2023.01174"},{"key":"3907_CR10","doi-asserted-by":"crossref","unstructured":"Wang, Y., Liu, Y., Zhao, S., Li, J., Zhang, L.: CAMixerSR: Only Details Need More \"Attention\" (2024)","DOI":"10.1109\/CVPR52733.2024.02441"},{"key":"3907_CR11","doi-asserted-by":"publisher","unstructured":"Lu, Z., Li, J., Liu, H., Huang, C., Zhang, L., Zeng, T.: Transformer for single image super-resolution. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 456\u2013465 (2022). https:\/\/doi.org\/10.1109\/CVPRW56347.2022.00061","DOI":"10.1109\/CVPRW56347.2022.00061"},{"key":"3907_CR12","doi-asserted-by":"crossref","unstructured":"Chen, Z., Zhang, Y., Gu, J., Kong, L., Yang, X., Yu, F.: Dual aggregation transformer for image super-resolution. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01131"},{"key":"3907_CR13","unstructured":"Gu, A., Dao, T.: Mamba: Linear-time sequence modeling with selective state spaces. 
arXiv preprint arXiv:2312.00752 (2023)"},{"key":"3907_CR14","unstructured":"Dao, T., Gu, A.: Transformers are SSMs: Generalized models and efficient algorithms through structured state space duality. In: International Conference on Machine Learning (ICML) (2024)"},{"key":"3907_CR15","doi-asserted-by":"crossref","unstructured":"Guo, H., Li, J., Dai, T., Ouyang, Z., Ren, X., Xia, S.-T.: Mambair: A simple baseline for image restoration with state-space model. In: ECCV (2024)","DOI":"10.1007\/978-3-031-72649-1_13"},{"key":"3907_CR16","unstructured":"Lieber, O., Lenz, B.: Jamba: A hybrid transformer-mamba language model. arXiv:abs\/2403.19887 (2024)"},{"key":"3907_CR17","unstructured":"Hatamizadeh, A., Kautz, J.: Mambavision: A hybrid mamba-transformer vision backbone. arXiv preprint arXiv:2407.08083 (2024)"},{"key":"3907_CR18","doi-asserted-by":"crossref","unstructured":"Ding, X., Zhang, X., Han, J., Ding, G.: Diverse branch block: Building a convolution as an inception-like unit. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10886\u201310895 (2021)","DOI":"10.1109\/CVPR46437.2021.01074"},{"key":"3907_CR19","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3907_CR20","doi-asserted-by":"publisher","unstructured":"Zhang, A., Ren, W., Liu, Y., Cao, X.: Lightweight image super-resolution with superpixel token interaction. In: 2023 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 12682\u201312691 (2023). 
https:\/\/doi.org\/10.1109\/ICCV51070.2023.01169","DOI":"10.1109\/ICCV51070.2023.01169"},{"key":"3907_CR21","unstructured":"Gu, A., Johnson, I., Goel, K., Saab, K.K., Dao, T., Rudra, A., R\u2019e, C.: Combining recurrent, convolutional, and continuous-time models with linear state-space layers. In: Neural Information Processing Systems (2021). https:\/\/api.semanticscholar.org\/CorpusID:239998472"},{"key":"3907_CR22","unstructured":"Fu, D.Y., Dao, T., Saab, K.K., Thomas, A.W., Rudra, A., R\u00e9, C.: Hungry Hungry Hippos: Towards language modeling with state space models. In: International Conference on Learning Representations (2023)"},{"key":"3907_CR23","unstructured":"Gu, A., Goel, K., R\u00e9, C.: Efficiently modeling long sequences with structured state spaces. In: The International Conference on Learning Representations (ICLR) (2022)"},{"key":"3907_CR24","unstructured":"Smith, J.T.H., Warrington, A., Linderman, S.: Simplified state space layers for sequence modeling. In: The Eleventh International Conference on Learning Representations (2023). https:\/\/openreview.net\/forum?id=Ai8Hw3AXqks"},{"key":"3907_CR25","unstructured":"Waleffe, R., Byeon, W., Riach, D., Norick, B., Korthikanti, V., Dao, T., Gu, A., Hatamizadeh, A., Singh, S., Narayanan, D., Kulshreshtha, G., Singh, V., Casper, J., Kautz, J., Shoeybi, M., Catanzaro, B.: An Empirical Study of Mamba-based Language Models (2024). https:\/\/arxiv.org\/abs\/2406.07887"},{"key":"3907_CR26","unstructured":"Fei, Z., Fan, M., Yu, C., Li, D., Zhang, Y., Huang, J.: Dimba: transformer-mamba diffusion models (2024)"},{"key":"3907_CR27","doi-asserted-by":"crossref","unstructured":"Zhang, W., Huang, J., Wang, R., Wei, C., Huang, W., Qiao, Y.: Integration of mamba and transformer - mat for long-short range time series forecasting with application to weather dynamics. 
arXiv:abs\/2409.08530 (2024)","DOI":"10.1109\/ICECCE63537.2024.10823516"},{"key":"3907_CR28","unstructured":"Wang, Z., Chen, Z., Wu, Y., Zhao, Z., Zhou, L., Xu, D.: PoinTramba: a hybrid transformer-mamba framework for point cloud analysis (2024). https:\/\/arxiv.org\/abs\/2405.15463"},{"key":"3907_CR29","unstructured":"Ba, J., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv:abs\/1607.06450 (2016)"},{"key":"3907_CR30","unstructured":"Nair, V., Hinton, G.E.: Rectified linear units improve restricted boltzmann machines. In: ICML 2010, pp. 807\u2013814 (2010)"},{"issue":"1","key":"3907_CR31","doi-asserted-by":"publisher","first-page":"47","DOI":"10.1109\/TCI.2016.2644865","volume":"3","author":"H Zhao","year":"2017","unstructured":"Zhao, H., Gallo, O., Frosio, I., Kautz, J.: Loss functions for image restoration with neural networks. IEEE Trans. Comput. Imaging 3(1), 47\u201357 (2017). https:\/\/doi.org\/10.1109\/TCI.2016.2644865","journal-title":"IEEE Trans. Comput. Imaging"},{"key":"3907_CR32","unstructured":"Liu, Y., Tian, Y., Zhao, Y., Yu, H., Xie, L., Wang, Y., Ye, Q., Liu, Y.: Vmamba: Visual state space model. arXiv preprint arXiv:2401.10166 (2024)"},{"key":"3907_CR33","doi-asserted-by":"publisher","unstructured":"Yu, W., Luo, M., Zhou, P., Si, C., Zhou, Y., Wang, X., Feng, J., Yan, S.: Metaformer is actually what you need for vision. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10809\u201310819 (2022). https:\/\/doi.org\/10.1109\/CVPR52688.2022.01055","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"3907_CR34","doi-asserted-by":"crossref","unstructured":"Huang, Z., Zhang, Z., Lan, C., Zha, Z.-J., Lu, Y., Guo, B.: Adaptive frequency filters as efficient global token mixers. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00556"},{"key":"3907_CR35","doi-asserted-by":"crossref","unstructured":"Ding, X., Guo, Y., Ding, G., Han, J.: Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. 
In: The IEEE International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/ICCV.2019.00200"},{"key":"3907_CR36","doi-asserted-by":"crossref","unstructured":"Ding, X., Zhang, X., Ma, N., Han, J., Ding, G., Sun, J.: Repvgg: Making vgg-style convnets great again. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13733\u201313742 (2021)","DOI":"10.1109\/CVPR46437.2021.01352"},{"key":"3907_CR37","unstructured":"Tolstikhin, I.O., Houlsby, N., Kolesnikov, A., Beyer, L., Zhai, X., Unterthiner, T., Yung, J., Steiner, A., Keysers, D., Uszkoreit, J., Lucic, M., Dosovitskiy, A.: Mlp-mixer: An all-mlp architecture for vision. In: Ranzato, M., Beygelzimer, A., Dauphin, Y., Liang, P.S., Vaughan, J.W. (eds.) Advances in Neural Information Processing Systems, vol. 34, pp. 24261\u201324272 (2021)"},{"key":"3907_CR38","doi-asserted-by":"crossref","unstructured":"Shi, D.: Transnext: Robust foveal visual perception for vision transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 17773\u201317783 (2024)","DOI":"10.1109\/CVPR52733.2024.01683"},{"key":"3907_CR39","unstructured":"Ramachandran, P., Zoph, B., Le, Q.V.: Swish: a self-gated activation function. arXiv: Neural and Evolutionary Computing (2017)"},{"key":"3907_CR40","unstructured":"Howard, A.G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., Andreetto, M., Adam, H.: Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861 (2017)"},{"key":"3907_CR41","unstructured":"Hendrycks, D., Gimpel, K.: Gaussian error linear units (gelus). arXiv: Learning (2016)"},{"key":"3907_CR42","unstructured":"Vaswani, A., Shazeer, N.M., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Neural Information Processing Systems (2017). 
https:\/\/api.semanticscholar.org\/CorpusID:13756489"},{"key":"3907_CR43","unstructured":"Wei, G., Zhang, Z., Lan, C., Lu, Y., Chen, Z.: Activemlp: An mlp-like architecture with active token mixer. arXiv preprint arXiv:2203.06108 (2022)"},{"key":"3907_CR44","doi-asserted-by":"crossref","unstructured":"Zhang, X., Zeng, H., Guo, S., Zhang, L.: Efficient long-range attention network for image super-resolution. In: European Conference on Computer Vision (2022)","DOI":"10.1007\/978-3-031-19790-1_39"},{"key":"3907_CR45","doi-asserted-by":"crossref","unstructured":"Agustsson, E., Timofte, R.: Ntire 2017 challenge on single image super-resolution: Dataset and study. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops (2017)","DOI":"10.1109\/CVPRW.2017.150"},{"key":"3907_CR46","doi-asserted-by":"crossref","unstructured":"Lim, B., Son, S., Kim, H., Nah, S., Lee, K.M.: Enhanced deep residual networks for single image super-resolution. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops (2017)","DOI":"10.1109\/CVPRW.2017.151"},{"issue":"4","key":"3907_CR47","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004). https:\/\/doi.org\/10.1109\/TIP.2003.819861","journal-title":"IEEE Trans. Image Process."},{"key":"3907_CR48","doi-asserted-by":"crossref","unstructured":"Bevilacqua, M., Roumy, A., Guillemot, C., Morel, A.: Low-complexity single image super-resolution based on nonnegative neighbor embedding. In: British Machine Vision Conference (2012)","DOI":"10.5244\/C.26.135"},{"key":"3907_CR49","unstructured":"Zeyde, R., Elad, M., Protter, M.: On single image scale-up using sparse-representations. 
In: International Conference on Curves and Surfaces (2010)"},{"key":"3907_CR50","doi-asserted-by":"publisher","unstructured":"Martin, D., Fowlkes, C., Tal, D., Malik, J.: A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In: Proceedings Eighth IEEE International Conference on Computer Vision. ICCV 2001, vol. 2, pp. 416\u2013423 (2001). https:\/\/doi.org\/10.1109\/ICCV.2001.937655","DOI":"10.1109\/ICCV.2001.937655"},{"key":"3907_CR51","doi-asserted-by":"crossref","unstructured":"Huang, J.B., Singh, A., Ahuja, N.: Single image super-resolution from transformed self-exemplars. In: IEEE (2015)","DOI":"10.1109\/CVPR.2015.7299156"},{"key":"3907_CR52","doi-asserted-by":"crossref","first-page":"21811","DOI":"10.1007\/s11042-016-4020-z","volume":"76","author":"Y Matsui","year":"2015","unstructured":"Matsui, Y., Ito, K., Aramaki, Y., Fujimoto, A., Ogawa, T., Yamasaki, T., Aizawa, K.: Sketch-based manga retrieval using manga109 dataset. Multimed. Tools Appl. 76, 21811\u201321838 (2015)","journal-title":"Multimed. Tools Appl."},{"key":"3907_CR53","doi-asserted-by":"crossref","unstructured":"Zhou, K., Yang, Y., Cavallaro, A., Xiang, T.: Omni-scale feature learning for person re-identification. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00380"},{"key":"3907_CR54","doi-asserted-by":"crossref","unstructured":"Yuan, J., Xiao, H.C., Guan, Y., Wang, W., Meng, H., Li, R., Zhi-Yong: Gated cnn: Integrating multi-scale feature layers for object detection. 
Pattern Recognition: The Journal of the Pattern Recognition Society 105 (2020)","DOI":"10.1016\/j.patcog.2019.107131"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-03907-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-025-03907-0\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-03907-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,8]],"date-time":"2025-04-08T20:08:36Z","timestamp":1744142916000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-025-03907-0"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3,6]]},"references-count":54,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2025,5]]}},"alternative-id":["3907"],"URL":"https:\/\/doi.org\/10.1007\/s11760-025-03907-0","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"value":"1863-1703","type":"print"},{"value":"1863-1711","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3,6]]},"assertion":[{"value":"29 August 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 November 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"30 January 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 March 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"371"}}