{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,7,19]],"date-time":"2024-07-19T06:30:10Z","timestamp":1721370610929},"reference-count":29,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2024,2,22]],"date-time":"2024-02-22T00:00:00Z","timestamp":1708560000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,2,22]],"date-time":"2024-02-22T00:00:00Z","timestamp":1708560000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"The Guangdong Provincial Key Laboratory of Human Digital Twin","award":["No. 2022B1212010004"],"award-info":[{"award-number":["No. 2022B1212010004"]}]},{"name":"The Open-Fund of WNLO","award":["Grant No. 2018WNLOKF027"],"award-info":[{"award-number":["Grant No. 2018WNLOKF027"]}]},{"name":"The Graduate Innovative Fund of Wuhan Institute of Technology","award":["No. CX2022349"],"award-info":[{"award-number":["No. CX2022349"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Machine Vision and Applications"],"published-print":{"date-parts":[[2024,3]]},"DOI":"10.1007\/s00138-024-01513-7","type":"journal-article","created":{"date-parts":[[2024,2,22]],"date-time":"2024-02-22T19:02:56Z","timestamp":1708628576000},"update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["A pixel and channel enhanced up-sampling module for biomedical image segmentation"],"prefix":"10.1007","volume":"35","author":[{"given":"Xuan","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Guoping","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Xinglong","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Wentao","family":"Liao","sequence":"additional","affiliation":[]},{"given":"Xuesong","family":"Leng","sequence":"additional","affiliation":[]},{"given":"Xiaxia","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xinwei","family":"He","sequence":"additional","affiliation":[]},{"given":"Chang","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,2,22]]},"reference":[{"key":"1513_CR1","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L.u., Polosukhin, I.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"1513_CR2","doi-asserted-by":"crossref","unstructured":"Long, J., Shelhamer, E., Darrell, T.: Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3431\u20133440 (2015)","DOI":"10.1109\/CVPR.2015.7298965"},{"issue":"4","key":"1513_CR3","doi-asserted-by":"publisher","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","volume":"40","author":"L-C Chen","year":"2018","unstructured":"Chen, L.-C., Papandreou, G., Kokkinos, I., Murphy, K., Yuille, A.L.: Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. IEEE Trans. Pattern Anal. Mach. Intell. 40(4), 834\u2013848 (2018). https:\/\/doi.org\/10.1109\/TPAMI.2017.2699184","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1513_CR4","doi-asserted-by":"publisher","unstructured":"Chen, L.-C., Papandreou, G., Schroff, F., Adam, H.: Rethinking Atrous Convolution for Semantic Image Segmentation (2017). https:\/\/doi.org\/10.48550\/arXiv.1706.05587","DOI":"10.48550\/arXiv.1706.05587"},{"key":"1513_CR5","doi-asserted-by":"publisher","first-page":"66467","DOI":"10.1109\/ACCESS.2023.3290496","volume":"11","author":"U Jarujareet","year":"2023","unstructured":"Jarujareet, U., Wiratchawa, K., Panpisut, P., Intharah, T.: Deepddm: A compact deep-learning assisted platform for micro-rheological assessment of micro-volume fluids. IEEE Access 11, 66467\u201366477 (2023). https:\/\/doi.org\/10.1109\/ACCESS.2023.3290496","journal-title":"IEEE Access"},{"key":"1513_CR6","doi-asserted-by":"crossref","unstructured":"Zheng, S., Lu, J., Zhao, H., Zhu, X., Luo, Z., Wang, Y., Fu, Y., Feng, J., Xiang, T., Torr, P.H.S., Zhang, L.: Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 6881\u20136890 (2021)","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"1513_CR7","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1513_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2022.3178991","volume":"71","author":"A Lin","year":"2022","unstructured":"Lin, A., Chen, B., Xu, J., Zhang, Z., Lu, G., Zhang, D.: Ds-transunet: Dual swin transformer u-net for medical image segmentation. IEEE Trans. Instrum. Meas. 71, 1\u201315 (2022). https:\/\/doi.org\/10.1109\/TIM.2022.3178991","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"1513_CR9","doi-asserted-by":"publisher","unstructured":"Poudel, R.P.K., Liwicki, S., Cipolla, R.: Fast-SCNN: Fast Semantic Segmentation Network (2019). https:\/\/doi.org\/10.48550\/arXiv.1902.04502","DOI":"10.48550\/arXiv.1902.04502"},{"key":"1513_CR10","doi-asserted-by":"publisher","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: Convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2015, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"1513_CR11","doi-asserted-by":"publisher","unstructured":"Paszke, A., Chaurasia, A., Kim, S., Culurciello, E.: ENet: A Deep Neural Network Architecture for Real-Time Semantic Segmentation (2016). https:\/\/doi.org\/10.48550\/arXiv.1606.02147","DOI":"10.48550\/arXiv.1606.02147"},{"issue":"2","key":"1513_CR12","doi-asserted-by":"publisher","first-page":"1183","DOI":"10.1109\/TII.2018.2849348","volume":"15","author":"X Zhang","year":"2019","unstructured":"Zhang, X., Chen, Z., Wu, Q.M.J., Cai, L., Lu, D., Li, X.: Fast semantic segmentation for scene perception. IEEE Trans. Industr. Inf. 15(2), 1183\u20131192 (2019). https:\/\/doi.org\/10.1109\/TII.2018.2849348","journal-title":"IEEE Trans. Industr. Inf."},{"key":"1513_CR13","doi-asserted-by":"crossref","unstructured":"Noh, H., Hong, S., Han, B.: Learning deconvolution network for semantic segmentation. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV), pp. 1520\u20131528 (2015)","DOI":"10.1109\/ICCV.2015.178"},{"issue":"12","key":"1513_CR14","doi-asserted-by":"publisher","first-page":"2481","DOI":"10.1109\/TPAMI.2016.2644615","volume":"39","author":"V Badrinarayanan","year":"2017","unstructured":"Badrinarayanan, V., Kendall, A., Cipolla, R.: Segnet: A deep convolutional encoder-decoder architecture for image segmentation. IEEE Trans. Pattern Anal. Mach. Intell. 39(12), 2481\u20132495 (2017). https:\/\/doi.org\/10.1109\/TPAMI.2016.2644615","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1513_CR15","doi-asserted-by":"publisher","unstructured":"Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q., Wang, M.: Swin-unet: Unet-like pure transformer for medical image segmentation. In: Karlinsky, L., Michaeli, T., Nishino, K. (eds.) Computer Vision \u2013 ECCV 2022 Workshops, pp. 205\u2013218. Springer, Cham (2023). https:\/\/doi.org\/10.1007\/978-3-031-25066-8_9","DOI":"10.1007\/978-3-031-25066-8_9"},{"issue":"9","key":"1513_CR16","doi-asserted-by":"publisher","first-page":"791","DOI":"10.1016\/j.imavis.2005.05.005","volume":"23","author":"M-J Chen","year":"2005","unstructured":"Chen, M.-J., Huang, C.-H., Lee, W.-L.: A fast edge-oriented algorithm for image interpolation. Image Vis. Comput. 23(9), 791\u2013798 (2005). https:\/\/doi.org\/10.1016\/j.imavis.2005.05.005","journal-title":"Image Vis. Comput."},{"key":"1513_CR17","doi-asserted-by":"publisher","unstructured":"Asuni, N., Giachetti, A.: Accuracy improvements and artifacts removal in edge based image interpolation. In: Proceedings of the Third International Conference on Computer Vision Theory and Applications - Volume 1: VISAPP, (VISIGRAPP 2008), vol. 2, pp. 58\u201365. SciTePress, Funchal (2008). https:\/\/doi.org\/10.5220\/0001074100580065","DOI":"10.5220\/0001074100580065"},{"key":"1513_CR18","doi-asserted-by":"publisher","unstructured":"Seo, H., Huang, C., Bassenne, M., Xiao, R., Xing, L.: Modified u-net (mu-net) with incorporation of object-dependent high level features for improved liver and liver-tumor segmentation in ct images. IEEE Trans. Med. Imaging 39(5), 1316\u20131325 (2020). https:\/\/doi.org\/10.1109\/TMI.2019.2948320","DOI":"10.1109\/TMI.2019.2948320"},{"key":"1513_CR19","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7132\u20137141 (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"1513_CR20","doi-asserted-by":"crossref","unstructured":"Wang, Q., Wu, B., Zhu, P., Li, P., Zuo, W., Hu, Q.: Eca-net: Efficient channel attention for deep convolutional neural networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11534\u201311542 (2020)","DOI":"10.1109\/CVPR42600.2020.01155"},{"key":"1513_CR21","unstructured":"Nair, V., Hinton, G.E.: Rectified linear units improve restricted boltzmann machines. In: Proceedings of the 27th International Conference on Machine Learning (ICML-10), pp. 807\u2013814 (2010)"},{"issue":"6009","key":"1513_CR22","doi-asserted-by":"publisher","first-page":"1404","DOI":"10.1126\/science.1191776","volume":"330","author":"A Li","year":"2010","unstructured":"Li, A., Gong, H., Zhang, B., Wang, Q., Yan, C., Wu, J., Liu, Q., Zeng, S., Luo, Q.: Micro-optical sectioning tomography to obtain a high-resolution atlas of the mouse brain. Science 330(6009), 1404\u20131408 (2010). https:\/\/doi.org\/10.1126\/science.1191776","journal-title":"Science"},{"key":"1513_CR23","doi-asserted-by":"crossref","unstructured":"Chen, L.-C., Zhu, Y., Papandreou, G., Schroff, F., Adam, H.: Encoder-decoder with atrous separable convolution for semantic image segmentation. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 801\u2013818 (2018)","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"1513_CR24","doi-asserted-by":"crossref","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., Jia, J.: Pyramid scene parsing network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2881\u20132890 (2017)","DOI":"10.1109\/CVPR.2017.660"},{"key":"1513_CR25","doi-asserted-by":"publisher","unstructured":"Chaurasia, A., Culurciello, E.: Linknet: Exploiting encoder representations for efficient semantic segmentation. In: 2017 IEEE Visual Communications and Image Processing (VCIP), pp. 1\u20134 (2017). https:\/\/doi.org\/10.1109\/VCIP.2017.8305148","DOI":"10.1109\/VCIP.2017.8305148"},{"key":"1513_CR26","doi-asserted-by":"publisher","unstructured":"Chen, J., Lu, Y., Yu, Q., Luo, X., Adeli, E., Wang, Y., Lu, L., Yuille, A.L., Zhou, Y.: TransUNet: Transformers Make Strong Encoders for Medical Image Segmentation (2021). https:\/\/doi.org\/10.48550\/arXiv.2102.04306","DOI":"10.48550\/arXiv.2102.04306"},{"key":"1513_CR27","doi-asserted-by":"publisher","unstructured":"Xu, G., Wu, X., Zhang, X., He, X.: LeViT-UNet: Make Faster Encoders with Transformer for Medical Image Segmentation (2021). https:\/\/doi.org\/10.48550\/arXiv.2107.08623","DOI":"10.48550\/arXiv.2107.08623"},{"key":"1513_CR28","doi-asserted-by":"publisher","unstructured":"Wang, Y., Zhou, Q., Xiong, J., Wu, X., Jin, X.: Esnet: An efficient symmetric network for real-time semantic segmentation. In: Lin, Z., Wang, L., Yang, J., Shi, G., Tan, T., Zheng, N., Chen, X., Zhang, Y. (eds.) Pattern Recognition and Computer Vision, pp. 41\u201352. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-31723-2_4","DOI":"10.1007\/978-3-030-31723-2_4"},{"key":"1513_CR29","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"}],"container-title":["Machine Vision and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-024-01513-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00138-024-01513-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-024-01513-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,23]],"date-time":"2024-03-23T06:38:12Z","timestamp":1711175892000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00138-024-01513-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,2,22]]},"references-count":29,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2024,3]]}},"alternative-id":["1513"],"URL":"https:\/\/doi.org\/10.1007\/s00138-024-01513-7","relation":{},"ISSN":["0932-8092","1432-1769"],"issn-type":[{"value":"0932-8092","type":"print"},{"value":"1432-1769","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,2,22]]},"assertion":[{"value":"28 August 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 December 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 January 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 February 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"We confirm that there are no known conflicts of interest associated with this publication and there has been no significant financial support for this work that could have influenced its outcome.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}},{"value":"Not applicable.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Not applicable.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}}],"article-number":"30"}}