{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,16]],"date-time":"2026-04-16T08:37:12Z","timestamp":1776328632501,"version":"3.50.1"},"reference-count":32,"publisher":"MDPI AG","issue":"21","license":[{"start":{"date-parts":[[2023,10,26]],"date-time":"2023-10-26T00:00:00Z","timestamp":1698278400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62373001"],"award-info":[{"award-number":["62373001"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62303014"],"award-info":[{"award-number":["62303014"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["GXXT-2021-030"],"award-info":[{"award-number":["GXXT-2021-030"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2020YFA0908700"],"award-info":[{"award-number":["2020YFA0908700"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2308085QF225"],"award-info":[{"award-number":["2308085QF225"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2023AH050089"],"award-info":[{"award-number":["2023AH050089"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2023AH050061"],"award-info":[{"award-number":["2023AH050061"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"University Synergy Innovation Program of Anhui Province","award":["62373001"],"award-info":[{"award-number":["62373001"]}]},{"name":"University Synergy Innovation Program of Anhui Province","award":["62303014"],"award-info":[{"award-number":["62303014"]}]},{"name":"University Synergy Innovation Program of Anhui Province","award":["GXXT-2021-030"],"award-info":[{"award-number":["GXXT-2021-030"]}]},{"name":"University Synergy Innovation Program of Anhui Province","award":["2020YFA0908700"],"award-info":[{"award-number":["2020YFA0908700"]}]},{"name":"University Synergy Innovation Program of Anhui Province","award":["2308085QF225"],"award-info":[{"award-number":["2308085QF225"]}]},{"name":"University Synergy Innovation Program of Anhui Province","award":["2023AH050089"],"award-info":[{"award-number":["2023AH050089"]}]},{"name":"University Synergy Innovation Program of Anhui Province","award":["2023AH050061"],"award-info":[{"award-number":["2023AH050061"]}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of 
China","doi-asserted-by":"publisher","award":["62373001"],"award-info":[{"award-number":["62373001"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["62303014"],"award-info":[{"award-number":["62303014"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["GXXT-2021-030"],"award-info":[{"award-number":["GXXT-2021-030"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2020YFA0908700"],"award-info":[{"award-number":["2020YFA0908700"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2308085QF225"],"award-info":[{"award-number":["2308085QF225"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2023AH050089"],"award-info":[{"award-number":["2023AH050089"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2023AH050061"],"award-info":[{"award-number":["2023AH050061"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Anhui Provincial Natural Science Foundation","doi-asserted-by":"publisher","award":["62373001"],"award-info":[{"award-number":["62373001"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Anhui Provincial Natural Science Foundation","doi-asserted-by":"publisher","award":["62303014"],"award-info":[{"award-number":["62303014"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Anhui Provincial Natural Science Foundation","doi-asserted-by":"publisher","award":["GXXT-2021-030"],"award-info":[{"award-number":["GXXT-2021-030"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Anhui Provincial Natural Science Foundation","doi-asserted-by":"publisher","award":["2020YFA0908700"],"award-info":[{"award-number":["2020YFA0908700"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Anhui Provincial Natural Science Foundation","doi-asserted-by":"publisher","award":["2308085QF225"],"award-info":[{"award-number":["2308085QF225"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Anhui Provincial Natural Science Foundation","doi-asserted-by":"publisher","award":["2023AH050089"],"award-info":[{"award-number":["2023AH050089"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Anhui Provincial 
Natural Science Foundation","doi-asserted-by":"publisher","award":["2023AH050061"],"award-info":[{"award-number":["2023AH050061"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Education Department of Anhui Province","award":["62373001"],"award-info":[{"award-number":["62373001"]}]},{"name":"Education Department of Anhui Province","award":["62303014"],"award-info":[{"award-number":["62303014"]}]},{"name":"Education Department of Anhui Province","award":["GXXT-2021-030"],"award-info":[{"award-number":["GXXT-2021-030"]}]},{"name":"Education Department of Anhui Province","award":["2020YFA0908700"],"award-info":[{"award-number":["2020YFA0908700"]}]},{"name":"Education Department of Anhui Province","award":["2308085QF225"],"award-info":[{"award-number":["2308085QF225"]}]},{"name":"Education Department of Anhui Province","award":["2023AH050089"],"award-info":[{"award-number":["2023AH050089"]}]},{"name":"Education Department of Anhui Province","award":["2023AH050061"],"award-info":[{"award-number":["2023AH050061"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Sensors"],"abstract":"<jats:p>Medical image segmentation plays a crucial role in clinical diagnosis, treatment planning, and disease monitoring. The automatic segmentation method based on deep learning has developed rapidly, with segmentation results comparable to clinical experts for large objects, but the segmentation accuracy for small objects is still unsatisfactory. Current segmentation methods based on deep learning find it difficult to extract multiple scale features of medical images, leading to an insufficient detection capability for smaller objects. In this paper, we propose a context feature fusion and attention mechanism based network for small target segmentation in medical images called CFANet. CFANet is based on U-Net structure, including the encoder and the decoder, and incorporates two key modules, context feature fusion (CFF) and effective channel spatial attention (ECSA), in order to improve segmentation performance. The CFF module utilizes contextual information from different scales to enhance the representation of small targets. By fusing multi-scale features, the network captures local and global contextual cues, which are critical for accurate segmentation. The ECSA module further enhances the network\u2019s ability to capture long-range dependencies by incorporating attention mechanisms at the spatial and channel levels, which allows the network to focus on information-rich regions while suppressing irrelevant or noisy features. Extensive experiments are conducted on four challenging medical image datasets, namely ADAM, LUNA16, Thoracic OAR, and WORD. Experimental results show that CFANet outperforms state-of-the-art methods in terms of segmentation accuracy and robustness. 
The proposed method achieves excellent performance in segmenting small targets in medical images, demonstrating its potential in various clinical applications.<\/jats:p>","DOI":"10.3390\/s23218739","type":"journal-article","created":{"date-parts":[[2023,10,26]],"date-time":"2023-10-26T07:22:15Z","timestamp":1698304935000},"page":"8739","update-policy":"https:\/\/doi.org\/10.3390\/mdpi_crossmark_policy","source":"Crossref","is-referenced-by-count":15,"title":["CFANet: Context Feature Fusion and Attention Mechanism Based Network for Small Target Segmentation in Medical Images"],"prefix":"10.3390","volume":"23","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4223-3422","authenticated-orcid":false,"given":"Ruifen","family":"Cao","sequence":"first","affiliation":[{"name":"Information Materials and Intelligent Sensing Laboratory of Anhui Province, School of Computer Science and Technology, Anhui University, Hefei 230601, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-5963-7387","authenticated-orcid":false,"given":"Long","family":"Ning","sequence":"additional","affiliation":[{"name":"Information Materials and Intelligent Sensing Laboratory of Anhui Province, School of Computer Science and Technology, Anhui University, Hefei 230601, China"}]},{"given":"Chao","family":"Zhou","sequence":"additional","affiliation":[{"name":"Institute of Energy, Hefei Comprehensive National Science Center, Hefei 230031, China"}]},{"given":"Pijing","family":"Wei","sequence":"additional","affiliation":[{"name":"Institutes of Physical Science and Information Technology, Anhui University, Hefei 230601, China"}]},{"given":"Yun","family":"Ding","sequence":"additional","affiliation":[{"name":"Key Laboratory of Intelligent Computing and Signal Processing of Ministry of Education, School of Artificial Intelligence, Anhui University, Hefei 230601, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3960-8852","authenticated-orcid":false,"given":"Dayu","family":"Tan","sequence":"additional","affiliation":[{"name":"Institutes of Physical Science and Information Technology, Anhui University, Hefei 230601, China"}]},{"given":"Chunhou","family":"Zheng","sequence":"additional","affiliation":[{"name":"Key Laboratory of Intelligent Computing and Signal Processing of Ministry of Education, School of Artificial Intelligence, Anhui University, Hefei 230601, China"}]}],"member":"1968","published-online":{"date-parts":[[2023,10,26]]},"reference":[{"key":"ref_1","doi-asserted-by":"crossref","first-page":"101666","DOI":"10.1016\/j.media.2020.101666","article-title":"Multi-Task Learning for the Segmentation of Organs at Risk with Label Dependence","volume":"61","author":"He","year":"2020","journal-title":"Med. Image Anal."},{"key":"ref_2","first-page":"640","article-title":"Fully Convolutional Networks for Semantic Segmentation","volume":"39","author":"Long","year":"2015","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"ref_3","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., and Brox, T. (2015). U-Net: Convolutional Networks for Biomedical Image Segmentation, Springer International Publishing.","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref_4","doi-asserted-by":"crossref","unstructured":"Milletari, F., Navab, N., and Ahmadi, S.A. (2016). 
,{"key":"ref_5","doi-asserted-by":"crossref","unstructured":"\u00c7i\u00e7ek, \u00d6., Abdulkadir, A., Lienkamp, S.S., Brox, T., and Ronneberger, O. (2016). 3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation, Springer.","DOI":"10.1007\/978-3-319-46723-8_49"},
{"key":"ref_6","doi-asserted-by":"crossref","first-page":"2281","DOI":"10.1109\/TMI.2019.2903562","article-title":"CE-Net: Context Encoder Network for 2D Medical Image Segmentation","volume":"38","author":"Gu","year":"2019","journal-title":"IEEE Trans. Med. Imaging"},
{"key":"ref_7","doi-asserted-by":"crossref","unstructured":"Gu, R., Wang, G., Song, T., Huang, R., Aertsen, M., Deprest, J., Ourselin, S., Vercauteren, T., and Zhang, S. (2021). CA-Net: Comprehensive Attention Convolutional Neural Networks for Explainable Medical Image Segmentation, IEEE.","DOI":"10.1109\/TMI.2020.3035253"},
{"key":"ref_8","doi-asserted-by":"crossref","first-page":"203","DOI":"10.1038\/s41592-020-01008-z","article-title":"nnU-Net: A self-configuring method for deep learning-based biomedical image segmentation","volume":"18","author":"Isensee","year":"2021","journal-title":"Nat. Methods"},
{"key":"ref_9","doi-asserted-by":"crossref","unstructured":"Wang, F., Jiang, M., Qian, C., Yang, S., and Tang, X. (2017, January 21\u201326). Residual Attention Network for Image Classification. Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Honolulu, HI, USA.","DOI":"10.1109\/CVPR.2017.683"},
{"key":"ref_10","doi-asserted-by":"crossref","first-page":"816672","DOI":"10.3389\/fonc.2021.816672","article-title":"AttR2U-Net: A Fully Automated Model for MRI Nasopharyngeal Carcinoma Segmentation Based on Spatial Attention and Residual Recurrent Convolution","volume":"11","author":"Zhang","year":"2021","journal-title":"Front. Oncol."},
{"key":"ref_11","doi-asserted-by":"crossref","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., and Jia, J. (2017). Pyramid Scene Parsing Network. IEEE Comput. Soc., 2881\u20132890.","DOI":"10.1109\/CVPR.2017.660"},
{"key":"ref_12","unstructured":"Oktay, O., Schlemper, J., Folgoc, L.L., Lee, M., Heinrich, M., Misawa, K., Mori, K., McDonagh, S., Hammerla, N.Y., and Kainz, B. (2018). Attention U-Net: Learning Where to Look for the Pancreas. arXiv."},
{"key":"ref_13","doi-asserted-by":"crossref","unstructured":"Chollet, F. (2017, January 21\u201326). Xception: Deep Learning with Depthwise Separable Convolutions. Proceedings of the 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Honolulu, HI, USA.","DOI":"10.1109\/CVPR.2017.195"},
{"key":"ref_14","doi-asserted-by":"crossref","first-page":"1740","DOI":"10.1016\/j.wear.2018.12.089","article-title":"Restoration of defocused ferrograph images using a large kernel convolutional neural network","volume":"s426\u2013s427","author":"Wu","year":"2019","journal-title":"Wear"},
{"key":"ref_15","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., and Polosukhin, I. (2017). Attention Is All You Need. arXiv."},
{"key":"ref_16","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., and Houlsby, N. (2020). An Image is Worth 16 \u00d7 16 Words: Transformers for Image Recognition at Scale. arXiv."},
{"key":"ref_17","unstructured":"Chen, J., Lu, Y., Yu, Q., Luo, X., and Zhou, Y. (2021). TransUNet: Transformers Make Strong Encoders for Medical Image Segmentation. arXiv."},
{"key":"ref_18","unstructured":"Wang, H., Cao, P., Wang, J., and Zaiane, O.R. (2021, January 2\u20139). UCTransNet: Rethinking the Skip Connections in U-Net from a Channel-wise Perspective with Transformer. Proceedings of the AAAI Conference on Artificial Intelligence, Virtually."},
{"key":"ref_19","unstructured":"Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q., and Wang, M. (2021). Swin-Unet: Unet-like Pure Transformer for Medical Image Segmentation, Springer Nature."},
{"key":"ref_20","unstructured":"Zhou, H.Y., Guo, J., Zhang, Y., Yu, L., Wang, L., and Yu, Y. (2021). nnFormer: Interleaved Transformer for Volumetric Segmentation. arXiv."},
{"key":"ref_21","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., and Sun, J. (2016). Deep Residual Learning for Image Recognition, IEEE.","DOI":"10.1109\/CVPR.2016.90"},
{"key":"ref_22","doi-asserted-by":"crossref","unstructured":"Zhang, H., Zu, K., Lu, J., Zou, Y., and Meng, D. (2022, January 4\u20138). EPSANet: An Efficient Pyramid Squeeze Attention Block on Convolutional Neural Network. Proceedings of the Asian Conference on Computer Vision, Macao, China.","DOI":"10.1007\/978-3-031-26313-2_33"},
{"key":"ref_23","doi-asserted-by":"crossref","first-page":"102642","DOI":"10.1016\/j.media.2022.102642","article-title":"WORD: A large scale dataset, benchmark and clinical applicable study for abdominal organ segmentation from CT image","volume":"82","author":"Luo","year":"2022","journal-title":"Med. Image Anal."},
{"key":"ref_24","doi-asserted-by":"crossref","unstructured":"Zhou, Z., Siddiquee, M.M.R., Tajbakhsh, N., and Liang, J. (2018). UNet++: A Nested U-Net Architecture for Medical Image Segmentation, Springer International Publishing.","DOI":"10.1007\/978-3-030-00889-5_1"},
{"key":"ref_25","doi-asserted-by":"crossref","first-page":"94","DOI":"10.1016\/j.isprsjprs.2020.01.013","article-title":"ResUNet-a: A deep learning framework for semantic segmentation of remotely sensed data","volume":"162","author":"Diakogiannis","year":"2020","journal-title":"ISPRS J. Photogramm. Remote Sens."},
{"key":"ref_26","doi-asserted-by":"crossref","first-page":"118216","DOI":"10.1016\/j.neuroimage.2021.118216","article-title":"Comparing methods of detecting and segmenting unruptured intracranial aneurysms on TOF-MRAS: The ADAM Challenge","volume":"238","author":"Timmins","year":"2021","journal-title":"NeuroImage"},
{"key":"ref_27","first-page":"141","article-title":"An overview of intracranial aneurysms","volume":"9","author":"Keedy","year":"2006","journal-title":"McGill J. Med. (MJM): Int. Forum Adv. Med. Sci. Stud."},
{"key":"ref_28","doi-asserted-by":"crossref","first-page":"178","DOI":"10.1016\/j.patrec.2021.01.036","article-title":"SmaAt-UNet: Precipitation nowcasting using a small attention-UNet architecture","volume":"145","author":"Trebing","year":"2021","journal-title":"Pattern Recognit. Lett."},
{"key":"ref_29","doi-asserted-by":"crossref","unstructured":"The National Lung Screening Trial Research Team (2011). Reduced lung cancer mortality with low-dose computed tomographic screening. N. Engl. J. Med., 365, 395\u2013409.","DOI":"10.1056\/NEJMoa1102873"},
{"key":"ref_30","first-page":"330","article-title":"Screening for lung cancer: U.S. Preventive Services Task Force recommendation statement","volume":"160","author":"Moyer","year":"2014","journal-title":"Ann. Intern. Med."},
{"key":"ref_31","doi-asserted-by":"crossref","first-page":"357","DOI":"10.1016\/j.neucom.2020.08.086","article-title":"Cascaded SE-ResUnet for Segmentation of Thoracic Organs at Risk","volume":"453","author":"Cao","year":"2020","journal-title":"Neurocomputing"},
{"key":"ref_32","doi-asserted-by":"crossref","first-page":"480","DOI":"10.1038\/s42256-019-0099-z","article-title":"Clinically applicable deep learning framework for organs at risk delineation in CT images","volume":"1","author":"Tang","year":"2019","journal-title":"Nat. Mach. Intell."}],
"container-title":["Sensors"],"original-title":[],"language":"en",
"link":[{"URL":"https:\/\/www.mdpi.com\/1424-8220\/23\/21\/8739\/pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],
"deposited":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T21:12:10Z","timestamp":1760130730000},"score":1,
"resource":{"primary":{"URL":"https:\/\/www.mdpi.com\/1424-8220\/23\/21\/8739"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,10,26]]},"references-count":32,
"journal-issue":{"issue":"21","published-online":{"date-parts":[[2023,11]]}},"alternative-id":["s23218739"],"URL":"https:\/\/doi.org\/10.3390\/s23218739","relation":{},"ISSN":["1424-8220"],"issn-type":[{"value":"1424-8220","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,10,26]]}}}