{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,27]],"date-time":"2026-02-27T15:24:15Z","timestamp":1772205855018,"version":"3.50.1"},"reference-count":45,"publisher":"MDPI AG","issue":"24","license":[{"start":{"date-parts":[[2022,12,10]],"date-time":"2022-12-10T00:00:00Z","timestamp":1670630400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100004663","name":"Key Projects of Global Change and Response of Ministry of Science and Technology of China","doi-asserted-by":"publisher","award":["2020YFA0608203"],"award-info":[{"award-number":["2020YFA0608203"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004663","name":"Key Projects of Global Change and Response of Ministry of Science and Technology of China","doi-asserted-by":"publisher","award":["ZYGX2019J064"],"award-info":[{"award-number":["ZYGX2019J064"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004663","name":"Key Projects of Global Change and Response of Ministry of Science and Technology of China","doi-asserted-by":"publisher","award":["2022ZDZX0001"],"award-info":[{"award-number":["2022ZDZX0001"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004663","name":"Key Projects of Global Change and Response of Ministry of Science and Technology of China","doi-asserted-by":"publisher","award":["2021YFS0335"],"award-info":[{"award-number":["2021YFS0335"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004663","name":"Key Projects of Global Change and Response of Ministry of Science and Technology of 
China","doi-asserted-by":"publisher","award":["FY-APP-2021.0304"],"award-info":[{"award-number":["FY-APP-2021.0304"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004663","name":"Key Projects of Global Change and Response of Ministry of Science and Technology of China","doi-asserted-by":"publisher","award":["CXFZ2022J031"],"award-info":[{"award-number":["CXFZ2022J031"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005408","name":"Central Universities, UESTC","doi-asserted-by":"publisher","award":["2020YFA0608203"],"award-info":[{"award-number":["2020YFA0608203"]}],"id":[{"id":"10.13039\/501100005408","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005408","name":"Central Universities, UESTC","doi-asserted-by":"publisher","award":["ZYGX2019J064"],"award-info":[{"award-number":["ZYGX2019J064"]}],"id":[{"id":"10.13039\/501100005408","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005408","name":"Central Universities, UESTC","doi-asserted-by":"publisher","award":["2022ZDZX0001"],"award-info":[{"award-number":["2022ZDZX0001"]}],"id":[{"id":"10.13039\/501100005408","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005408","name":"Central Universities, UESTC","doi-asserted-by":"publisher","award":["2021YFS0335"],"award-info":[{"award-number":["2021YFS0335"]}],"id":[{"id":"10.13039\/501100005408","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005408","name":"Central Universities, UESTC","doi-asserted-by":"publisher","award":["FY-APP-2021.0304"],"award-info":[{"award-number":["FY-APP-2021.0304"]}],"id":[{"id":"10.13039\/501100005408","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005408","name":"Central Universities, 
UESTC","doi-asserted-by":"publisher","award":["CXFZ2022J031"],"award-info":[{"award-number":["CXFZ2022J031"]}],"id":[{"id":"10.13039\/501100005408","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Major Science and Technology Projects of Sichuan Province","award":["2020YFA0608203"],"award-info":[{"award-number":["2020YFA0608203"]}]},{"name":"Major Science and Technology Projects of Sichuan Province","award":["ZYGX2019J064"],"award-info":[{"award-number":["ZYGX2019J064"]}]},{"name":"Major Science and Technology Projects of Sichuan Province","award":["2022ZDZX0001"],"award-info":[{"award-number":["2022ZDZX0001"]}]},{"name":"Major Science and Technology Projects of Sichuan Province","award":["2021YFS0335"],"award-info":[{"award-number":["2021YFS0335"]}]},{"name":"Major Science and Technology Projects of Sichuan Province","award":["FY-APP-2021.0304"],"award-info":[{"award-number":["FY-APP-2021.0304"]}]},{"name":"Major Science and Technology Projects of Sichuan Province","award":["CXFZ2022J031"],"award-info":[{"award-number":["CXFZ2022J031"]}]},{"name":"Science and Technology Support Project of Sichuan Province","award":["2020YFA0608203"],"award-info":[{"award-number":["2020YFA0608203"]}]},{"name":"Science and Technology Support Project of Sichuan Province","award":["ZYGX2019J064"],"award-info":[{"award-number":["ZYGX2019J064"]}]},{"name":"Science and Technology Support Project of Sichuan Province","award":["2022ZDZX0001"],"award-info":[{"award-number":["2022ZDZX0001"]}]},{"name":"Science and Technology Support Project of Sichuan Province","award":["2021YFS0335"],"award-info":[{"award-number":["2021YFS0335"]}]},{"name":"Science and Technology Support Project of Sichuan Province","award":["FY-APP-2021.0304"],"award-info":[{"award-number":["FY-APP-2021.0304"]}]},{"name":"Science and Technology Support Project of Sichuan Province","award":["CXFZ2022J031"],"award-info":[{"award-number":["CXFZ2022J031"]}]},{"name":"China Meteorological Administration 
Project","award":["2020YFA0608203"],"award-info":[{"award-number":["2020YFA0608203"]}]},{"name":"China Meteorological Administration Project","award":["ZYGX2019J064"],"award-info":[{"award-number":["ZYGX2019J064"]}]},{"name":"China Meteorological Administration Project","award":["2022ZDZX0001"],"award-info":[{"award-number":["2022ZDZX0001"]}]},{"name":"China Meteorological Administration Project","award":["2021YFS0335"],"award-info":[{"award-number":["2021YFS0335"]}]},{"name":"China Meteorological Administration Project","award":["FY-APP-2021.0304"],"award-info":[{"award-number":["FY-APP-2021.0304"]}]},{"name":"China Meteorological Administration Project","award":["CXFZ2022J031"],"award-info":[{"award-number":["CXFZ2022J031"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Remote Sensing"],"abstract":"<jats:p>Multi-object semantic segmentation from remote sensing images has gained significant attention in land resource surveying, global change monitoring, and disaster detection. Compared to other application scenarios, the objects in the remote sensing field are larger and have a wider range of distribution. In addition, some similar targets, such as roads and concrete-roofed buildings, are easily misjudged. However, existing convolutional neural networks operate only in the local receptive field, and this limits their capacity to represent the potential association between different objects and surrounding features. This paper develops a Multi-task Quadruple Attention Network (MQANet) to address the above-mentioned issues and increase segmentation accuracy. The MQANet contains four attention modules: position attention module (PAM), channel attention module (CAM), label attention module (LAM), and edge attention module (EAM). The quadruple attention modules obtain global features by expanding the receptive fields of the network and introducing spatial context information in the label. 
Then, a multi-tasking mechanism which splits a multi-category segmentation task into several binary-classification segmentation tasks is introduced to improve the ability to identify similar objects. The proposed MQANet network was applied to the Potsdam dataset, the Vaihingen dataset and self-annotated images from Chongzhou and Wuzhen (CZ-WZ), representative cities in China. Our MQANet performs better over the baseline net by a large margin of +6.33 OA and +7.05 Mean F1-score on the Vaihingen dataset, +3.57 OA and +2.83 Mean F1-score on the Potsdam dataset, and +3.88 OA and +8.65 Mean F1-score on the self-annotated dataset (CZ-WZ dataset). In addition, each image execution time of the MQANet model is reduced 66.6 ms compared to UNet. Moreover, the effectiveness of MQANet was also proven by comparative experiments with other studies.<\/jats:p>","DOI":"10.3390\/rs14246256","type":"journal-article","created":{"date-parts":[[2022,12,12]],"date-time":"2022-12-12T04:34:20Z","timestamp":1670819660000},"page":"6256","update-policy":"https:\/\/doi.org\/10.3390\/mdpi_crossmark_policy","source":"Crossref","is-referenced-by-count":17,"title":["MQANet: Multi-Task Quadruple Attention Network of Multi-Object Semantic Segmentation from Remote Sensing Images"],"prefix":"10.3390","volume":"14","author":[{"given":"Yuxia","family":"Li","sequence":"first","affiliation":[{"name":"School of Automation Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China"}]},{"given":"Yu","family":"Si","sequence":"additional","affiliation":[{"name":"School of Automation Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China"}]},{"given":"Zhonggui","family":"Tong","sequence":"additional","affiliation":[{"name":"School of Automation Engineering, University of Electronic Science and Technology of China, Chengdu 611731, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9875-9853","authenticated-orcid":false,"given":"Lei","family":"He","sequence":"additional","affiliation":[{"name":"School of Software Engineering, Chengdu University of Information Technology, Chengdu 610225, China"},{"name":"Sichuan Province Engineering Technology Research Center of Support Software of Informatization Application, Chengdu 610225, China"}]},{"given":"Jinglin","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Automation Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China"}]},{"given":"Shiyu","family":"Luo","sequence":"additional","affiliation":[{"name":"School of Automation Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China"}]},{"given":"Yushu","family":"Gong","sequence":"additional","affiliation":[{"name":"School of Automation Engineering, University of Electronic Science and Technology of China, Chengdu 611731, China"}]}],"member":"1968","published-online":{"date-parts":[[2022,12,10]]},"reference":[{"key":"ref_1","doi-asserted-by":"crossref","first-page":"4494","DOI":"10.1109\/TNNLS.2017.2749428","article-title":"L1-Norm distance minimization-based fast robust twin support vector $ k $-plane clustering","volume":"29","author":"Ye","year":"2017","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"ref_2","doi-asserted-by":"crossref","first-page":"1905","DOI":"10.1109\/JSTARS.2019.2915588","article-title":"Adjacent superpixel-based multiscale spatial-spectral kernel for hyperspectral classification","volume":"12","author":"Sun","year":"2019","journal-title":"IEEE J. Sel. Top. Appl. Earth Obs. Remote Sens."},{"key":"ref_3","first-page":"1","article-title":"Semisupervised feature extraction of hyperspectral image using nonlinear geodesic sparse hypergraphs","volume":"60","author":"Duan","year":"2021","journal-title":"IEEE Trans. Geosci. 
Remote Sens."},{"key":"ref_4","doi-asserted-by":"crossref","unstructured":"Long, J., Shelhamer, E., and Darrell, T. (2015, January 7\u201312). Fully convolutional networks for semantic segmentation. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Boston, MA, USA.","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"ref_5","doi-asserted-by":"crossref","unstructured":"Gualtieri, J.A., and Cromp, R.F. (1999). Support vector machines for hyperspectral remote sensing classification. 27th AIPR Workshop: Advances in Computer-Assisted Recognition, SPIE.","DOI":"10.1117\/12.339824"},{"key":"ref_6","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., and Brox, T. (2015). U-net: Convolutional networks for biomedical image segmentation. International Conference on Medical Image Computing and Computer-Assisted Intervention, Springer.","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref_7","doi-asserted-by":"crossref","first-page":"2481","DOI":"10.1109\/TPAMI.2016.2644615","article-title":"Segnet: A deep convolutional encoder-decoder architecture for image segmentation","volume":"39","author":"Badrinarayanan","year":"2017","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"ref_8","unstructured":"Chen, L.-C., Papandreou, G., Kokkinos, I., Murphy, K., and Yuille, A.L. (2014, January 14\u201316). Semantic image segmentation with deep convolutional nets and fully connected crfs. Proceedings of the International Conference on Learning Representations, Banff, AB, Canada."},{"key":"ref_9","doi-asserted-by":"crossref","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","article-title":"Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs","volume":"40","author":"Chen","year":"2017","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"ref_10","unstructured":"Chen, L.C., Papandreou, G., Kokkinos, I., Murphy, K., and Yuille, A.L. (2018). 
Rethinking atrous convolution for semantic image segmentation liang-chieh. IEEE Trans. Pattern Anal. Mach. Intell., 5."},{"key":"ref_11","doi-asserted-by":"crossref","unstructured":"Chen, L.-C., Zhu, Y., Papandreou, G., Schroff, F., and Adam, H. (2018, January 8\u201314). Encoder-decoder with atrous separable convolution for semantic image segmentation. Proceedings of the European Conference on Computer Vision (ECCV), Munich, Germany.","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"ref_12","doi-asserted-by":"crossref","unstructured":"Chollet, F. (2017, January 21\u201326). Xception: Deep learning with depthwise separable convolutions. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Honolulu, HI, USA.","DOI":"10.1109\/CVPR.2017.195"},{"key":"ref_13","doi-asserted-by":"crossref","unstructured":"Zheng, Z., Zhong, Y., Wang, J., and Ma, A. (2020, January 13\u201319). Foreground-aware relation network for geospatial object segmentation in high spatial resolution remote sensing imagery. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, Virtual.","DOI":"10.1109\/CVPR42600.2020.00415"},{"key":"ref_14","first-page":"1","article-title":"EFCNet: Ensemble Full Convolutional Network for Semantic Segmentation of High-Resolution Remote Sensing Images","volume":"19","author":"Chen","year":"2021","journal-title":"IEEE Geosci. Remote Sens. Lett."},{"key":"ref_15","doi-asserted-by":"crossref","first-page":"8552","DOI":"10.1109\/JSTARS.2021.3102137","article-title":"DSPCANet: Dual-Channel Scale-Aware Segmentation Network With Position and Channel Attentions for High-Resolution Aerial Images","volume":"14","author":"Li","year":"2021","journal-title":"IEEE J. Sel. Top. Appl. Earth Obs. 
Remote Sens."},{"key":"ref_16","doi-asserted-by":"crossref","first-page":"417","DOI":"10.1007\/s12524-021-01475-7","article-title":"Agricultural field extraction with deep learning algorithm and satellite imagery","volume":"50","author":"Sharifi","year":"2022","journal-title":"J. Indian Soc. Remote Sens."},{"key":"ref_17","doi-asserted-by":"crossref","unstructured":"Wang, X., Girshick, R., Gupta, A., and He, K. (2018, January 18\u201322). Non-local neural networks. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Salt Lake City, UT, USA.","DOI":"10.1109\/CVPR.2018.00813"},{"key":"ref_18","doi-asserted-by":"crossref","unstructured":"Fu, J., Liu, J., Tian, H., Li, Y., Bao, Y., Fang, Z., and Lu, H. (2019, January 16\u201320). Dual attention network for scene segmentation. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, Long Beach, CA, USA.","DOI":"10.1109\/CVPR.2019.00326"},{"key":"ref_19","unstructured":"Chen, Y., Kalantidis, Y., Li, J., Yan, S., and Feng, J. (2018). A^ 2-nets: Double attention networks. Adv. Neural Inf. Process. Syst., 31."},{"key":"ref_20","unstructured":"Li, X., Zhong, Z., Wu, J., Yang, Y., Lin, Z., and Liu, H. (November, January 27). Expectation-maximization attention networks for semantic segmentation. Proceedings of the IEEE\/CVF International Conference on Computer Vision, Seoul, Korea."},{"key":"ref_21","doi-asserted-by":"crossref","unstructured":"Woo, S., Park, J., Lee, J.-Y., and Kweon, I.S. (2018, January 8\u201314). Cbam: Convolutional block attention module. Proceedings of the European Conference on Computer Vision (ECCV), Munich, Germany.","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"ref_22","doi-asserted-by":"crossref","first-page":"426","DOI":"10.1109\/TGRS.2020.2994150","article-title":"LANet: Local attention embedding to improve the semantic segmentation of remote sensing images","volume":"59","author":"Ding","year":"2020","journal-title":"IEEE Trans. Geosci. 
Remote Sens."},{"key":"ref_23","first-page":"1","article-title":"Hybrid multiple attention network for semantic segmentation in aerial images","volume":"60","author":"Niu","year":"2021","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_24","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1109\/TGRS.2022.3231215","article-title":"Spectral\u2013Spatial Feature Tokenization Transformer for Hyperspectral Image Classification","volume":"60","author":"Sun","year":"2022","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_25","doi-asserted-by":"crossref","unstructured":"Zhou, L., Zhang, C., and Wu, M. (2018, January 18\u201322). D-LinkNet: LinkNet with pretrained encoder and dilated convolution for high resolution satellite imagery road extraction. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, Salt Lake City, UT, USA.","DOI":"10.1109\/CVPRW.2018.00034"},{"key":"ref_26","doi-asserted-by":"crossref","first-page":"2436","DOI":"10.1109\/TIP.2020.3046921","article-title":"Gated path selection network for semantic segmentation","volume":"30","author":"Geng","year":"2021","journal-title":"IEEE Trans. Image Process."},{"key":"ref_27","doi-asserted-by":"crossref","first-page":"1169","DOI":"10.1109\/TIP.2020.3042065","article-title":"Cgnet: A light-weight context guided network for semantic segmentation","volume":"30","author":"Wu","year":"2020","journal-title":"IEEE Trans. Image Process."},{"key":"ref_28","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., and Polosukhin, I. (2017). Attention is all you need. Adv. Neural Inf. Process. Syst., 30."},{"key":"ref_29","unstructured":"Park, J., Woo, S., Lee, J., and Kweon, I. (2018, January 3\u20136). BAM: Bottleneck Attention Module. 
Proceedings of the British Machine Vision Conference, Newcastle, UK."},{"key":"ref_30","doi-asserted-by":"crossref","unstructured":"Zhao, H., Zhang, Y., Liu, S., Shi, J., Loy, C.C., Lin, D., and Jia, J. (2018, January 8\u201314). Psanet: Point-wise spatial attention network for scene parsing. Proceedings of the European Conference on Computer Vision (ECCV), Munich, Germany.","DOI":"10.1007\/978-3-030-01240-3_17"},{"key":"ref_31","doi-asserted-by":"crossref","first-page":"41","DOI":"10.1023\/A:1007379606734","article-title":"Multitask learning","volume":"28","author":"Caruana","year":"1997","journal-title":"Mach. Learn."},{"key":"ref_32","doi-asserted-by":"crossref","first-page":"149","DOI":"10.1613\/jair.731","article-title":"A model of inductive bias learning","volume":"12","author":"Baxter","year":"2000","journal-title":"J. Artif. Intell. Res."},{"key":"ref_33","doi-asserted-by":"crossref","unstructured":"Ben-David, S., and Schuller, R. (2003). Exploiting task relatedness for multiple task learning. Learning Theory and Kernel Machines, Springer.","DOI":"10.1007\/978-3-540-45167-9_41"},{"key":"ref_34","unstructured":"Maurer, A., Pontil, M., and Romera-Paredes, B. (2013, January 16\u201321). Sparse coding for multitask and transfer learning. Proceedings of the International Conference on Machine Learning PMLR, Atlanta, GA, USA."},{"key":"ref_35","unstructured":"Ando, R.K., Zhang, T., and Bartlett, P. (2005). A framework for learning predictive structures from multiple tasks and unlabeled data. J. Mach. Learn. Res., 6."},{"key":"ref_36","doi-asserted-by":"crossref","first-page":"104205","DOI":"10.1016\/j.engappai.2021.104205","article-title":"An effective combination of loss gradients for multi-task learning applied on instance segmentation and depth estimation","volume":"100","author":"Nakamura","year":"2021","journal-title":"Eng. Appl. Artif. Intell."},{"key":"ref_37","doi-asserted-by":"crossref","unstructured":"Duong, L., Cohn, T., Bird, S., and Cook, P. 
(2015, January 26\u201331). Low resource dependency parsing: Cross-lingual parameter sharing in a neural network parser. Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), Beijing, China.","DOI":"10.3115\/v1\/P15-2139"},{"key":"ref_38","unstructured":"Yang, Y., and Hospedales, T. (2017, January 24\u201326). Deep Multi-task Representation Learning: A Tensor Factorisation Approach. Proceedings of the 5th International Conference on Learning Representations, Toulon, France."},{"key":"ref_39","unstructured":"Kendall, A., Gal, Y., and Cipolla, R. (2018, January 18\u201322). Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Salt Lake City, UT, USA."},{"key":"ref_40","doi-asserted-by":"crossref","unstructured":"Borse, S., Wang, Y., Zhang, Y., and Porikli, F. (2021, January 19\u201325). Inverseform: A loss function for structured boundary-aware segmentation. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, Virtual.","DOI":"10.1109\/CVPR46437.2021.00584"},{"key":"ref_41","unstructured":"ISPRS (2018, September 04). 2D Semantic Labeling Contest\u2014Potsdam. Available online: https:\/\/www.isprs.org\/education\/benchmarks\/UrbanSemLab\/2d-sem-label-potsdam.aspx."},{"key":"ref_42","unstructured":"ISPRS (2018, September 04). 2D Semantic Labeling Contest\u2014Vaihingen. Available online: https:\/\/www.isprs.org\/education\/benchmarks\/UrbanSemLab\/2d-sem-label-vaihingen.aspx."},{"key":"ref_43","doi-asserted-by":"crossref","unstructured":"Chu, X., Chen, L., and Yu, W. (2022, January 19\u201324). NAFSSR: Stereo Image Super-Resolution Using NAFNet. 
Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, New Orleans, LA, USA.","DOI":"10.1109\/CVPRW56347.2022.00130"},{"key":"ref_44","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., and Sun, G. (2018, January 18\u201322). Squeeze-and-excitation networks. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Salt Lake City, UT, USA.","DOI":"10.1109\/CVPR.2018.00745"},{"key":"ref_45","unstructured":"Liu, Z. (2021, January 17\u201319). Semantic Segmentation of Remote sensing images via combining residuals and multi-scale modules. Proceedings of the ICMLCA 2021; 2nd International Conference on Machine Learning and Computer Application, Shenyang, China."}],"container-title":["Remote Sensing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.mdpi.com\/2072-4292\/14\/24\/6256\/pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,11]],"date-time":"2025-10-11T01:37:40Z","timestamp":1760146660000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.mdpi.com\/2072-4292\/14\/24\/6256"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,12,10]]},"references-count":45,"journal-issue":{"issue":"24","published-online":{"date-parts":[[2022,12]]}},"alternative-id":["rs14246256"],"URL":"https:\/\/doi.org\/10.3390\/rs14246256","relation":{},"ISSN":["2072-4292"],"issn-type":[{"value":"2072-4292","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,12,10]]}}}