{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T17:52:36Z","timestamp":1775065956369,"version":"3.50.1"},"reference-count":67,"publisher":"MDPI AG","issue":"8","license":[{"start":{"date-parts":[[2024,4,17]],"date-time":"2024-04-17T00:00:00Z","timestamp":1713312000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2022ZD0160400"],"award-info":[{"award-number":["2022ZD0160400"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["62206031"],"award-info":[{"award-number":["62206031"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["62301092"],"award-info":[{"award-number":["62301092"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2021M700613"],"award-info":[{"award-number":["2021M700613"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2022M720581"],"award-info":[{"award-number":["2022M720581"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of 
China","doi-asserted-by":"publisher","award":["2023T160762"],"award-info":[{"award-number":["2023T160762"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2023CDJXY-036"],"award-info":[{"award-number":["2023CDJXY-036"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"name":"National Natural Science Foundation of China","award":["2022ZD0160400"],"award-info":[{"award-number":["2022ZD0160400"]}]},{"name":"National Natural Science Foundation of China","award":["62206031"],"award-info":[{"award-number":["62206031"]}]},{"name":"National Natural Science Foundation of China","award":["62301092"],"award-info":[{"award-number":["62301092"]}]},{"name":"National Natural Science Foundation of China","award":["2021M700613"],"award-info":[{"award-number":["2021M700613"]}]},{"name":"National Natural Science Foundation of China","award":["2022M720581"],"award-info":[{"award-number":["2022M720581"]}]},{"name":"National Natural Science Foundation of China","award":["2023T160762"],"award-info":[{"award-number":["2023T160762"]}]},{"name":"National Natural Science Foundation of China","award":["2023CDJXY-036"],"award-info":[{"award-number":["2023CDJXY-036"]}]},{"name":"China Postdoctoral Science Foundation","award":["2022ZD0160400"],"award-info":[{"award-number":["2022ZD0160400"]}]},{"name":"China Postdoctoral Science Foundation","award":["62206031"],"award-info":[{"award-number":["62206031"]}]},{"name":"China Postdoctoral Science Foundation","award":["62301092"],"award-info":[{"award-number":["62301092"]}]},{"name":"China Postdoctoral Science Foundation","award":["2021M700613"],"award-info":[{"award-number":["2021M700613"]}]},{"name":"China Postdoctoral Science Foundation","award":["2022M720581"],"award-info":[{"award-number":["2022M720581"]}]},{"name":"China Postdoctoral Science 
Foundation","award":["2023T160762"],"award-info":[{"award-number":["2023T160762"]}]},{"name":"China Postdoctoral Science Foundation","award":["2023CDJXY-036"],"award-info":[{"award-number":["2023CDJXY-036"]}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["2022ZD0160400"],"award-info":[{"award-number":["2022ZD0160400"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["62206031"],"award-info":[{"award-number":["62206031"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["62301092"],"award-info":[{"award-number":["62301092"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["2021M700613"],"award-info":[{"award-number":["2021M700613"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["2022M720581"],"award-info":[{"award-number":["2022M720581"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["2023T160762"],"award-info":[{"award-number":["2023T160762"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central 
Universities","doi-asserted-by":"publisher","award":["2023CDJXY-036"],"award-info":[{"award-number":["2023CDJXY-036"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Remote Sensing"],"abstract":"<jats:p>Remote sensing image dehazing is a well-known remote sensing image processing task focused on restoring clean images from hazy images. The Transformer network, based on the self-attention mechanism, has demonstrated remarkable advantages in various image restoration tasks, due to its capacity to capture long-range dependencies within images. However, it is weak at modeling local context. Conversely, convolutional neural networks (CNNs) are adept at capturing local contextual information. Local contextual information could provide more details, while long-range dependencies capture global structure information. The combination of long-range dependencies and local context modeling is beneficial for remote sensing image dehazing. Therefore, in this paper, we propose a CNN-based adaptive local context enrichment module (ALCEM) to extract contextual information within local regions. Subsequently, we integrate our proposed ALCEM into the multi-head self-attention and feed-forward network of the Transformer, constructing a novel locally enhanced attention (LEA) and a local continuous-enhancement feed-forward network (LCFN). The LEA utilizes the ALCEM to inject local context information that is complementary to the long-range relationship modeled by multi-head self-attention, which is beneficial to removing haze and restoring details. The LCFN extracts multi-scale spatial information and selectively fuses them by the ALCEM, which supplements more informative information compared with existing regular feed-forward networks with only position-specific information flow. 
Powered by the LEA and LCFN, a novel Transformer-based dehazing network termed LCEFormer is proposed to restore clear images from hazy remote sensing images, which combines the advantages of CNN and Transformer. Experiments conducted on three distinct datasets, namely DHID, ERICE, and RSID, demonstrate that our proposed LCEFormer achieves the state-of-the-art performance in hazy scenes. Specifically, our LCEFormer outperforms DCIL by 0.78 dB and 0.018 for PSNR and SSIM on the DHID dataset.<\/jats:p>","DOI":"10.3390\/rs16081422","type":"journal-article","created":{"date-parts":[[2024,4,17]],"date-time":"2024-04-17T10:52:37Z","timestamp":1713351157000},"page":"1422","update-policy":"https:\/\/doi.org\/10.3390\/mdpi_crossmark_policy","source":"Crossref","is-referenced-by-count":14,"title":["Remote Sensing Image Dehazing via a Local Context-Enriched Transformer"],"prefix":"10.3390","volume":"16","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9872-9286","authenticated-orcid":false,"given":"Jing","family":"Nie","sequence":"first","affiliation":[{"name":"School of Microelectronics and Communication Engineering, Chongqing University, Chongqing 400044, China"}]},{"given":"Jin","family":"Xie","sequence":"additional","affiliation":[{"name":"School of Big Data and Software Engineering, Chongqing University, Chongqing 400044, China"},{"name":"Shanghai Artificial Intelligence Laboratory, Shanghai 200232, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8022-4172","authenticated-orcid":false,"given":"Hanqing","family":"Sun","sequence":"additional","affiliation":[{"name":"Changchun Institute of Optics, Fine Mechanics and Physics, Chinese Academy of Sciences, Changchun 130033, China"}]}],"member":"1968","published-online":{"date-parts":[[2024,4,17]]},"reference":[{"key":"ref_1","doi-asserted-by":"crossref","unstructured":"Wei, J., Cao, Y., Yang, K., Chen, L., and Wu, Y. (2023). Self-Supervised Remote Sensing Image Dehazing Network Based on Zero-Shot Learning. 
Remote Sens., 15.","DOI":"10.3390\/rs15112732"},{"key":"ref_2","doi-asserted-by":"crossref","unstructured":"Yu, J., Liang, D., Hang, B., and Gao, H. (2022). Aerial image dehazing using reinforcement learning. Remote Sens., 14.","DOI":"10.3390\/rs14235998"},{"key":"ref_3","doi-asserted-by":"crossref","unstructured":"Jia, J., Pan, M., Li, Y., Yin, Y., Chen, S., Qu, H., Chen, X., and Jiang, B. (2023). GLTF-Net: Deep-Learning Network for Thick Cloud Removal of Remote Sensing Images via Global\u2013Local Temporality and Features. Remote Sens., 15.","DOI":"10.3390\/rs15215145"},{"key":"ref_4","doi-asserted-by":"crossref","first-page":"10412","DOI":"10.1109\/ACCESS.2023.3240648","article-title":"A Non-Reference Evaluation of Underwater Image Enhancement Methods Using a New Underwater Image Dataset","volume":"11","author":"Saleem","year":"2023","journal-title":"IEEE Access"},{"key":"ref_5","doi-asserted-by":"crossref","unstructured":"Paheding, S., Reyes, A.A., Kasaragod, A., and Oommen, T. (2022, January 18\u201324). GAF-NAU: Gramian Angular Field Encoded Neighborhood Attention U-Net for Pixel-Wise Hyperspectral Image Classification. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, New Orleans, LA, USA.","DOI":"10.1109\/CVPRW56347.2022.00056"},{"key":"ref_6","doi-asserted-by":"crossref","unstructured":"Bazi, Y., Bashmal, L., Rahhal, M.M.A., Dayil, R.A., and Ajlan, N.A. (2021). Vision transformers for remote sensing image classification. Remote Sens., 13.","DOI":"10.3390\/rs13030516"},{"key":"ref_7","doi-asserted-by":"crossref","first-page":"4251","DOI":"10.1109\/TIP.2022.3177322","article-title":"Rotation-invariant attention network for hyperspectral image classification","volume":"31","author":"Zheng","year":"2022","journal-title":"IEEE Trans. Image Process."},{"key":"ref_8","doi-asserted-by":"crossref","unstructured":"Liu, Y., and Jiang, W. (2024). 
OII: An Orientation Information Integrating Network for Oriented Object Detection in Remote Sensing Images. Remote Sens., 16.","DOI":"10.3390\/rs16050731"},{"key":"ref_9","doi-asserted-by":"crossref","unstructured":"Xu, C., Zheng, X., and Lu, X. (2022). Multi-level alignment network for cross-domain ship detection. Remote Sens., 14.","DOI":"10.3390\/rs14102389"},{"key":"ref_10","doi-asserted-by":"crossref","first-page":"8933","DOI":"10.1109\/JSTARS.2023.3315544","article-title":"Cross-Modal Local Calibration and Global Context Modeling Network for RGB\u2013Infrared Remote-Sensing Object Detection","volume":"16","author":"Xie","year":"2023","journal-title":"IEEE J. Sel. Top. Appl. Earth Obs. Remote Sens."},{"key":"ref_11","doi-asserted-by":"crossref","first-page":"143","DOI":"10.1007\/s12145-019-00380-5","article-title":"Change detection techniques for remote sensing applications: A survey","volume":"12","author":"Asokan","year":"2019","journal-title":"Earth Sci. Inform."},{"key":"ref_12","first-page":"1","article-title":"Unsupervised change detection by cross-resolution difference learning","volume":"60","author":"Zheng","year":"2021","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_13","doi-asserted-by":"crossref","unstructured":"Ma, J., Liu, D., Qin, S., Jia, G., Zhang, J., and Xu, Z. (2023). An Asymmetric Feature Enhancement Network for Multiple Object Tracking of Unmanned Aerial Vehicle. Remote Sens., 16.","DOI":"10.3390\/rs16010070"},{"key":"ref_14","doi-asserted-by":"crossref","first-page":"4702914","DOI":"10.1109\/TGRS.2023.3336665","article-title":"Multiple Source Domain Adaptation for Multiple Object Tracking in Satellite Video","volume":"61","author":"Zheng","year":"2023","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_15","doi-asserted-by":"crossref","unstructured":"Qi, L., Zuo, D., Wang, Y., Tao, Y., Tang, R., Shi, J., Gong, J., and Li, B. (2024). 
Convolutional Neural Network-Based Method for Agriculture Plot Segmentation in Remote Sensing Images. Remote Sens., 16.","DOI":"10.3390\/rs16020346"},{"key":"ref_16","doi-asserted-by":"crossref","unstructured":"Paheding, S., Reyes, A.A., Rajaneesh, A., Sajinkumar, K., and Oommen, T. (2024, January 4\u20138). MarsLS-Net: Martian Landslides Segmentation Network and Benchmark Dataset. Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), Waikoloa, HI, USA.","DOI":"10.1109\/WACV57701.2024.00805"},{"key":"ref_17","doi-asserted-by":"crossref","first-page":"2341","DOI":"10.1109\/TPAMI.2010.168","article-title":"Single image haze removal using dark channel prior","volume":"33","author":"He","year":"2011","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"ref_18","doi-asserted-by":"crossref","unstructured":"Berman, D., and Avidan, S. (2016, January 27\u201330). Non-local image dehazing. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Las Vegas, NV, USA.","DOI":"10.1109\/CVPR.2016.185"},{"key":"ref_19","doi-asserted-by":"crossref","unstructured":"Tan, R.T. (2008, January 23\u201328). Visibility in bad weather from a single image. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Anchorage, AK, USA.","DOI":"10.1109\/CVPR.2008.4587643"},{"key":"ref_20","doi-asserted-by":"crossref","first-page":"13","DOI":"10.1145\/2651362","article-title":"Dehazing using color-lines","volume":"34","author":"Fattal","year":"2014","journal-title":"ACM Trans. Graph."},{"key":"ref_21","doi-asserted-by":"crossref","first-page":"5187","DOI":"10.1109\/TIP.2016.2598681","article-title":"Dehazenet: An end-to-end system for single image haze removal","volume":"25","author":"Cai","year":"2016","journal-title":"IEEE Trans. 
Image Process."},{"key":"ref_22","doi-asserted-by":"crossref","first-page":"3211","DOI":"10.1109\/TCSVT.2018.2880223","article-title":"Visual Haze Removal by a Unified Generative Adversarial Network","volume":"29","author":"Pang","year":"2019","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"ref_23","unstructured":"Liu, X., Ma, Y., Shi, Z., and Chen, J. (November, January 27). GridDehazeNet: Attention-Based Multi-Scale Network for Image Dehazing. Proceedings of the IEEE International Conference on Computer Vision, Seoul, Republic of Korea."},{"key":"ref_24","unstructured":"Hang, D., Jinshan, P., Zhe, H., Xiang, L., Fei, W., and Ming-Hsuan, Y. (2020, January 13\u201319). Multi-Scale Boosted Dehazing Network with Dense Feature Fusion. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Seattle, WA, USA."},{"key":"ref_25","unstructured":"McCartney, E.J. (1976). Optics of the Atmosphere: Scattering by Molecules and Particles, John Wiley and Sons, Inc."},{"key":"ref_26","doi-asserted-by":"crossref","unstructured":"Zhang, H., and Patel, V.M. (2018, January 18\u201322). Densely Connected Pyramid Dehazing Network. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Salt Lake City, UT, USA.","DOI":"10.1109\/CVPR.2018.00337"},{"key":"ref_27","doi-asserted-by":"crossref","unstructured":"Qin, X., Wang, Z., Bai, Y., Xie, X., and Jia, H. (2020, January 7\u201312). FFA-Net: Feature Fusion Attention Network for Single Image Dehazing. Proceedings of the AAAI Conference on Artificial Intelligence, New York, NY, USA.","DOI":"10.1609\/aaai.v34i07.6865"},{"key":"ref_28","doi-asserted-by":"crossref","unstructured":"Pang, Y., Nie, J., Xie, J., Han, J., and Li, X. (2020, January 14\u201319). BidNet: Binocular image dehazing without explicit disparity estimation. 
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Virtual.","DOI":"10.1109\/CVPR42600.2020.00597"},{"key":"ref_29","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., and Gelly, S. (2020). An Image is Worth 16 \u00d7 16 Words: Transformers for Image Recognition at Scale. arXiv."},{"key":"ref_30","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., and Guo, B. (2021). Swin Transformer: Hierarchical Vision Transformer using Shifted Windows. arXiv.","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref_31","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., and Polosukhin, I. (2017). Attention Is All You Need. arXiv."},{"key":"ref_32","doi-asserted-by":"crossref","unstructured":"Wang, Z., Cun, X., Bao, J., and Liu, J. (2021). Uformer: A General U-Shaped Transformer for Image Restoration. arXiv.","DOI":"10.1109\/CVPR52688.2022.01716"},{"key":"ref_33","doi-asserted-by":"crossref","first-page":"1927","DOI":"10.1109\/TIP.2023.3256763","article-title":"Vision transformers for single image dehazing","volume":"32","author":"Song","year":"2023","journal-title":"IEEE Trans. Image Process."},{"key":"ref_34","doi-asserted-by":"crossref","unstructured":"Wu, P., Pan, Z., Tang, H., and Hu, Y. (2022). Cloudformer: A Cloud-Removal Network Combining Self-Attention Mechanism and Convolution. Remote Sens., 14.","DOI":"10.3390\/rs14236132"},{"key":"ref_35","doi-asserted-by":"crossref","unstructured":"Guo, C., Yan, Q., Anwar, S., Cong, R., Ren, W., and Li, C. (2022, January 18\u201324). Image Dehazing Transformer with Transmission-Aware 3D Position Embeddingn. 
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, New Orleans, LA, USA.","DOI":"10.1109\/CVPR52688.2022.00572"},{"key":"ref_36","first-page":"5631016","article-title":"Dense Haze Removal Based on Dynamic Collaborative Inference Learning for Remote Sensing Images","volume":"60","author":"Zhang","year":"2022","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_37","unstructured":"Lin, D., Xu, G., Wang, X., Wang, Y., Sun, X., and Fu, K. (2019). A remote sensing image dataset for cloud removal. arXiv."},{"key":"ref_38","doi-asserted-by":"crossref","first-page":"4702914","DOI":"10.1109\/TGRS.2023.3285228","article-title":"Trinity-Net: Gradient-Guided Swin Transformer-Based Remote Sensing Image Dehazing and Beyond","volume":"61","author":"Chi","year":"2023","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_39","doi-asserted-by":"crossref","unstructured":"Yang, Y., and Newsam, S. (2010, January 2\u20135). Bag-of-visual-words and spatial extensions for land-use classification. Proceedings of the 18th SIGSPATIAL International Conference on Advances in Geographic Information Systems, San Jose, CA, USA.","DOI":"10.1145\/1869790.1869829"},{"key":"ref_40","doi-asserted-by":"crossref","unstructured":"Li, B., Peng, X., Wang, Z., Xu, J., and Feng, D. (2017, January 22\u201329). Aod-net: All-in-one dehazing network. Proceedings of the IEEE International Conference on Computer Vision, Venice, Italy.","DOI":"10.1109\/ICCV.2017.511"},{"key":"ref_41","doi-asserted-by":"crossref","unstructured":"Wang, Z., Cun, X., Bao, J., Zhou, W., Liu, J., and Li, H. (2022, January 18\u201324). Uformer: A general u-shaped transformer for image restoration. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, New Orleans, LA, USA.","DOI":"10.1109\/CVPR52688.2022.01716"},{"key":"ref_42","doi-asserted-by":"crossref","unstructured":"Dong, J., and Pan, J. (2020, January 23\u201328). Physics-based feature dehazing networks. 
Proceedings of the European Conference on Computer Vision, Glasgow, UK.","DOI":"10.1007\/978-3-030-58577-8_12"},{"key":"ref_43","doi-asserted-by":"crossref","unstructured":"Wang, J., Wu, S., Xu, K., and Yuan, Z. (2023). Frequency Compensated Diffusion Model for Real-scene Dehazing. arXiv.","DOI":"10.2139\/ssrn.4573127"},{"key":"ref_44","doi-asserted-by":"crossref","first-page":"112902","DOI":"10.1016\/j.rse.2022.112902","article-title":"Attention mechanism-based generative adversarial networks for cloud removal in Landsat images","volume":"271","author":"Xu","year":"2022","journal-title":"Remote. Sens. Environ."},{"key":"ref_45","doi-asserted-by":"crossref","unstructured":"Enomoto, K., Sakurada, K., Wang, W., Fukui, H., Matsuoka, M., Nakamura, R., and Kawaguchi, N. (2017, January 21\u201326). Filmy cloud removal on satellite imagery with multispectral conditional generative adversarial nets. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, Honolulu, HI, USA.","DOI":"10.1109\/CVPRW.2017.197"},{"key":"ref_46","doi-asserted-by":"crossref","first-page":"5619612","DOI":"10.1109\/TGRS.2022.3157917","article-title":"Thick Cloud Removal in Optical Remote Sensing Images Using a Texture Complexity Guided Self-Paced Learning Method","volume":"60","author":"Tao","year":"2022","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_47","doi-asserted-by":"crossref","unstructured":"Wang, W., Xie, E., Li, X., Fan, D., Song, K., Liang, D., Lu, T., Luo, P., and Shao, L. (2021, January 11\u201317). Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions. Proceedings of the IEEE International Conference on Computer Vision, Montreal, QC, Canada.","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref_48","unstructured":"Zhang, Q., and Yang, Y.B. (2021, January 7\u201310). ResT: An Efficient Transformer for Visual Recognition. 
Proceedings of the Advances in Neural Information Processing Systems, Virtual."},{"key":"ref_49","doi-asserted-by":"crossref","first-page":"126535","DOI":"10.1016\/j.neucom.2023.126535","article-title":"Visual transformer with stable prior and patch-level attention for single image dehazing","volume":"551","author":"Liu","year":"2023","journal-title":"Neurocomputing"},{"key":"ref_50","doi-asserted-by":"crossref","unstructured":"Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., and Yang, M.H. (2022, January 18\u201324). Restormer: Efficient Transformer for High-Resolution Image Restoration. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, New Orleans, LA, USA.","DOI":"10.1109\/CVPR52688.2022.00564"},{"key":"ref_51","unstructured":"Charbonnier, P., Blanc-Feraud, L., Aubert, G., and Barlaud, M. (1994, January 13\u201316). Two deterministic half-quadratic regularization algorithms for computed imaging. Proceedings of the 1st International Conference on Image Processing, Austin, TX, USA."},{"key":"ref_52","doi-asserted-by":"crossref","unstructured":"Huang, B., Li, Z., Yang, C., Sun, F., and Song, Y. (2020, January 1\u20135). Single Satellite Optical Imagery Dehazing using SAR Image Prior Based on conditional Generative Adversarial Networks. Proceedings of the 2020 IEEE Winter Conference on Applications of Computer Vision (WACV), Snowmass Village, CO, USA.","DOI":"10.1109\/WACV45572.2020.9093471"},{"key":"ref_53","unstructured":"MMagic Contributors (2024, February 24). MMagic: OpenMMLab Multimodal Advanced, Generative, and Intelligent Creation Toolbox. Available online: https:\/\/github.com\/open-mmlab\/mmagic."},{"key":"ref_54","unstructured":"Paszke, A., Gross, S., Chintala, S., Chanan, G., Yang, E., DeVito, Z., Lin, Z., Desmaison, A., Antiga, L., and Lerer, A. (2017, January 4\u20139). Automatic differentiation in PyTorch. 
Proceedings of the NIPS workshop, Long Beach, CA, USA."},{"key":"ref_55","unstructured":"Loshchilov, I., and Hutter, F. (2017). Decoupled weight decay regularization. arXiv."},{"key":"ref_56","unstructured":"Loshchilov, I., and Hutter, F. (2017, January 24\u201326). SGDR: Stochastic gradient descent with warm restarts. Proceedings of the International Conference on Learning Representations, Toulon, France."},{"key":"ref_57","doi-asserted-by":"crossref","unstructured":"Yang, H.H., Yang, C.H.H., and James Tsai, Y.C. (2020, January 4\u20139). Y-Net: Multi-Scale Feature Aggregation Network With Wavelet Structure Similarity Loss Function For Single Image Dehazing. Proceedings of the ICASSP 2020\u20142020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), Virtual.","DOI":"10.1109\/ICASSP40776.2020.9053920"},{"key":"ref_58","doi-asserted-by":"crossref","first-page":"1751","DOI":"10.1109\/LGRS.2020.3006533","article-title":"A Coarse-to-Fine Two-Stage Attentive Network for Haze Removal of Remote Sensing Images","volume":"18","author":"Li","year":"2021","journal-title":"IEEE Geosci. Remote. Sens. Lett."},{"key":"ref_59","doi-asserted-by":"crossref","unstructured":"Wang, S., Wu, H., and Zhang, L. (2021, January 19\u201322). Afdn: Attention-Based Feedback Dehazing Network For Uav Remote Sensing Image Haze Removal. Proceedings of the IEEE International Conference on Image Processing (ICIP), Anchorage, AK, USA.","DOI":"10.1109\/ICIP42928.2021.9506604"},{"key":"ref_60","doi-asserted-by":"crossref","unstructured":"Zheng, Z., Ren, W., Cao, X., Hu, X., Wang, T., Song, F., and Jia, X. (2021, January 19\u201325). Ultra-high-definition image dehazing via multi-guided bilateral learning. 
Proceedings of the 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Nashville, TN, USA.","DOI":"10.1109\/CVPR46437.2021.01592"},{"key":"ref_61","doi-asserted-by":"crossref","first-page":"5401410","DOI":"10.1109\/TGRS.2021.3069889","article-title":"Hybrid-scale self-similarity exploitation for remote sensing image super-resolution","volume":"60","author":"Lei","year":"2022","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_62","doi-asserted-by":"crossref","first-page":"2861","DOI":"10.1109\/TIP.2010.2050625","article-title":"Image super-resolution via sparse representation","volume":"19","author":"Yang","year":"2010","journal-title":"IEEE Trans. Image Process."},{"key":"ref_63","doi-asserted-by":"crossref","first-page":"295","DOI":"10.1109\/TPAMI.2015.2439281","article-title":"Image super-resolution using deep convolutional networks","volume":"38","author":"Dong","year":"2015","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"ref_64","doi-asserted-by":"crossref","unstructured":"Dong, C., Loy, C.C., and Tang, X. (2016, January 11\u201314). Accelerating the super-resolution convolutional neural network. Proceedings of the Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands. Proceedings, Part II 14.","DOI":"10.1007\/978-3-319-46475-6_25"},{"key":"ref_65","doi-asserted-by":"crossref","first-page":"1243","DOI":"10.1109\/LGRS.2017.2704122","article-title":"Super-resolution for remote sensing images via local\u2013global combined network","volume":"14","author":"Lei","year":"2017","journal-title":"IEEE Geosci. Remote. Sens. Lett."},{"key":"ref_66","doi-asserted-by":"crossref","first-page":"1432","DOI":"10.1109\/LGRS.2019.2899576","article-title":"Remote sensing single-image superresolution based on a deep compendium model","volume":"16","author":"Haut","year":"2019","journal-title":"IEEE Geosci. Remote. Sens. 
Lett."},{"key":"ref_67","doi-asserted-by":"crossref","unstructured":"Qin, M., Mavromatis, S., Hu, L., Zhang, F., Liu, R., Sequeira, J., and Du, Z. (2020). Remote sensing single-image resolution improvement using a deep gradient-aware network with image-specific enhancement. Remote Sens., 12.","DOI":"10.3390\/rs12050758"}],"container-title":["Remote Sensing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.mdpi.com\/2072-4292\/16\/8\/1422\/pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T14:29:28Z","timestamp":1760106568000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.mdpi.com\/2072-4292\/16\/8\/1422"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,17]]},"references-count":67,"journal-issue":{"issue":"8","published-online":{"date-parts":[[2024,4]]}},"alternative-id":["rs16081422"],"URL":"https:\/\/doi.org\/10.3390\/rs16081422","relation":{},"ISSN":["2072-4292"],"issn-type":[{"value":"2072-4292","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4,17]]}}}