{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T06:01:38Z","timestamp":1775628098639,"version":"3.50.1"},"reference-count":31,"publisher":"MDPI AG","issue":"20","license":[{"start":{"date-parts":[[2024,10,21]],"date-time":"2024-10-21T00:00:00Z","timestamp":1729468800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"National Natural Science Foundation of China","award":["62271159"],"award-info":[{"award-number":["62271159"]}]},{"name":"National Natural Science Foundation of China","award":["62071136"],"award-info":[{"award-number":["62071136"]}]},{"name":"National Natural Science Foundation of China","award":["62002083"],"award-info":[{"award-number":["62002083"]}]},{"name":"National Natural Science Foundation of China","award":["61971153"],"award-info":[{"award-number":["61971153"]}]},{"name":"National Natural Science Foundation of China","award":["62371153"],"award-info":[{"award-number":["62371153"]}]},{"name":"National Natural Science Foundation of China","award":["YQ2022F002"],"award-info":[{"award-number":["YQ2022F002"]}]},{"name":"National Natural Science Foundation of China","award":["3072024XX0805"],"award-info":[{"award-number":["3072024XX0805"]}]},{"name":"National Natural Science Foundation of China","award":["GA23B003"],"award-info":[{"award-number":["GA23B003"]}]},{"name":"National Natural Science Foundation of China","award":["2023-CXPT-LC-005"],"award-info":[{"award-number":["2023-CXPT-LC-005"]}]},{"name":"Excellent Youth Foundation of Heilongjiang Province of China","award":["62271159"],"award-info":[{"award-number":["62271159"]}]},{"name":"Excellent Youth Foundation of Heilongjiang Province of China","award":["62071136"],"award-info":[{"award-number":["62071136"]}]},{"name":"Excellent Youth Foundation of Heilongjiang Province of China","award":["62002083"],"award-info":[{"award-number":["62002083"]}]},{"name":"Excellent Youth Foundation of Heilongjiang Province of China","award":["61971153"],"award-info":[{"award-number":["61971153"]}]},{"name":"Excellent Youth Foundation of Heilongjiang Province of China","award":["62371153"],"award-info":[{"award-number":["62371153"]}]},{"name":"Excellent Youth Foundation of Heilongjiang Province of China","award":["YQ2022F002"],"award-info":[{"award-number":["YQ2022F002"]}]},{"name":"Excellent Youth Foundation of Heilongjiang Province of China","award":["3072024XX0805"],"award-info":[{"award-number":["3072024XX0805"]}]},{"name":"Excellent Youth Foundation of Heilongjiang Province of China","award":["GA23B003"],"award-info":[{"award-number":["GA23B003"]}]},{"name":"Excellent Youth Foundation of Heilongjiang Province of China","award":["2023-CXPT-LC-005"],"award-info":[{"award-number":["2023-CXPT-LC-005"]}]},{"name":"Fundamental Research Funds for the Central Universities","award":["62271159"],"award-info":[{"award-number":["62271159"]}]},{"name":"Fundamental Research Funds for the Central Universities","award":["62071136"],"award-info":[{"award-number":["62071136"]}]},{"name":"Fundamental Research Funds for the Central Universities","award":["62002083"],"award-info":[{"award-number":["62002083"]}]},{"name":"Fundamental Research Funds for the Central Universities","award":["61971153"],"award-info":[{"award-number":["61971153"]}]},{"name":"Fundamental Research Funds for the Central Universities","award":["62371153"],"award-info":[{"award-number":["62371153"]}]},{"name":"Fundamental Research Funds for the Central Universities","award":["YQ2022F002"],"award-info":[{"award-number":["YQ2022F002"]}]},{"name":"Fundamental Research Funds for the Central Universities","award":["3072024XX0805"],"award-info":[{"award-number":["3072024XX0805"]}]},{"name":"Fundamental Research Funds for the Central Universities","award":["GA23B003"],"award-info":[{"award-number":["GA23B003"]}]},{"name":"Fundamental Research Funds for the Central Universities","award":["2023-CXPT-LC-005"],"award-info":[{"award-number":["2023-CXPT-LC-005"]}]},{"name":"Heilongjiang Province Key Research and Development Project","award":["62271159"],"award-info":[{"award-number":["62271159"]}]},{"name":"Heilongjiang Province Key Research and Development Project","award":["62071136"],"award-info":[{"award-number":["62071136"]}]},{"name":"Heilongjiang Province Key Research and Development Project","award":["62002083"],"award-info":[{"award-number":["62002083"]}]},{"name":"Heilongjiang Province Key Research and Development Project","award":["61971153"],"award-info":[{"award-number":["61971153"]}]},{"name":"Heilongjiang Province Key Research and Development Project","award":["62371153"],"award-info":[{"award-number":["62371153"]}]},{"name":"Heilongjiang Province Key Research and Development Project","award":["YQ2022F002"],"award-info":[{"award-number":["YQ2022F002"]}]},{"name":"Heilongjiang Province Key Research and Development Project","award":["3072024XX0805"],"award-info":[{"award-number":["3072024XX0805"]}]},{"name":"Heilongjiang Province Key Research and Development Project","award":["GA23B003"],"award-info":[{"award-number":["GA23B003"]}]},{"name":"Heilongjiang Province Key Research and Development Project","award":["2023-CXPT-LC-005"],"award-info":[{"award-number":["2023-CXPT-LC-005"]}]},{"name":"Key Laboratory of Target Cognition and Application Technology","award":["62271159"],"award-info":[{"award-number":["62271159"]}]},{"name":"Key Laboratory of Target Cognition and Application Technology","award":["62071136"],"award-info":[{"award-number":["62071136"]}]},{"name":"Key Laboratory of Target Cognition and Application Technology","award":["62002083"],"award-info":[{"award-number":["62002083"]}]},{"name":"Key Laboratory of Target Cognition and Application Technology","award":["61971153"],"award-info":[{"award-number":["61971153"]}]},{"name":"Key Laboratory of Target Cognition and Application Technology","award":["62371153"],"award-info":[{"award-number":["62371153"]}]},{"name":"Key Laboratory of Target Cognition and Application Technology","award":["YQ2022F002"],"award-info":[{"award-number":["YQ2022F002"]}]},{"name":"Key Laboratory of Target Cognition and Application Technology","award":["3072024XX0805"],"award-info":[{"award-number":["3072024XX0805"]}]},{"name":"Key Laboratory of Target Cognition and Application Technology","award":["GA23B003"],"award-info":[{"award-number":["GA23B003"]}]},{"name":"Key Laboratory of Target Cognition and Application Technology","award":["2023-CXPT-LC-005"],"award-info":[{"award-number":["2023-CXPT-LC-005"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Remote Sensing"],"abstract":"<jats:p>With the simultaneous acquisition of the infrared and optical remote sensing images of the same target becoming increasingly easy, using multi-modal data for high-performance object detection has become a research focus. In remote sensing multi-modal data, infrared images lack color information, it is hard to detect difficult targets with low contrast, and optical images are easily affected by illuminance. One of the most effective ways to solve this problem is to integrate multi-modal images for high-performance object detection. The challenge of fusion object detection lies in how to fully integrate multi-modal image features with significant modal differences and avoid introducing interference information while taking advantage of complementary advantages. To solve these problems, a new multi-modal fusion object detection method is proposed. In this paper, the method is improved in terms of two aspects: firstly, a new dual-branch asymmetric attention backbone network (DAAB) is designed, which uses a semantic information supplement module (SISM) and a detail information supplement module (DISM) to supplement and enhance infrared and RGB image information, respectively. Secondly, we propose a feature fusion pyramid network (FFPN), which uses a Transformer-like strategy to carry out multi-modal feature fusion and suppress features that are not conducive to fusion during the fusion process. This method is a state-of-the-art process for both FLIR-aligned and DroneVehicle datasets. Experiments show that this method has strong competitiveness and generalization performance.<\/jats:p>","DOI":"10.3390\/rs16203904","type":"journal-article","created":{"date-parts":[[2024,10,21]],"date-time":"2024-10-21T09:58:24Z","timestamp":1729504704000},"page":"3904","update-policy":"https:\/\/doi.org\/10.3390\/mdpi_crossmark_policy","source":"Crossref","is-referenced-by-count":10,"title":["Multi-Modal Object Detection Method Based on Dual-Branch Asymmetric Attention Backbone and Feature Fusion Pyramid Network"],"prefix":"10.3390","volume":"16","author":[{"given":"Jinpeng","family":"Wang","sequence":"first","affiliation":[{"name":"College of Information and Communication Engineering, Harbin Engineering University, Harbin 150001, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9601-536X","authenticated-orcid":false,"given":"Nan","family":"Su","sequence":"additional","affiliation":[{"name":"College of Information and Communication Engineering, Harbin Engineering University, Harbin 150001, China"},{"name":"Key Laboratory of Advanced Marine Communication and Information Technology, Ministry of Industry and Information Technology, Harbin Engineering University, Harbin 150001, China"}]},{"given":"Chunhui","family":"Zhao","sequence":"additional","affiliation":[{"name":"College of Information and Communication Engineering, Harbin Engineering University, Harbin 150001, China"},{"name":"Key Laboratory of Advanced Marine Communication and Information Technology, Ministry of Industry and Information Technology, Harbin Engineering University, Harbin 150001, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0751-7726","authenticated-orcid":false,"given":"Yiming","family":"Yan","sequence":"additional","affiliation":[{"name":"College of Information and Communication Engineering, Harbin Engineering University, Harbin 150001, China"},{"name":"Key Laboratory of Advanced Marine Communication and Information Technology, Ministry of Industry and Information Technology, Harbin Engineering University, Harbin 150001, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7308-9590","authenticated-orcid":false,"given":"Shou","family":"Feng","sequence":"additional","affiliation":[{"name":"College of Information and Communication Engineering, Harbin Engineering University, Harbin 150001, China"},{"name":"Key Laboratory of Advanced Marine Communication and Information Technology, Ministry of Industry and Information Technology, Harbin Engineering University, Harbin 150001, China"}]}],"member":"1968","published-online":{"date-parts":[[2024,10,21]]},"reference":[{"key":"ref_1","doi-asserted-by":"crossref","first-page":"3019","DOI":"10.1109\/TGRS.2008.923026","article-title":"Wide-Area Traffic Monitoring With the SAR\/GMTI System PAMIR","volume":"46","author":"Klare","year":"2008","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_2","doi-asserted-by":"crossref","first-page":"182","DOI":"10.1016\/j.isprsjprs.2018.09.014","article-title":"Deep networks under scene-level supervision for multi-class geospatial object detection from remote sensing images","volume":"146","author":"Li","year":"2018","journal-title":"ISPRS J. Photogramm. Remote Sens."},{"key":"ref_3","doi-asserted-by":"crossref","first-page":"3325","DOI":"10.1109\/TGRS.2014.2374218","article-title":"Object detection in optical remote sensing images based on weakly supervised learning and high-level feature learning","volume":"53","author":"Han","year":"2015","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_4","doi-asserted-by":"crossref","first-page":"296","DOI":"10.1016\/j.isprsjprs.2019.11.023","article-title":"Object detection in optical remote sensing images: A survey and a new benchmark","volume":"159","author":"Li","year":"2020","journal-title":"ISPRS J. Photogramm. Remote Sens."},{"key":"ref_5","doi-asserted-by":"crossref","unstructured":"Park, J., Chen, J., Cho, Y.K., Kang, D.Y., and Son, B.J. (2019). CNN-Based Person Detection Using Infrared Images for Night-Time Intrusion Warning Systems. Sensors, 20.","DOI":"10.3390\/s20010034"},{"key":"ref_6","doi-asserted-by":"crossref","unstructured":"Chen, Y., and Shin, H. (2020). Pedestrian Detection at Night in Infrared Images Using an Attention-Guided Encoder-Decoder Convolutional Neural Network. Appl. Sci., 10.","DOI":"10.3390\/app10030809"},{"key":"ref_7","doi-asserted-by":"crossref","unstructured":"Zhou, K., Chen, L., and Cao, X. (2020, January 23\u201328). Improving Multispectral Pedestrian Detection by Addressing Modality Imbalance Problems. Proceedings of the European Conference on Computer Vision, Glasgow, UK.","DOI":"10.1007\/978-3-030-58523-5_46"},{"key":"ref_8","first-page":"509","article-title":"Multispectral pedestrian detection using deep fusion convolutional neural networks","volume":"587","author":"Wagner","year":"2016","journal-title":"ESANN"},{"key":"ref_9","doi-asserted-by":"crossref","unstructured":"Liu, J., Zhang, S., Wang, S., and Metaxas, D.N. (2016). Multispectral deep neural networks for pedestrian detection. arXiv.","DOI":"10.5244\/C.30.73"},{"key":"ref_10","doi-asserted-by":"crossref","first-page":"161","DOI":"10.1016\/j.patcog.2018.08.005","article-title":"Illumination-Aware Faster r-Cnn for Robust Multispectral Pedestrian Detection","volume":"85","author":"Li","year":"2019","journal-title":"Pattern Recognit."},{"key":"ref_11","doi-asserted-by":"crossref","unstructured":"Zhang, L., Zhu, X., Chen, X., Yang, X., Lei, Z., and Liu, Z. (November, January 27). Weakly aligned cross-modal learning for multispectral pedestrian detection. Proceedings of the 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), Seoul, Republic of Korea.","DOI":"10.1109\/ICCV.2019.00523"},{"key":"ref_12","doi-asserted-by":"crossref","unstructured":"Meng, S., and Liu, Y. (2022, January 25\u201327). Multimodal Feature Fusion YOLOv5 for RGB-T Object Detection. Proceedings of the 2022 China Automation Congress (CAC), Xiamen, China.","DOI":"10.1109\/CAC57257.2022.10055263"},{"key":"ref_13","doi-asserted-by":"crossref","first-page":"4340","DOI":"10.1109\/TGRS.2020.3016820","article-title":"More diverse means better: Multimodal deep learning meets remote-sensing imagery classification","volume":"59","author":"Hong","year":"2021","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"ref_14","doi-asserted-by":"crossref","first-page":"148","DOI":"10.1016\/j.inffus.2018.11.017","article-title":"Fusion of Multispectral Data through Illumination-Aware Deep Neural Networks for Pedestrian Detection","volume":"50","author":"Guan","year":"2019","journal-title":"Inf. Fusion"},{"key":"ref_15","unstructured":"Li, C., Song, D., Tong, R., and Tang, M. (2018). Multispectral pedestrian detection via simultaneous detection and segmentation. arXiv."},{"key":"ref_16","doi-asserted-by":"crossref","first-page":"2132","DOI":"10.1109\/TCDS.2023.3238181","article-title":"YOLO-MS: Multispectral Object Detection via Feature Interaction and Self-Attention Guided Fusion","volume":"15","author":"Xie","year":"2023","journal-title":"IEEE Trans. Cogn. Dev. Syst."},{"key":"ref_17","doi-asserted-by":"crossref","unstructured":"Zhang, H., Fromont, E., Lefevre, S., and Avignon, B. (2021, January 3\u20138). Guided Attentive Feature Fusion for Multispectral Pedestrian Detection. Proceedings of the 2021 IEEE Winter Conference on Applications of Computer Vision (WACV), Waikoloa, HI, USA.","DOI":"10.1109\/WACV48630.2021.00012"},{"key":"ref_18","doi-asserted-by":"crossref","unstructured":"Zhang, H., Wu, C., Zhang, Z., Zhu, Y., Lin, H., Zhang, Z., Sun, Y., He, T., Mueller, J., and Manmatha, R. (2022, January 19\u201320). ResNeSt: Split-Attention Networks. Proceedings of the 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), New Orleans, LA, USA.","DOI":"10.1109\/CVPRW56347.2022.00309"},{"key":"ref_19","doi-asserted-by":"crossref","unstructured":"Li, X., Wang, W., Hu, X., and Yang, J. (2019, January 15\u201320). Selective Kernel Networks. Proceedings of the 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Long Beach, CA, USA.","DOI":"10.1109\/CVPR.2019.00060"},{"key":"ref_20","unstructured":"Yang, C., An, Z., Zhu, H., Hu, X., Xu, K., Li, C., Diao, B., and Xu, Y. (2019). Gated Convolutional Networks with Hybrid Connectivity for Image Classification. arXiv."},{"key":"ref_21","doi-asserted-by":"crossref","unstructured":"Fang, Q., Han, D., and Wang, Z. (2021). Cross-Modality Fusion Transformer for Multispectral Object Detection. arXiv.","DOI":"10.2139\/ssrn.4227745"},{"key":"ref_22","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., and Sun, J. (2016, January 27\u201330). Deep Residual Learning for Image Recognition. Proceedings of the 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Las Vegas, NV, USA.","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref_23","doi-asserted-by":"crossref","unstructured":"Zhang, H., Fromont, E., Lef, S., and Avignon, B. (2020, January 25\u201328). Multispectral fusion for object detection with cyclic fuse-and-refine blocks. Proceedings of the 2020 IEEE International Conference on Image Processing (ICIP), Abu Dhabi, United Arab Emirates.","DOI":"10.1109\/ICIP40778.2020.9191080"},{"key":"ref_24","doi-asserted-by":"crossref","first-page":"6700","DOI":"10.1109\/TCSVT.2022.3168279","article-title":"Drone-Based RGB-Infrared Cross-Modality Vehicle Detection Via Uncertainty-Aware Learning","volume":"32","author":"Sun","year":"2022","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"ref_25","doi-asserted-by":"crossref","first-page":"1137","DOI":"10.1109\/TPAMI.2016.2577031","article-title":"Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks","volume":"39","author":"Ren","year":"2017","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"ref_26","doi-asserted-by":"crossref","unstructured":"Zhang, H., Fromont, E., Lefevre, S., and Avignon, B. (2021, January 19\u201322). Deep Active Learning from Multispectral Data Through Cross-Modality Prediction Inconsistency. Proceedings of the 2021 IEEE International Conference on Image Processing (ICIP), Anchorage, AK, USA.","DOI":"10.1109\/ICIP42928.2021.9506322"},{"key":"ref_27","doi-asserted-by":"crossref","unstructured":"Yu, Y., and Da, F. (2023, January 17\u201324). Phase-Shifting Coder: Predicting Accurate Orientation in Oriented Object Detection. Proceedings of the 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Vancouver, BC, Canada.","DOI":"10.1109\/CVPR52729.2023.01283"},{"key":"ref_28","doi-asserted-by":"crossref","unstructured":"Ding, J., Xue, N., Long, Y., Xia, G.-S., and Lu, Q. (2019, January 15\u201320). Learning RoI Transformer for Oriented Object Detection in Aerial Images. Proceedings of the 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Long Beach, CA, USA.","DOI":"10.1109\/CVPR.2019.00296"},{"key":"ref_29","doi-asserted-by":"crossref","unstructured":"Wang, Q., Chi, Y., Shen, T., Song, J., Zhang, Z., and Zhu, Y. (2022). Improving RGB-Infrared Object Detection by Reducing Cross-Modality Redundancy. Remote Sens., 14.","DOI":"10.3390\/rs14092020"},{"key":"ref_30","doi-asserted-by":"crossref","first-page":"20","DOI":"10.1016\/j.inffus.2018.09.015","article-title":"Crossmodality interactive attention network for multispectral pedestrian detection","volume":"50","author":"Zhang","year":"2019","journal-title":"Inf. Fusion"},{"key":"ref_31","doi-asserted-by":"crossref","unstructured":"Yuan, M., Wang, Y., and Wei, X. (2022). Translation, Scale and Rotation: Cross-Modal Alignment Meets RGB-Infrared Vehicle Detection. arXiv.","DOI":"10.1007\/978-3-031-20077-9_30"}],"container-title":["Remote Sensing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.mdpi.com\/2072-4292\/16\/20\/3904\/pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T16:17:16Z","timestamp":1760113036000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.mdpi.com\/2072-4292\/16\/20\/3904"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,21]]},"references-count":31,"journal-issue":{"issue":"20","published-online":{"date-parts":[[2024,10]]}},"alternative-id":["rs16203904"],"URL":"https:\/\/doi.org\/10.3390\/rs16203904","relation":{},"ISSN":["2072-4292"],"issn-type":[{"value":"2072-4292","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,21]]}}}