{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,5]],"date-time":"2026-01-05T21:03:12Z","timestamp":1767646992497,"version":"3.48.0"},"reference-count":32,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T00:00:00Z","timestamp":1764028800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T00:00:00Z","timestamp":1764028800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["No. 62201229"],"award-info":[{"award-number":["No. 62201229"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["No. 52225212"],"award-info":[{"award-number":["No. 52225212"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J. King Saud Univ. Comput. Inf. 
Sci."],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1007\/s44443-025-00345-3","type":"journal-article","created":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T10:35:03Z","timestamp":1764066903000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Machine vision based perception for vehicle-mounted UAV autonomous landing under GNSS-denied environments"],"prefix":"10.1007","volume":"37","author":[{"given":"Pengbo","family":"Ma","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9378-7935","authenticated-orcid":false,"given":"Chenyuan","family":"He","sequence":"additional","affiliation":[]},{"given":"Zhouyu","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Zhan","family":"Xv","sequence":"additional","affiliation":[]},{"given":"Hai","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yingfeng","family":"Cai","sequence":"additional","affiliation":[]},{"given":"Long","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Can","family":"Zhong","sequence":"additional","affiliation":[]},{"given":"Yiqun","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,25]]},"reference":[{"issue":"2","key":"345_CR1","doi-asserted-by":"publisher","first-page":"89","DOI":"10.3390\/drones7020089","volume":"7","author":"MY Arafat","year":"2023","unstructured":"Arafat MY, Alam MM, Moh S (2023) Vision-based navigation techniques for unmanned aerial vehicles: review and challenges. Drones 7(2):89","journal-title":"Drones"},{"issue":"4","key":"345_CR2","doi-asserted-by":"publisher","first-page":"2004","DOI":"10.1109\/TRO.2021.3133730","volume":"38","author":"S Cao","year":"2022","unstructured":"Cao S, Lu X, Shen S (2022) Gvins: tightly coupled gnss\u2013visual\u2013inertial fusion for smooth and consistent state estimation. 
IEEE Trans Rob 38(4):2004\u20132021","journal-title":"IEEE Trans Rob"},{"key":"345_CR3","unstructured":"Dosovitskiy A (2020) An image is worth 16x16 words: transformers for image recognition at scale. arXiv:2010.11929"},{"key":"345_CR4","doi-asserted-by":"crossref","unstructured":"Du B, Huang Y, Chen J, Huang D (2023) Adaptive sparse convolutional networks with global context enhancement for faster object detection on drone images. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 13435\u201313444","DOI":"10.1109\/CVPR52729.2023.01291"},{"key":"345_CR5","doi-asserted-by":"crossref","unstructured":"Hruby P, Duff T, Pollefeys M (2024) Efficient solution of point-line absolute pose. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 21316\u201321325","DOI":"10.1109\/CVPR52733.2024.02014"},{"key":"345_CR6","unstructured":"Jian Z, Li Q, Zheng S, Wang X, Chen X (2024) Lvcp: lidar-vision tightly coupled collaborative real-time relative positioning. arXiv:2407.10782"},{"key":"345_CR7","doi-asserted-by":"publisher","first-page":"185","DOI":"10.1016\/j.inffus.2022.09.019","volume":"90","author":"S Karim","year":"2023","unstructured":"Karim S, Tong G, Li J, Qadir A, Farooq U, Yu Y (2023) Current advances and future perspectives of image fusion: a comprehensive review. Inf Fusion 90:185\u2013217","journal-title":"Inf Fusion"},{"key":"345_CR8","doi-asserted-by":"crossref","unstructured":"Kim M, Koo J, Kim G (2023) Ep2p-loc: end-to-end 3d point to 2d pixel localization for large-scale visual localization. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp 21527\u201321537","DOI":"10.1109\/ICCV51070.2023.01968"},{"key":"345_CR9","first-page":"1","volume":"72","author":"J Lei","year":"2023","unstructured":"Lei J, Li J, Liu J, Zhou S, Zhang Q, Kasabov NK (2023) Galfusion: multi-exposure image fusion via a global\u2013local aggregation learning network. 
IEEE Trans Instrum Meas 72:1\u201315","journal-title":"IEEE Trans Instrum Meas"},{"key":"345_CR10","doi-asserted-by":"publisher","first-page":"102147","DOI":"10.1016\/j.inffus.2023.102147","volume":"103","author":"H Li","year":"2024","unstructured":"Li H, Wu X-J (2024) Crossfuse: a novel cross attention mechanism based infrared and visible image fusion approach. Inf Fusion 103:102147","journal-title":"Inf Fusion"},{"issue":"7","key":"345_CR11","doi-asserted-by":"publisher","first-page":"2864","DOI":"10.1109\/TIP.2013.2244222","volume":"22","author":"S Li","year":"2013","unstructured":"Li S, Kang X, Hu J (2013) Image fusion with guided filtering. IEEE Trans Image Process 22(7):2864\u20132875","journal-title":"IEEE Trans Image Process"},{"key":"345_CR12","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1016\/j.inffus.2023.02.011","volume":"95","author":"H Li","year":"2023","unstructured":"Li H, Zhao J, Li J, Yu Z, Lu G (2023) Feature dynamic alignment and refinement for infrared-visible image fusion: translation robust fusion. Inf Fusion 95:26\u201341","journal-title":"Inf Fusion"},{"key":"345_CR13","doi-asserted-by":"publisher","first-page":"1462","DOI":"10.1109\/TMM.2023.3234822","volume":"25","author":"Z Liu","year":"2023","unstructured":"Liu Z, Shang Y, Li T, Chen G, Wang Y, Hu Q, Zhu P (2023) Robust multi-drone multi-target tracking to resolve target occlusion: a benchmark. IEEE Trans Multimedia 25:1462\u20131476","journal-title":"IEEE Trans Multimedia"},{"key":"345_CR14","doi-asserted-by":"crossref","unstructured":"Liu J, Fan X, Huang Z, Wu G, Liu R, Zhong W, Luo Z (2022) Target-aware dual adversarial learning and a multi-scenario multi-modality benchmark to fuse infrared and visible for object detection. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 5802\u20135811","DOI":"10.1109\/CVPR52688.2022.00571"},{"key":"345_CR15","doi-asserted-by":"crossref","unstructured":"Lu C-P, Hager GD, Mjolsness E (2000) Fast and globally convergent pose estimation from video images. IEEE Trans Pattern Anal Mach Intell 22(6):610\u2013622","DOI":"10.1109\/34.862199"},{"key":"345_CR16","doi-asserted-by":"crossref","unstructured":"Mcleod S, Chng CK, Ono T, Shimizu Y, Hemmi R, Holden L et\u00a0al (2024) Robust perspective-n-crater for crater-based camera pose estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 6760\u20136769","DOI":"10.1109\/CVPRW63382.2024.00669"},{"key":"345_CR17","doi-asserted-by":"crossref","unstructured":"Moortgat-Pick A, Schwahn M, Adamczyk A, Duecker DA, Haddadin S (2024) Autonomous uav mission cycling: a mobile hub approach for precise landings and continuous operations in challenging environments. In: Proceedings of the 2024 IEEE International Conference on Robotics and Automation (ICRA), pp 8450\u20138456","DOI":"10.1109\/ICRA57147.2024.10611292"},{"key":"345_CR18","doi-asserted-by":"publisher","first-page":"1460","DOI":"10.1109\/TMM.2021.3065496","volume":"24","author":"R Nie","year":"2021","unstructured":"Nie R, Ma C, Cao J, Ding H, Zhou D (2021) A total variation with joint norms for infrared and visible image fusion. IEEE Trans Multimedia 24:1460\u20131472","journal-title":"IEEE Trans Multimedia"},{"key":"345_CR19","doi-asserted-by":"crossref","unstructured":"Oh J, Kim H (2023) A camera center estimation based on perspective one point method. IEEE Trans Intell Vehicles","DOI":"10.1109\/TIV.2023.3298938"},{"key":"345_CR20","doi-asserted-by":"crossref","unstructured":"Sun W, Dai L, Zhang X, Chang P, He X (2022) Rsod: real-time small object detection algorithm in uav-based traffic monitoring. 
Appl Intell, 1\u201316","DOI":"10.1007\/s10489-021-02893-3"},{"issue":"1","key":"345_CR21","doi-asserted-by":"publisher","first-page":"149","DOI":"10.3390\/rs16010149","volume":"16","author":"G Tang","year":"2023","unstructured":"Tang G, Ni J, Zhao Y, Gu Y, Cao W (2023) A survey of object detection for uavs based on deep learning. Remote Sensing 16(1):149","journal-title":"Remote Sensing"},{"key":"345_CR22","doi-asserted-by":"crossref","unstructured":"Tang S, Zhang S, Fang Y (2024) Hic-yolov5: improved yolov5 for small object detection. In: 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, pp 6614\u20136619","DOI":"10.1109\/ICRA57147.2024.10610273"},{"key":"345_CR23","doi-asserted-by":"crossref","unstructured":"Thelasingha N, Julius AA, Humann J, Reddinger J-P, Dotterweich J, Childers M (2024) Iterative planning for multi-agent systems: an application in energy-aware uav-ugv cooperative task site assignments. IEEE Trans Autom Sci Eng","DOI":"10.1109\/TASE.2024.3398663"},{"key":"345_CR24","doi-asserted-by":"crossref","unstructured":"Vakhitov A, Ferraz L, Agudo A, Moreno-Noguer F (2021) Uncertainty-aware camera pose estimation from points and lines. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 4659\u20134668","DOI":"10.1109\/CVPR46437.2021.00463"},{"key":"345_CR25","doi-asserted-by":"crossref","unstructured":"Woo S, Park J, Lee J-Y, Kweon IS (2018) Cbam: convolutional block attention module. In: Proceedings of the European Conference on Computer Vision (ECCV), pp 3\u201319","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"345_CR26","doi-asserted-by":"crossref","unstructured":"Xie X, Cheng G, Wang J, Yao X, Han J (2021) Oriented r-cnn for object detection. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp 3520\u20133529","DOI":"10.1109\/ICCV48922.2021.00350"},{"key":"345_CR27","doi-asserted-by":"crossref","unstructured":"Xu H, Ma J, Yuan J, Le Z, Liu W (2022) Rfnet: unsupervised network for mutually reinforcing multi-modal image registration and fusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 19679\u201319688","DOI":"10.1109\/CVPR52688.2022.01906"},{"key":"345_CR28","doi-asserted-by":"crossref","unstructured":"Yang L, Ma R, Zakhor A (2022) Drone object detection using rgb\/ir fusion. arXiv:2201.03786","DOI":"10.2352\/EI.2022.34.14.COIMG-179"},{"key":"345_CR29","doi-asserted-by":"crossref","unstructured":"Yuan X, Cheng G, Yan K, Zeng Q, Han J (2023) Small object detection via coarse-to-fine proposal generation and imitation learning. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp 6317\u20136327","DOI":"10.1109\/ICCV51070.2023.00581"},{"key":"345_CR30","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.119243","volume":"215","author":"C Zhang","year":"2023","unstructured":"Zhang C, Zhou W, Qin W, Tang W (2023) A novel uav path planning approach: heuristic crossing search and rescue optimization algorithm. Expert Syst Appl 215:119243","journal-title":"Expert Syst Appl"},{"key":"345_CR31","first-page":"1","volume":"61","author":"Y Zhang","year":"2023","unstructured":"Zhang Y, Wu C, Guo W, Zhang T, Li W (2023) Cfanet: efficient detection of uav image based on cross-layer feature aggregation. IEEE Trans Geosci Remote Sens 61:1\u201311","journal-title":"IEEE Trans Geosci Remote Sens"},{"key":"345_CR32","doi-asserted-by":"crossref","unstructured":"Zhao Z, Bai H, Zhang J, Zhang Y, Xu S, Lin Z, Timofte R, Van\u00a0Gool L (2023) Cddfuse: correlation-driven dual-branch feature decomposition for multi-modality image fusion. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 5906\u20135916","DOI":"10.1109\/CVPR52729.2023.00572"}],"container-title":["Journal of King Saud University Computer and Information Sciences"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s44443-025-00345-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s44443-025-00345-3","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s44443-025-00345-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,5]],"date-time":"2026-01-05T18:47:45Z","timestamp":1767638865000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s44443-025-00345-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,25]]},"references-count":32,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2025,12]]}},"alternative-id":["345"],"URL":"https:\/\/doi.org\/10.1007\/s44443-025-00345-3","relation":{},"ISSN":["1319-1578","2213-1248"],"issn-type":[{"type":"print","value":"1319-1578"},{"type":"electronic","value":"2213-1248"}],"subject":[],"published":{"date-parts":[[2025,11,25]]},"assertion":[{"value":"28 July 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 October 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 November 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors 
declare that they have no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}}],"article-number":"345"}}