{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T06:53:01Z","timestamp":1773125581964,"version":"3.50.1"},"reference-count":48,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100004607","name":"Natural Science Foundation of Guangxi Province","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004607","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Digital Signal Processing"],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1016\/j.dsp.2025.105818","type":"journal-article","created":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T16:34:38Z","timestamp":1764779678000},"page":"105818","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["ATPL-VIO: Adaptive point and line feature fusion for visual-inertial SLAM in real-world environments"],"prefix":"10.1016","volume":"170","author":[{"given":"Peichao","family":"Cong","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0009-6846-0533","authenticated-orcid":false,"given":"Yangang","family":"Zhu","sequence":"additional","affiliation":[]},{"given":"Murong","family":"Deng","sequence":"additional","affiliation":[]},{"given":"Yixuan","family":"Xiao","sequence":"additional","affiliation":[]},{"given":"Xianquan","family":"Wan","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.dsp.2025.105818_bib0001","article-title":"Towards robust keypoint detection and tracking: a fusion approach with event-aligned image features","author":"Wang","year":"2024","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"4","key":"10.1016\/j.dsp.2025.105818_bib0002","doi-asserted-by":"crossref","first-page":"1159","DOI":"10.3390\/s18041159","article-title":"PL-VIO: tightly-coupled monocular visual\u2013inertial odometry using point and line features","volume":"18","author":"He","year":"2018","journal-title":"Sensors"},{"issue":"13","key":"10.1016\/j.dsp.2025.105818_bib0003","doi-asserted-by":"crossref","first-page":"3010","DOI":"10.3390\/rs14133010","article-title":"An overview on visual slam: from tradition to semantic [J]","volume":"14","author":"Chen","year":"2022","journal-title":"Remote Sens."},{"key":"10.1016\/j.dsp.2025.105818_bib0004","first-page":"1","article-title":"Semi-direct multimap SLAM system for real-time sparse 3-D map reconstruction","volume":"72","author":"Xie","year":"2023","journal-title":"IEEE Trans. Instrum. Meas."},{"issue":"6","key":"10.1016\/j.dsp.2025.105818_bib0005","doi-asserted-by":"crossref","first-page":"1385","DOI":"10.1049\/ipr2.13032","article-title":"A survey of feature matching methods","volume":"18","author":"Huang","year":"2024","journal-title":"IET Image Process."},{"issue":"2","key":"10.1016\/j.dsp.2025.105818_bib0006","doi-asserted-by":"crossref","first-page":"752","DOI":"10.1109\/LRA.2022.3231983","article-title":"EPLF-VINS: real-time monocular visual-inertial SLAM with efficient point-line flow features","volume":"8","author":"Xu","year":"2022","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"15","key":"10.1016\/j.dsp.2025.105818_bib0007","doi-asserted-by":"crossref","first-page":"15465","DOI":"10.1109\/JSEN.2022.3185122","article-title":"Plj-slam: monocular visual slam with points, lines, and junctions of coplanar lines","volume":"22","author":"Ren","year":"2022","journal-title":"IEEE Sens. J."},{"key":"10.1016\/j.dsp.2025.105818_bib0008","series-title":"2019 IEEE International Conference on Multimedia and Expo (ICME)","article-title":"Real-time monocular visual slam by combining points and lines","author":"Wei","year":"2019"},{"key":"10.1016\/j.dsp.2025.105818_bib0009","doi-asserted-by":"crossref","DOI":"10.1109\/LRA.2023.3313062","article-title":"B-pose: bayesian deep network for camera 6-DoF pose estimation from RGB images","author":"Rekavandi","year":"2023","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"15","key":"10.1016\/j.dsp.2025.105818_bib0010","doi-asserted-by":"crossref","first-page":"3774","DOI":"10.1049\/iet-ipr.2020.0606","article-title":"EKFPnP: extended Kalman filter for camera pose estimation in a sequence of images","volume":"14","author":"Mehralian","year":"2020","journal-title":"IET Image Process."},{"key":"10.1016\/j.dsp.2025.105818_bib0011","doi-asserted-by":"crossref","unstructured":"Shen, Yi, et al. \"Localization through particle filter powered neural network estimated monocular camera poses.\" arxiv preprint arxiv:2404.17685 (2024).","DOI":"10.1117\/12.3037897"},{"issue":"6","key":"10.1016\/j.dsp.2025.105818_bib0012","doi-asserted-by":"crossref","first-page":"1052","DOI":"10.1109\/TPAMI.2007.1049","article-title":"MonoSLAM: real-time single camera SLAM","volume":"29","author":"Davison","year":"2007","journal-title":"IEEE Trans. Pattern. Anal. Mach. Intell."},{"key":"10.1016\/j.dsp.2025.105818_bib0013","doi-asserted-by":"crossref","first-page":"27","DOI":"10.1016\/j.robot.2017.03.019","article-title":"S-PTAM: stereo parallel tracking and mapping","volume":"93","author":"Pire","year":"2017","journal-title":"Rob Aut. Syst."},{"issue":"5","key":"10.1016\/j.dsp.2025.105818_bib0014","doi-asserted-by":"crossref","first-page":"1255","DOI":"10.1109\/TRO.2017.2705103","article-title":"Orb-slam2: an open-source slam system for monocular, stereo, and rgb-d cameras","volume":"33","author":"Mur-Artal","year":"2017","journal-title":"IEEE Trans. Robot."},{"issue":"4","key":"10.1016\/j.dsp.2025.105818_bib0015","doi-asserted-by":"crossref","first-page":"593","DOI":"10.1016\/j.dt.2021.12.010","article-title":"Research on DSO vision positioning technology based on binocular stereo panoramic vision system","volume":"18","author":"Guo","year":"2022","journal-title":"Def. Technol."},{"key":"10.1016\/j.dsp.2025.105818_bib0016","series-title":"2018 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","article-title":"LDSO: direct sparse odometry with loop closure","author":"Gao","year":"2018"},{"issue":"2","key":"10.1016\/j.dsp.2025.105818_bib0017","doi-asserted-by":"crossref","first-page":"249","DOI":"10.1109\/TRO.2016.2623335","article-title":"SVO: semidirect visual odometry for monocular and multicamera systems","volume":"33","author":"Forster","year":"2016","journal-title":"IEEE Trans. Robot."},{"key":"10.1016\/j.dsp.2025.105818_bib0018","doi-asserted-by":"crossref","first-page":"339","DOI":"10.1007\/s11263-011-0492-5","article-title":"Impact of landmark parametrization on monocular EKF-SLAM with points and lines [J]","author":"Sola","year":"2012","journal-title":"Int. J. Comput. Vis."},{"key":"10.1016\/j.dsp.2025.105818_bib0019","series-title":"2016 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","article-title":"PL-SVO: semi-direct monocular visual odometry by combining points and line segments","author":"Gomez-Ojeda","year":"2016"},{"key":"10.1016\/j.dsp.2025.105818_bib0020","series-title":"2017 IEEE international conference on robotics and automation (ICRA)","article-title":"PL-SLAM: real-time monocular visual SLAM with points and lines","author":"Pumarola","year":"2017"},{"key":"10.1016\/j.dsp.2025.105818_bib0021","doi-asserted-by":"crossref","first-page":"777","DOI":"10.5194\/isprs-archives-XLII-2-W13-777-2019","article-title":"a scene-assisted point-line feature based visual slam method for autonomous flight in unknown indoor environments","volume":"42","author":"Cheng","year":"2019","journal-title":"Int. Arch. Photogramm. Remote Sens. Spat. Inf. Sci."},{"key":"10.1016\/j.dsp.2025.105818_bib0022","doi-asserted-by":"crossref","first-page":"115","DOI":"10.1109\/OJCAS.2022.3174632","article-title":"A low-rank cnn architecture for real-time semantic segmentation in visual slam applications","volume":"3","author":"Falaschetti","year":"2022","journal-title":"IEEE Open J. Circuits Syst."},{"issue":"10","key":"10.1016\/j.dsp.2025.105818_bib0023","doi-asserted-by":"crossref","first-page":"3570","DOI":"10.1017\/S0263574724001553","article-title":"A semantic visual SLAM based on improved mask R-CNN in dynamic environment","volume":"42","author":"Zhang","year":"2024","journal-title":"Robotica"},{"key":"10.1016\/j.dsp.2025.105818_bib0024","article-title":"DYMRO-SLAM: a robust stereo visual SLAM for dynamic environments leveraging mask R-CNN and optical flow","author":"Cui","year":"2025","journal-title":"IEEE Access"},{"key":"10.1016\/j.dsp.2025.105818_bib0025","series-title":"Conference on Robot Learning","article-title":"Tartanvo: a generalizable learning-based vo","author":"Wang","year":"2021"},{"key":"10.1016\/j.dsp.2025.105818_bib0026","doi-asserted-by":"crossref","first-page":"2186","DOI":"10.1109\/TASE.2024.3376427","article-title":"Edge-assisted multi-robot visual-inertial SLAM with efficient communication","volume":"22","author":"Liu","year":"2024","journal-title":"IEEE Trans. Autom. Sci. Eng."},{"issue":"5","key":"10.1016\/j.dsp.2025.105818_bib0027","doi-asserted-by":"crossref","DOI":"10.12700\/APH.22.5.2025.5.12","article-title":"End-to-End multi-level encoding methods of visual data compression for robust monocular visual ORB-SLAM","volume":"22","author":"Salih","year":"2025","journal-title":"Acta Polytech. Hung."},{"key":"10.1016\/j.dsp.2025.105818_bib0028","doi-asserted-by":"crossref","DOI":"10.1109\/LRA.2024.3382533","article-title":"islam: imperative slam","author":"Fu","year":"2024","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"4","key":"10.1016\/j.dsp.2025.105818_bib0029","doi-asserted-by":"crossref","first-page":"6583","DOI":"10.1109\/LRA.2020.3015456","article-title":"Structure-slam: low-drift monocular slam in indoor environments","volume":"5","author":"Li","year":"2020","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.dsp.2025.105818_bib0030","unstructured":"He, Jiaming, et al. \"PLE-SLAM: a visual-inertial SLAM based on point-line features and efficient IMU initialization.\" arxiv preprint arxiv:2401.01081 (2024)."},{"key":"10.1016\/j.dsp.2025.105818_bib0031","doi-asserted-by":"crossref","unstructured":"Smith P., Reid I.D., Davison A.J. Real-time monocular SLAM with straight lines [J]. 2006.","DOI":"10.5244\/C.20.3"},{"key":"10.1016\/j.dsp.2025.105818_bib0032","series-title":"2011 IEEE International Conference on Robotics and Automation","first-page":"1497","article-title":"Building a partial 3D line-based map using a monocular SLAM [C]","author":"Zhang","year":"2011"},{"key":"10.1016\/j.dsp.2025.105818_bib0033","series-title":"2017 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","first-page":"1775","article-title":"Robust visual SLAM with point and line features [C]","author":"Zuo","year":"2017"},{"key":"10.1016\/j.dsp.2025.105818_bib0034","series-title":"2018 IEEE international conference on Robotics and automation (ICRA)","first-page":"5137","article-title":"Direct line guidance odometry [C]","author":"Li","year":"2018"},{"issue":"3","key":"10.1016\/j.dsp.2025.105818_bib0035","doi-asserted-by":"crossref","first-page":"734","DOI":"10.1109\/TRO.2019.2899783","article-title":"PL-SLAM: a stereo SLAM system through the combination of points and line segments [J]","volume":"35","author":"Gomez-Ojeda","year":"2019","journal-title":"IEEE Trans. Robot."},{"issue":"4","key":"10.1016\/j.dsp.2025.105818_bib0036","doi-asserted-by":"crossref","first-page":"1159","DOI":"10.3390\/s18041159","article-title":"Pl-VIO: tightly-coupled monocular visual\u2013inertial odometry using point and line features [J]","volume":"18","author":"He","year":"2018","journal-title":"Sensors"},{"key":"10.1016\/j.dsp.2025.105818_bib0037","doi-asserted-by":"crossref","first-page":"10","DOI":"10.1016\/j.cag.2022.06.013","article-title":"Point-line feature fusion based field real-time RGB-D SLAM [J]","volume":"107","author":"Li","year":"2022","journal-title":"Comput. Graph."},{"issue":"4","key":"10.1016\/j.dsp.2025.105818_bib0038","doi-asserted-by":"crossref","first-page":"7033","DOI":"10.1109\/LRA.2021.3095518","article-title":"PLF-VINS: real-time monocular visual-inertial SLAM with point-line fusion and parallel-line fusion","volume":"6","author":"Lee","year":"2021","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"2","key":"10.1016\/j.dsp.2025.105818_bib0039","doi-asserted-by":"crossref","first-page":"1518","DOI":"10.1109\/LRA.2022.3140816","article-title":"UV-SLAM: unconstrained line-based SLAM using vanishing points for structural mapping [J]","volume":"7","author":"Lim","year":"2022","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.dsp.2025.105818_bib0040","article-title":"Loosely coupled stereo VINS based on point-line features tracking with feedback loops","author":"Zhang","year":"2024","journal-title":"IEEE Trans. Veh. Technol."},{"key":"10.1016\/j.dsp.2025.105818_bib0041","doi-asserted-by":"crossref","first-page":"9958","DOI":"10.1109\/ACCESS.2021.3049801","article-title":"RGB-D SLAM with a novel 2D and 3D geometric constraint model of point and line features [J]","volume":"9","author":"Zhang","year":"2021","journal-title":"IEEE Access"},{"key":"10.1016\/j.dsp.2025.105818_bib0042","series-title":"2017 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"165","article-title":"A comparative analysis of tightly-coupled monocular, binocular, and stereo VINS","author":"Paul","year":"2017"},{"key":"10.1016\/j.dsp.2025.105818_bib0043","unstructured":"Fu, Qiang, et al. \"PL-VINS: real-time monocular visual-inertial SLAM with point and line features.\" arxiv preprint arxiv:2009.07462 (2020)."},{"issue":"1","key":"10.1016\/j.dsp.2025.105818_bib0044","doi-asserted-by":"crossref","first-page":"23","DOI":"10.3390\/drones6010023","article-title":"A new visual inertial simultaneous localization and map (SLAM) algorithm based on point and line features","volume":"6","author":"Zhang","year":"2022","journal-title":"Drones"},{"key":"10.1016\/j.dsp.2025.105818_bib0045","doi-asserted-by":"crossref","first-page":"9012","DOI":"10.1109\/ACCESS.2021.3049467","article-title":"A SLAM system based on RGBD image and point-line feature [J]","volume":"9","author":"Li","year":"2021","journal-title":"IEEE Access"},{"key":"10.1016\/j.dsp.2025.105818_bib0046","doi-asserted-by":"crossref","first-page":"1950","DOI":"10.1109\/OJCOMS.2022.3217147","article-title":"A real-time and robust monocular visual inertial SLAM system based on point and line features for mobile robots of smart cities toward 6G [J]","volume":"3","author":"Kuang","year":"2022","journal-title":"IEEE Open J. Commun. Soc."},{"issue":"1","key":"10.1016\/j.dsp.2025.105818_bib0047","doi-asserted-by":"crossref","first-page":"23","DOI":"10.3390\/drones6010023","article-title":"A new visual inertial simultaneous localization and mapping (SLAM) algorithm based on point and line features [J]","volume":"6","author":"Zhang","year":"2022","journal-title":"Drones"},{"issue":"6","key":"10.1016\/j.dsp.2025.105818_bib0048","doi-asserted-by":"crossref","first-page":"3562","DOI":"10.1109\/TMECH.2023.3272208","article-title":"Stereo visual inertial pose estimation based on feedforward and feedbacks","volume":"28","author":"Chen","year":"2023","journal-title":"IEEE\/ASME Trans. Mechatron."}],"container-title":["Digital Signal Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1051200425008346?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1051200425008346?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,9]],"date-time":"2026-03-09T17:47:20Z","timestamp":1773078440000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1051200425008346"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2]]},"references-count":48,"alternative-id":["S1051200425008346"],"URL":"https:\/\/doi.org\/10.1016\/j.dsp.2025.105818","relation":{},"ISSN":["1051-2004"],"issn-type":[{"value":"1051-2004","type":"print"}],"subject":[],"published":{"date-parts":[[2026,2]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"ATPL-VIO: Adaptive point and line feature fusion for visual-inertial SLAM in real-world environments","name":"articletitle","label":"Article Title"},{"value":"Digital Signal Processing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.dsp.2025.105818","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2025 Elsevier Inc. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"105818"}}