{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,10]],"date-time":"2026-01-10T11:44:17Z","timestamp":1768045457206,"version":"3.49.0"},"reference-count":33,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T00:00:00Z","timestamp":1761609600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T00:00:00Z","timestamp":1761609600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["51975214"],"award-info":[{"award-number":["51975214"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Intell Robot Syst"],"DOI":"10.1007\/s10846-025-02325-1","type":"journal-article","created":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T00:16:02Z","timestamp":1761610562000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["A Visual-Inertial SLAM Method for Robots on Resource-Constrained Platforms Using a Low-Cost Binocular 
Camera"],"prefix":"10.1007","volume":"111","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-1127-4603","authenticated-orcid":false,"given":"Shuang","family":"Liu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0003-0725-752X","authenticated-orcid":false,"given":"Bochun","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Qitao","family":"Tang","sequence":"additional","affiliation":[]},{"given":"Songhao","family":"Li","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Hang","family":"Yuan","sequence":"additional","affiliation":[]},{"given":"Lin","family":"Zhu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,10,28]]},"reference":[{"key":"2325_CR1","doi-asserted-by":"publisher","first-page":"117734","DOI":"10.1016\/j.eswa.2022.117734","volume":"205","author":"I Abaspur Kazerouni","year":"2022","unstructured":"Abaspur Kazerouni, I., Fitzgerald, L., Dooly, G., Toal, D.: A survey of state-of-the-art on visual slam. Expert Syst. Appl. 205, 117734 (2022). https:\/\/doi.org\/10.1016\/j.eswa.2022.117734","journal-title":"Expert Syst. Appl."},{"key":"2325_CR2","doi-asserted-by":"publisher","unstructured":"Yang, G., Wang, Y., Zhi, J., Liu, W., Shao, Y., Peng, P.: A review of visual odometry in slam techniques. In: 2020 International Conference on Artificial Intelligence and Electromechanical Automation (AIEA), pp. 332\u2013336 (2020). https:\/\/doi.org\/10.1109\/AIEA51086.2020.00075","DOI":"10.1109\/AIEA51086.2020.00075"},{"issue":"2","key":"2325_CR3","doi-asserted-by":"publisher","first-page":"415","DOI":"10.26599\/TST.2023.9010010","volume":"29","author":"J Zhu","year":"2024","unstructured":"Zhu, J., Li, H., Zhang, T.: Camera, lidar, and imu based multi-sensor fusion slam: A survey. Tsinghua Sci. Technol. 29(2), 415\u2013429 (2024). https:\/\/doi.org\/10.26599\/TST.2023.9010010","journal-title":"Tsinghua Sci. 
Technol."},{"key":"2325_CR4","doi-asserted-by":"publisher","first-page":"21367","DOI":"10.1109\/ACCESS.2021.3053188","volume":"9","author":"S Zhang","year":"2021","unstructured":"Zhang, S., Zheng, L., Tao, W.: Survey and evaluation of rgb-d slam. IEEE Access 9, 21367\u201321387 (2021). https:\/\/doi.org\/10.1109\/ACCESS.2021.3053188","journal-title":"IEEE Access"},{"issue":"1","key":"2325_CR5","doi-asserted-by":"publisher","first-page":"61","DOI":"10.1109\/TRO.2011.2170332","volume":"28","author":"T Lupton","year":"2012","unstructured":"Lupton, T., Sukkarieh, S.: Visual-inertial-aided navigation for high-dynamic motion in built environments without initial conditions. IEEE Trans. Rob. 28(1), 61\u201376 (2012). https:\/\/doi.org\/10.1109\/TRO.2011.2170332","journal-title":"IEEE Trans. Rob."},{"issue":"1","key":"2325_CR6","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TRO.2016.2597321","volume":"33","author":"C Forster","year":"2017","unstructured":"Forster, C., Carlone, L., Dellaert, F., Scaramuzza, D.: On-manifold preintegration for real-time visual\u2013inertial odometry. IEEE Trans. Rob. 33(1), 1\u201321 (2017). https:\/\/doi.org\/10.1109\/TRO.2016.2597321","journal-title":"IEEE Trans. Rob."},{"key":"2325_CR7","doi-asserted-by":"publisher","unstructured":"Leutenegger, S., Furgale, P., Rabaud, V., Chli, M., Konolige, K., Siegwart, R.: Keyframe-based visual-inertial slam using nonlinear optimization. Proceedings of Robotis Science and Systems (RSS) 2013 (2013). https:\/\/doi.org\/10.3929\/ethz-b-000236658","DOI":"10.3929\/ethz-b-000236658"},{"issue":"4","key":"2325_CR8","doi-asserted-by":"publisher","first-page":"1004","DOI":"10.1109\/TRO.2018.2853729","volume":"34","author":"T Qin","year":"2018","unstructured":"Qin, T., Li, P., Shen, S.: Vins-mono: A robust and versatile monocular visual-inertial state estimator. IEEE Trans. Rob. 34(4), 1004\u20131020 (2018). https:\/\/doi.org\/10.1109\/TRO.2018.2853729","journal-title":"IEEE Trans. 
Rob."},{"key":"2325_CR9","unstructured":"Qin, T., Cao, S., Pan, J., Shen, S.: A general optimization-based framework for global pose estimation with multiple sensors (2019). https:\/\/arxiv.org\/abs\/1901.03642"},{"key":"2325_CR10","doi-asserted-by":"publisher","unstructured":"He, Y., Zhao, J., Guo, Y., He, W., Yuan, K.: Pl-vio: Tightly-coupled monocular visual-inertial odometry using point and line features. Sensors 18(4) (2018). https:\/\/doi.org\/10.3390\/s18041159","DOI":"10.3390\/s18041159"},{"key":"2325_CR11","doi-asserted-by":"publisher","unstructured":"Liu, H., Chen, M., Zhang, G., Bao, H., Bao, Y.: Ice-ba: Incremental, consistent and efficient bundle adjustment for visual-inertial slam. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1974\u20131982 (2018). https:\/\/doi.org\/10.1109\/CVPR.2018.00211","DOI":"10.1109\/CVPR.2018.00211"},{"key":"2325_CR12","doi-asserted-by":"publisher","unstructured":"Shan, Z., Li, R., Schwertfeger, S.: Rgbd-inertial trajectory estimation and mapping for ground robots. Sensors 19(10) (2019). https:\/\/doi.org\/10.3390\/s19102251","DOI":"10.3390\/s19102251"},{"issue":"5","key":"2325_CR13","doi-asserted-by":"publisher","first-page":"1147","DOI":"10.1109\/TRO.2015.2463671","volume":"31","author":"R Mur-Artal","year":"2015","unstructured":"Mur-Artal, R., Montiel, J.M.M., Tard\u00f3s, J.D.: Orb-slam: A versatile and accurate monocular slam system. IEEE Trans. Rob. 31(5), 1147\u20131163 (2015). https:\/\/doi.org\/10.1109\/TRO.2015.2463671","journal-title":"IEEE Trans. Rob."},{"issue":"5","key":"2325_CR14","doi-asserted-by":"publisher","first-page":"1255","DOI":"10.1109\/TRO.2017.2705103","volume":"33","author":"R Mur-Artal","year":"2017","unstructured":"Mur-Artal, R., Tard\u00f3s, J.D.: Orb-slam2: An open-source slam system for monocular, stereo, and rgb-d cameras. IEEE Trans. Rob. 33(5), 1255\u20131262 (2017). https:\/\/doi.org\/10.1109\/TRO.2017.2705103","journal-title":"IEEE Trans. 
Rob."},{"issue":"2","key":"2325_CR15","doi-asserted-by":"publisher","first-page":"796","DOI":"10.1109\/LRA.2017.2653359","volume":"2","author":"R Mur-Artal","year":"2017","unstructured":"Mur-Artal, R., Tard\u00f3s, J.D.: Visual-inertial monocular slam with map reuse. IEEE Robot. Autom. Lett. 2(2), 796\u2013803 (2017). https:\/\/doi.org\/10.1109\/LRA.2017.2653359","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"6","key":"2325_CR16","doi-asserted-by":"publisher","first-page":"1874","DOI":"10.1109\/TRO.2021.3075644","volume":"37","author":"C Campos","year":"2021","unstructured":"Campos, C., Elvira, R., Rodr\u00edguez, J.J.G., Montiel, J.M., Tard\u00f3s, J.D.: Orb-slam3: An accurate open-source library for visual, visual-inertial, and multimap slam. IEEE Trans. Robot. 37(6), 1874\u20131890 (2021). https:\/\/doi.org\/10.1109\/TRO.2021.3075644","journal-title":"IEEE Trans. Robot."},{"key":"2325_CR17","doi-asserted-by":"publisher","unstructured":"Wang, C., Zhang, Q., Lin, S., Li, W., Wang, X., Bai, Y., Tian, Q.: Research and experiment of an underwater stereo vision system. In: OCEANS 2019 - Marseille, pp. 1\u20135 (2019). https:\/\/doi.org\/10.1109\/OCEANSE.2019.8867236","DOI":"10.1109\/OCEANSE.2019.8867236"},{"key":"2325_CR18","doi-asserted-by":"publisher","unstructured":"Gu, P., Meng, Z., Zhou, P.: Real-time visual inertial odometry with a resource-efficient harris corner detection accelerator on fpga platform. In: 2022 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 10542\u201310548 (2022). 
https:\/\/doi.org\/10.1109\/IROS47612.2022.9981598","DOI":"10.1109\/IROS47612.2022.9981598"},{"issue":"6","key":"2325_CR19","doi-asserted-by":"publisher","first-page":"3374","DOI":"10.1109\/TRO.2022.3182503","volume":"38","author":"H Xu","year":"2022","unstructured":"Xu, H., Zhang, Y., Zhou, B., Wang, L., Yao, X., Meng, G., Shen, S.: Omni-swarm: A decentralized omnidirectional visual\u2013inertial\u2013uwb state estimation system for aerial swarms. IEEE Trans. Rob. 38(6), 3374\u20133394 (2022). https:\/\/doi.org\/10.1109\/TRO.2022.3182503","journal-title":"IEEE Trans. Rob."},{"key":"2325_CR20","doi-asserted-by":"publisher","unstructured":"Zhao, H., Shang, J., Liu, K., Chen, C., Gu, F.: Edgevo: An efficient and accurate edge-based visual odometry. In: 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 10630\u201310636 (2023). https:\/\/doi.org\/10.1109\/ICRA48891.2023.10160754","DOI":"10.1109\/ICRA48891.2023.10160754"},{"key":"2325_CR21","unstructured":"Luo, H., Liu, Y., Guo, C., Li, Z., Song, W.: SuperVINS: A real-time visual-inertial SLAM framework for challenging imaging conditions (2024). https:\/\/arxiv.org\/abs\/2407.21348"},{"key":"2325_CR22","doi-asserted-by":"publisher","unstructured":"Fan, Y., Zhao, T., Wang, G.: Schurvins: Schur complement-based lightweight visual inertial navigation system. In: 2024 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 17964\u201317973 (2024). https:\/\/doi.org\/10.1109\/CVPR52733.2024.01701","DOI":"10.1109\/CVPR52733.2024.01701"},{"key":"2325_CR23","doi-asserted-by":"publisher","unstructured":"Gui, P., Tang, L., Mukhopadhyay, S.: Mems based imu for tilting measurement: Comparison of complementary and kalman filter based data fusion. In: 2015 IEEE 10th Conference on Industrial Electronics and Applications (ICIEA), pp. 2004\u20132009 (2015). 
https:\/\/doi.org\/10.1109\/ICIEA.2015.7334442","DOI":"10.1109\/ICIEA.2015.7334442"},{"issue":"12","key":"2325_CR24","doi-asserted-by":"publisher","first-page":"23803","DOI":"10.3390\/s141223803","volume":"14","author":"Z Li","year":"2014","unstructured":"Li, Z., Wang, J., Gao, J., Li, B., Zhou, F.: A vondrak low pass filter for imu sensor initial alignment on a disturbed base. Sensors 14(12), 23803\u201323821 (2014). https:\/\/doi.org\/10.3390\/s141223803","journal-title":"Sensors"},{"key":"2325_CR25","doi-asserted-by":"publisher","unstructured":"Khan, R., Talha, M., Khattak, A., Qasim, M.: Realization of balanced contrast limited adaptive histogram equalization (B-CLAHE) for adaptive dynamic range compression of real time medical images. https:\/\/doi.org\/10.1109\/IBCAST.2013.6512142","DOI":"10.1109\/IBCAST.2013.6512142"},{"issue":"4","key":"2325_CR26","doi-asserted-by":"publisher","first-page":"9573","DOI":"10.1109\/LRA.2022.3191193","volume":"7","author":"J Liu","year":"2022","unstructured":"Liu, J., Li, X., Liu, Y., Chen, H.: Rgb-d inertial odometry for a resource-restricted robot in dynamic environments. IEEE Robot. Autom. Lett. 7(4), 9573\u20139580 (2022). https:\/\/doi.org\/10.1109\/LRA.2022.3191193","journal-title":"IEEE Robot. Autom. Lett."},{"key":"2325_CR27","doi-asserted-by":"publisher","first-page":"430","DOI":"10.1007\/11744023_34","volume-title":"Computer Vision \u2013 ECCV 2006","author":"E Rosten","year":"2006","unstructured":"Rosten, E., Drummond, T.: Machine learning for high-speed corner detection. In: Leonardis, A., Bischof, H., Pinz, A. (eds.) Computer Vision \u2013 ECCV 2006, pp. 430\u2013443. Springer, Berlin, Heidelberg (2006)"},{"issue":"5","key":"2325_CR28","doi-asserted-by":"publisher","first-page":"1188","DOI":"10.1109\/TRO.2012.2197158","volume":"28","author":"D Galvez-L\u00f3pez","year":"2012","unstructured":"Galvez-L\u00f3pez, D., Tardos, J.D.: Bags of binary words for fast place recognition in image sequences. IEEE Trans. Rob. 
28(5), 1188\u20131197 (2012). https:\/\/doi.org\/10.1109\/TRO.2012.2197158","journal-title":"IEEE Trans. Rob."},{"key":"2325_CR29","doi-asserted-by":"publisher","unstructured":"Hess, W., Kohler, D., Rapp, H., Andor, D.: Real-time loop closure in 2d lidar slam. In: 2016 IEEE International Conference on Robotics and Automation (ICRA), pp. 1271\u20131278 (2016). https:\/\/doi.org\/10.1109\/ICRA.2016.7487258","DOI":"10.1109\/ICRA.2016.7487258"},{"key":"2325_CR30","doi-asserted-by":"publisher","unstructured":"Wang, Z., Hao, W., Huang, Y., Wu, H.: Slam mapping of information fusion between lidar and depth camera. In: 2022 International Conference on Image Processing, Computer Vision and Machine Learning (ICICML), pp. 142\u2013145 (2022). https:\/\/doi.org\/10.1109\/ICICML57342.2022.10009758","DOI":"10.1109\/ICICML57342.2022.10009758"},{"issue":"3","key":"2325_CR31","doi-asserted-by":"publisher","first-page":"52","DOI":"10.1145\/504729.504754","volume":"45","author":"S Thrun","year":"2002","unstructured":"Thrun, S.: Probabilistic robotics. Commun. ACM 45(3), 52\u201357 (2002)","journal-title":"Commun. ACM"},{"key":"2325_CR32","doi-asserted-by":"publisher","unstructured":"Yin, J., Yin, H., Liang, C., Jiang, H., Zhang, Z.: Ground-challenge: A multi-sensor slam dataset focusing on corner cases for ground robots. In: 2023 IEEE International Conference on Robotics and Biomimetics (ROBIO), pp. 1\u20135 (2023). https:\/\/doi.org\/10.1109\/ROBIO58561.2023.10354969","DOI":"10.1109\/ROBIO58561.2023.10354969"},{"key":"2325_CR33","doi-asserted-by":"publisher","unstructured":"Shi, X., Li, D., Zhao, P., Tian, Q., Tian, Y., Long, Q., Zhu, C., Song, J., Qiao, F., Song, L., Guo, Y., Wang, Z., Zhang, Y., Qin, B., Yang, W., Wang, F., Chan, R.H.M., She, Q.: Are we ready for service robots? the openloris-scene datasets for lifelong slam. In: 2020 IEEE International Conference on Robotics and Automation (ICRA), pp. 3139\u20133145 (2020). 
https:\/\/doi.org\/10.1109\/ICRA40945.2020.9196638","DOI":"10.1109\/ICRA40945.2020.9196638"}],"container-title":["Journal of Intelligent &amp; Robotic Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10846-025-02325-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10846-025-02325-1","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10846-025-02325-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,10]],"date-time":"2026-01-10T07:49:14Z","timestamp":1768031354000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10846-025-02325-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,28]]},"references-count":33,"journal-issue":{"issue":"4","published-online":{"date-parts":[[2025,12]]}},"alternative-id":["2325"],"URL":"https:\/\/doi.org\/10.1007\/s10846-025-02325-1","relation":{},"ISSN":["1573-0409"],"issn-type":[{"value":"1573-0409","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,10,28]]},"assertion":[{"value":"30 March 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 October 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 October 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics"}},{"value":"The authors have no conflicts of interest 
to declare that are relevant to the content of this article.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"All authors consent to publication.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to Publish"}},{"value":"Not applicable.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to Participate"}}],"article-number":"114"}}