{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T00:16:41Z","timestamp":1774052201487,"version":"3.50.1"},"reference-count":32,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2025,5,8]],"date-time":"2025-05-08T00:00:00Z","timestamp":1746662400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,5,8]],"date-time":"2025-05-08T00:00:00Z","timestamp":1746662400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key R&D Program of China","doi-asserted-by":"crossref","award":["2022YFB4700400"],"award-info":[{"award-number":["2022YFB4700400"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Hubei Provincial Natural Science Foundation of China","award":["2023AFB109"],"award-info":[{"award-number":["2023AFB109"]}]},{"DOI":"10.13039\/501100004543","name":"China Scholarship Council","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004543","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. Syst."],"published-print":{"date-parts":[[2025,6]]},"DOI":"10.1007\/s40747-025-01906-9","type":"journal-article","created":{"date-parts":[[2025,5,8]],"date-time":"2025-05-08T07:11:30Z","timestamp":1746688290000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":13,"title":["Deep reinforcement learning for path planning of autonomous mobile robots in complicated environments"],"prefix":"10.1007","volume":"11","author":[{"given":"Zhijie","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Hao","family":"Fu","sequence":"additional","affiliation":[]},{"given":"Juan","family":"Yang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2779-2637","authenticated-orcid":false,"given":"Yunhan","family":"Lin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,8]]},"reference":[{"key":"1906_CR1","doi-asserted-by":"publisher","first-page":"1615","DOI":"10.1109\/TASE.2020.3013288","volume":"18","author":"T Wang","year":"2021","unstructured":"Wang T, Huang P, Dong G (2021) Modeling and path planning for persistent surveillance by unmanned ground vehicle. IEEE Trans Autom Sci Eng 18:1615\u20131625","journal-title":"IEEE Trans Autom Sci Eng"},{"issue":"4","key":"1906_CR2","doi-asserted-by":"publisher","first-page":"5778","DOI":"10.1109\/TNNLS.2022.3209154","volume":"35","author":"R Chai","year":"2024","unstructured":"Chai R, Niu H, Carrasco J, Arvin F, Yin H, Lennox B (2024) Design and experimental validation of deep reinforcement learning-based fast trajectory planning and control for mobile robot in unknown environment. IEEE Trans Neural Netw Learn Syst 35(4):5778\u20135792. https:\/\/doi.org\/10.1109\/TNNLS.2022.3209154","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"4","key":"1906_CR3","doi-asserted-by":"publisher","first-page":"1615","DOI":"10.1109\/TASE.2020.3013288","volume":"18","author":"T Wang","year":"2021","unstructured":"Wang T, Huang P, Dong G (2021) Modeling and path planning for persistent surveillance by unmanned ground vehicle. IEEE Trans Autom Sci Eng 18(4):1615\u20131625. https:\/\/doi.org\/10.1109\/TASE.2020.3013288","journal-title":"IEEE Trans Autom Sci Eng"},{"key":"1906_CR4","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1016\/j.robot.2018.04.007","volume":"106","author":"B Fu","year":"2018","unstructured":"Fu B, Chen L, Zhou Y, Zheng D, Wei Z, Dai J, Pan H (2018) An improved a* algorithm for the industrial robot path planning with high success rate and short length. Robot Auton Syst 106:26\u201337. https:\/\/doi.org\/10.1016\/j.robot.2018.04.007","journal-title":"Robot Auton Syst"},{"issue":"11","key":"1906_CR5","doi-asserted-by":"publisher","first-page":"9199","DOI":"10.1109\/LRA.2024.3432351","volume":"9","author":"Z Wang","year":"2024","unstructured":"Wang Z, Zhao X, Zhang J, Yang N, Wang P, Tang J, Zhang J, Shi L (2024) Apf-cpp: an artificial potential field based multi-robot online coverage path planning approach. IEEE Robot Autom Lett 9(11):9199\u20139206. https:\/\/doi.org\/10.1109\/LRA.2024.3432351","journal-title":"IEEE Robot Autom Lett"},{"key":"1906_CR6","doi-asserted-by":"publisher","unstructured":"Wang S, Min H (2013) Experience mixed the modified artificial potential field method. In: 2013 IEEE\/RSJ international conference on intelligent robots and systems, pp 4823\u20134828. https:\/\/doi.org\/10.1109\/IROS.2013.6697052","DOI":"10.1109\/IROS.2013.6697052"},{"issue":"6","key":"1906_CR7","doi-asserted-by":"publisher","first-page":"2568","DOI":"10.1109\/TMECH.2018.2821767","volume":"23","author":"L Chen","year":"2018","unstructured":"Chen L, Shan Y, Tian W, Li B, Cao D (2018) A fast and efficient double-tree rrt$$^*$$-like sampling-based planner applying on mobile robotic systems. IEEE\/ASME Trans Mechatron 23(6):2568\u20132578. https:\/\/doi.org\/10.1109\/TMECH.2018.2821767","journal-title":"IEEE\/ASME Trans Mechatron"},{"key":"1906_CR8","unstructured":"LaValle SM (1998) Rapidly-exploring random trees: a new tool for path planning. Ann Res Report"},{"issue":"22","key":"1906_CR9","doi-asserted-by":"publisher","first-page":"22547","DOI":"10.1109\/JIOT.2022.3182798","volume":"9","author":"Z Yu","year":"2022","unstructured":"Yu Z, Si Z, Li X, Wang D, Song H (2022) A novel hybrid particle swarm optimization algorithm for path planning of uavs. IEEE Internet Things J 9(22):22547\u201322558. https:\/\/doi.org\/10.1109\/JIOT.2022.3182798","journal-title":"IEEE Internet Things J"},{"key":"1906_CR10","doi-asserted-by":"publisher","DOI":"10.1016\/j.oceaneng.2020.107693","volume":"216","author":"X Guo","year":"2020","unstructured":"Guo X, Ji M, Zhao Z, Wen D, Zhang W (2020) Global path planning and multi-objective path control for unmanned surface vehicle based on modified particle swarm optimization (pso) algorithm. Ocean Eng 216:107693. https:\/\/doi.org\/10.1016\/j.oceaneng.2020.107693","journal-title":"Ocean Eng"},{"issue":"4","key":"1906_CR11","doi-asserted-by":"publisher","first-page":"617","DOI":"10.1109\/TEVC.2018.2878221","volume":"23","author":"X Yu","year":"2019","unstructured":"Yu X, Chen W-N, Gu T, Yuan H, Zhang H, Zhang J (2019) Aco-a*: ant colony optimization plus a* for 3-d traveling in environments with dense obstacles. IEEE Trans Evol Comput 23(4):617\u2013631. https:\/\/doi.org\/10.1109\/TEVC.2018.2878221","journal-title":"IEEE Trans Evol Comput"},{"issue":"7","key":"1906_CR12","doi-asserted-by":"publisher","first-page":"1743","DOI":"10.1109\/TCYB.2016.2556742","volume":"47","author":"M Mavrovouniotis","year":"2017","unstructured":"Mavrovouniotis M, M\u00fcller FM, Yang S (2017) Ant colony optimization with local search for dynamic traveling salesman problems. IEEE Trans Cybern 47(7):1743\u20131756. https:\/\/doi.org\/10.1109\/TCYB.2016.2556742","journal-title":"IEEE Trans Cybern"},{"issue":"2","key":"1906_CR13","doi-asserted-by":"publisher","first-page":"730","DOI":"10.1109\/LRA.2021.3133591","volume":"7","author":"R Cimurs","year":"2022","unstructured":"Cimurs R, Suh IH, Lee JH (2022) Goal-driven autonomous exploration through deep reinforcement learning. IEEE Robot Autom Lett 7(2):730\u2013737. https:\/\/doi.org\/10.1109\/LRA.2021.3133591","journal-title":"IEEE Robot Autom Lett"},{"key":"1906_CR14","unstructured":"Sutton RS (2018) Reinforcement learning: an introduction. A Bradford Book"},{"issue":"2","key":"1906_CR15","doi-asserted-by":"publisher","first-page":"1387","DOI":"10.1109\/LRA.2019.2895892","volume":"4","author":"M Pflueger","year":"2019","unstructured":"Pflueger M, Agha A, Sukhatme GS (2019) Rover-irl: inverse reinforcement learning with soft value iteration networks for planetary rover path planning. IEEE Robot Autom Lett 4(2):1387\u20131394","journal-title":"IEEE Robot Autom Lett"},{"issue":"4","key":"1906_CR16","doi-asserted-by":"publisher","first-page":"1179","DOI":"10.1109\/JAS.2019.1911732","volume":"7","author":"L Jiang","year":"2020","unstructured":"Jiang L, Huang H, Ding Z (2020) Path planning for intelligent robots based on deep q-learning with experience replay and heuristic knowledge. IEEE\/CAA J Automatica Sinica 7(4):1179\u20131189. https:\/\/doi.org\/10.1109\/JAS.2019.1911732","journal-title":"IEEE\/CAA J Automatica Sinica"},{"key":"1906_CR17","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2024.123539","volume":"249","author":"CS Tan","year":"2024","unstructured":"Tan CS, Mohd-Mokhtar R, Arshad MR (2024) Expected-mean gamma-incremental reinforcement learning algorithm for robot path planning. Expert Syst Appl 249:123539","journal-title":"Expert Syst Appl"},{"issue":"7540","key":"1906_CR18","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih V, Kavukcuoglu K, Silver D, Rusu AA, Veness J, Bellemare MG, Graves A, Riedmiller M, Fidjeland AK, Ostrovski G (2015) Human-level control through deep reinforcement learning. Nature 518(7540):529\u2013533","journal-title":"Nature"},{"key":"1906_CR19","doi-asserted-by":"crossref","unstructured":"Van Hasselt H, Guez A, Silver D (2016) Deep reinforcement learning with double q-learning. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 30","DOI":"10.1609\/aaai.v30i1.10295"},{"issue":"12","key":"1906_CR20","doi-asserted-by":"publisher","first-page":"22153","DOI":"10.1109\/JIOT.2024.3379361","volume":"11","author":"Z Bai","year":"2024","unstructured":"Bai Z, Pang H, He Z, Zhao B, Wang T (2024) Path planning of autonomous mobile robot in comprehensive unknown environment using deep reinforcement learning. IEEE Internet Things J 11(12):22153\u201322166. https:\/\/doi.org\/10.1109\/JIOT.2024.3379361","journal-title":"IEEE Internet Things J"},{"key":"1906_CR21","unstructured":"Lillicrap T (2015) Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971"},{"key":"1906_CR22","unstructured":"Fujimoto S, Hoof H, Meger D (2018) Addressing function approximation error in actor-critic methods. In: International Conference on Machine Learning, pp 1587\u20131596. PMLR"},{"key":"1906_CR23","doi-asserted-by":"crossref","unstructured":"Tan Y, Lin Y, Liu T, Min H (2022) Pl-td3: A dynamic path planning algorithm of mobile robot. In: 2022 IEEE International Conference on Systems, Man, and Cybernetics (SMC), pp 3040\u20133045. IEEE","DOI":"10.1109\/SMC53654.2022.9945119"},{"issue":"24","key":"1906_CR24","doi-asserted-by":"publisher","first-page":"9802","DOI":"10.3390\/s23249802","volume":"23","author":"Y Zhang","year":"2023","unstructured":"Zhang Y, Chen P (2023) Path planning of a mobile robot for a dynamic indoor environment based on an sac-lstm algorithm. Sensors 23(24):9802","journal-title":"Sensors"},{"issue":"10","key":"1906_CR25","doi-asserted-by":"publisher","first-page":"1969","DOI":"10.3390\/electronics13101969","volume":"13","author":"X Kuang","year":"2024","unstructured":"Kuang X, Zhou S (2024) Robotic manipulator in dynamic environment with sac combing attention mechanism and lstm. Electronics 13(10):1969","journal-title":"Electronics"},{"key":"1906_CR26","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2023.121378","volume":"347","author":"H Xiao","year":"2023","unstructured":"Xiao H, Fu L, Shang C, Bao X, Xu X, Guo W (2023) Ship energy scheduling with dqn-ce algorithm combining bi-directional lstm and attention mechanism. Appl Energy 347:121378","journal-title":"Appl Energy"},{"issue":"5","key":"1906_CR27","doi-asserted-by":"publisher","first-page":"4202","DOI":"10.1109\/LRA.2024.3373988","volume":"9","author":"J Heuvel","year":"2024","unstructured":"Heuvel J, Zeng X, Shi W, Sethuraman T, Bennewitz M (2024) Spatiotemporal attention enhances lidar-based robot navigation in dynamic environments. IEEE Robot Autom Lett 9(5):4202\u20134209. https:\/\/doi.org\/10.1109\/LRA.2024.3373988","journal-title":"IEEE Robot Autom Lett"},{"key":"1906_CR28","doi-asserted-by":"publisher","unstructured":"Chen C, Liu Y, Kreiss S, Alahi A (2019) Crowd-robot interaction: crowd-aware robot navigation with attention-based deep reinforcement learning. In: 2019 International Conference on Robotics and Automation (ICRA), pp 6015\u20136022. https:\/\/doi.org\/10.1109\/ICRA.2019.8794134","DOI":"10.1109\/ICRA.2019.8794134"},{"issue":"3","key":"1906_CR29","doi-asserted-by":"publisher","first-page":"5533","DOI":"10.1109\/LRA.2021.3077863","volume":"6","author":"Q Li","year":"2021","unstructured":"Li Q, Lin W, Liu Z, Prorok A (2021) Message-aware graph attention networks for large-scale multi-robot path planning. IEEE Robot Autom Lett 6(3):5533\u20135540. https:\/\/doi.org\/10.1109\/LRA.2021.3077863","journal-title":"IEEE Robot Autom Lett"},{"issue":"7","key":"1906_CR30","doi-asserted-by":"publisher","first-page":"4415","DOI":"10.1109\/TSMC.2021.3096935","volume":"52","author":"M Pei","year":"2022","unstructured":"Pei M, An H, Liu B, Wang C (2022) An improved dyna-q algorithm for mobile robot path planning in unknown dynamic environment. IEEE Trans Syst Man Cybern Syst 52(7):4415\u20134425. https:\/\/doi.org\/10.1109\/TSMC.2021.3096935","journal-title":"IEEE Trans Syst Man Cybern Syst"},{"key":"1906_CR31","doi-asserted-by":"publisher","first-page":"13","DOI":"10.1016\/j.robot.2016.08.001","volume":"86","author":"TT Mac","year":"2016","unstructured":"Mac TT, Copot C, Tran DT, De Keyser R (2016) Heuristic approaches in robot path planning: a survey. Robot Auton Syst 86:13\u201328","journal-title":"Robot Auton Syst"},{"key":"1906_CR32","doi-asserted-by":"crossref","unstructured":"Yang Z, Zhu L, Wu Y, Yang Y (2019) Gated channel transformation for visual recognition. 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 11791\u201311800","DOI":"10.1109\/CVPR42600.2020.01181"}],"container-title":["Complex &amp; Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-025-01906-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-025-01906-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-025-01906-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,17]],"date-time":"2025-05-17T11:22:51Z","timestamp":1747480971000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-025-01906-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,8]]},"references-count":32,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2025,6]]}},"alternative-id":["1906"],"URL":"https:\/\/doi.org\/10.1007\/s40747-025-01906-9","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"value":"2199-4536","type":"print"},{"value":"2198-6053","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,5,8]]},"assertion":[{"value":"15 November 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 April 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 May 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"No potential conflict of interest is reported by the authors.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"This study does not involve human participants or sensitive data.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical and informed consent for data used"}}],"article-number":"277"}}