{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T19:42:31Z","timestamp":1772566951256,"version":"3.50.1"},"reference-count":51,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2025,1,22]],"date-time":"2025-01-22T00:00:00Z","timestamp":1737504000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,22]],"date-time":"2025-01-22T00:00:00Z","timestamp":1737504000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1007\/s10489-024-06149-8","type":"journal-article","created":{"date-parts":[[2025,1,22]],"date-time":"2025-01-22T05:56:35Z","timestamp":1737525395000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":20,"title":["A modified dueling DQN algorithm for robot path planning incorporating priority experience replay and artificial potential fields"],"prefix":"10.1007","volume":"55","author":[{"given":"Chang","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0809-8949","authenticated-orcid":false,"given":"Xiaofeng","family":"Yue","sequence":"additional","affiliation":[]},{"given":"Zeyuan","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Guoyuan","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Hongbo","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yuan","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Juan","family":"Zhu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,22]]},"reference":[{"key":"6149_CR1","doi-asserted-by":"publisher","first-page":"450","DOI":"10.3390\/sym10100450","volume":"10","author":"H Zhang","year":"2018","unstructured":"Zhang H, Lin W, Chen A (2018) Path planning for the mobile robot: A review. Symmetry 10:450. https:\/\/doi.org\/10.3390\/sym10100450","journal-title":"Symmetry"},{"key":"6149_CR2","doi-asserted-by":"publisher","first-page":"120254","DOI":"10.1016\/j.eswa.2023.120254","volume":"227","author":"L Liu","year":"2023","unstructured":"Liu L, Wang X, Yang X et al (2023) Path planning techniques for mobile robots: Review and prospect. Expert Syst Appl 227:120254. https:\/\/doi.org\/10.1016\/j.eswa.2023.120254","journal-title":"Expert Syst Appl"},{"key":"6149_CR3","doi-asserted-by":"publisher","first-page":"13","DOI":"10.1016\/j.robot.2016.08.001","volume":"86","author":"TT Mac","year":"2016","unstructured":"Mac TT, Copot C, Tran DT, De Keyser R (2016) Heuristic approaches in robot path planning: A survey. Rob Autom Syst 86:13\u201328. https:\/\/doi.org\/10.1016\/j.robot.2016.08.001","journal-title":"Rob Autom Syst"},{"key":"6149_CR4","doi-asserted-by":"publisher","first-page":"8392","DOI":"10.3390\/app12178392","volume":"12","author":"M Jain","year":"2022","unstructured":"Jain M, Saihjpal V, Singh N, Singh SB (2022) An Overview of Variants and Advancements of PSO Algorithm. Appl Sci 12:8392. https:\/\/doi.org\/10.3390\/app12178392","journal-title":"Appl Sci"},{"key":"6149_CR5","doi-asserted-by":"publisher","first-page":"e01068","DOI":"10.1016\/j.sciaf.2021.e01068","volume":"15","author":"OO Martins","year":"2022","unstructured":"Martins OO, Adekunle AA, Olaniyan OM, Bolaji BO (2022) An improved multi-objective a-star algorithm for path planning in a large workspace: Design, implementation, and evaluation. Scientific African 15:e01068. https:\/\/doi.org\/10.1016\/j.sciaf.2021.e01068","journal-title":"Scientific African"},{"key":"6149_CR6","doi-asserted-by":"publisher","first-page":"99498","DOI":"10.1109\/ACCESS.2022.3206356","volume":"10","author":"H Liu","year":"2022","unstructured":"Liu H, Zhang Y (2022) ASL-DWA: An improved a-star algorithm for indoor cleaning robots. IEEE Access 10:99498\u201399515. https:\/\/doi.org\/10.1109\/ACCESS.2022.3206356","journal-title":"IEEE Access"},{"key":"6149_CR7","doi-asserted-by":"publisher","first-page":"1320","DOI":"10.1038\/s41598-022-05386-6","volume":"12","author":"X Wang","year":"2022","unstructured":"Wang X, Zhang H, Liu S et al (2022) Path planning of scenic spots based on improved A* algorithm. Sci Rep 12:1320. https:\/\/doi.org\/10.1038\/s41598-022-05386-6","journal-title":"Sci Rep"},{"key":"6149_CR8","doi-asserted-by":"publisher","first-page":"012003","DOI":"10.1088\/1742-6596\/2330\/1\/012003","volume":"2330","author":"X Zhenyang","year":"2022","unstructured":"Zhenyang X, Wei Y (2022) Mobile robot path planning based on fusion of improved A* algorithm and adaptive DWA algorithm. J Phys: Conf Ser 2330:012003. https:\/\/doi.org\/10.1088\/1742-6596\/2330\/1\/012003","journal-title":"J Phys: Conf Ser"},{"key":"6149_CR9","doi-asserted-by":"publisher","first-page":"39729","DOI":"10.1109\/ACCESS.2022.3166632","volume":"10","author":"R Szczepanski","year":"2022","unstructured":"Szczepanski R, Tarczewski T, Erwinski K (2022) Energy Efficient Local Path Planning Algorithm Based on Predictive Artificial Potential Field. IEEE Access 10:39729\u201339742. https:\/\/doi.org\/10.1109\/ACCESS.2022.3166632","journal-title":"IEEE Access"},{"key":"6149_CR10","doi-asserted-by":"publisher","first-page":"108709","DOI":"10.1016\/j.oceaneng.2021.108709","volume":"223","author":"H Sang","year":"2021","unstructured":"Sang H, You Y, Sun X et al (2021) The hybrid path planning algorithm based on improved a* and artificial potential field for unmanned surface vehicle formations. Ocean Eng 223:108709. https:\/\/doi.org\/10.1016\/j.oceaneng.2021.108709","journal-title":"Ocean Eng"},{"key":"6149_CR11","doi-asserted-by":"publisher","unstructured":"Yafei L, Anping W, Qingyang C, Yujie W (2020) An improved uav path planning method based on RRT-APF hybrid strategy. In: 2020 5th International conference on automation, control and robotics engineering (CACRE). IEEE, Dalian, China, 81\u201386.\u00a0https:\/\/doi.org\/10.1109\/CACRE50138.2020.9229999","DOI":"10.1109\/CACRE50138.2020.9229999"},{"key":"6149_CR12","doi-asserted-by":"publisher","first-page":"120403","DOI":"10.1016\/j.eswa.2023.120403","volume":"228","author":"Y Liang","year":"2023","unstructured":"Liang Y, Zhao H (2023) CCPF-RRT*: An improved path planning algorithm with consideration of congestion. Expert Syst Appl 228:120403. https:\/\/doi.org\/10.1016\/j.eswa.2023.120403","journal-title":"Expert Syst Appl"},{"key":"6149_CR13","doi-asserted-by":"publisher","first-page":"40728","DOI":"10.1109\/ACCESS.2021.3062375","volume":"9","author":"Y Chen","year":"2021","unstructured":"Chen Y, Bai G, Zhan Y et al (2021) Path Planning and Obstacle Avoiding of the USV Based on Improved ACO-APF Hybrid Algorithm With Adaptive Early-Warning. IEEE Access 9:40728\u201340742. https:\/\/doi.org\/10.1109\/ACCESS.2021.3062375","journal-title":"IEEE Access"},{"key":"6149_CR14","doi-asserted-by":"publisher","first-page":"16313","DOI":"10.1007\/s11042-023-15498-4","volume":"83","author":"D Xia","year":"2023","unstructured":"Xia D, Shen B, Zheng Y et al (2023) A bidirectional-a-star-based ant colony optimization algorithm for big-data-driven taxi route recommendation. Multimed Tools Appl 83:16313\u201316335. https:\/\/doi.org\/10.1007\/s11042-023-15498-4","journal-title":"Multimed Tools Appl"},{"key":"6149_CR15","doi-asserted-by":"publisher","first-page":"180","DOI":"10.1016\/j.procs.2018.01.113","volume":"127","author":"C Lamini","year":"2018","unstructured":"Lamini C, Benhlima S, Elbekri A (2018) Genetic Algorithm Based Approach for Autonomous Mobile Robot Path Planning. Procedia Comput Sci 127:180\u2013189. https:\/\/doi.org\/10.1016\/j.procs.2018.01.113","journal-title":"Procedia Comput Sci"},{"key":"6149_CR16","doi-asserted-by":"publisher","first-page":"445","DOI":"10.1038\/nature14540","volume":"521","author":"ML Littman","year":"2015","unstructured":"Littman ML (2015) Reinforcement learning improves behaviour from evaluative feedback. Nature 521:445\u2013451. https:\/\/doi.org\/10.1038\/nature14540","journal-title":"Nature"},{"key":"6149_CR17","doi-asserted-by":"publisher","first-page":"1054","DOI":"10.1109\/TNN.1998.712192","volume":"9","author":"RS Sutton","year":"1998","unstructured":"Sutton RS, Barto AG (1998) Reinforcement Learning: An Introduction. IEEE Trans Neural Netw 9:1054\u20131054. https:\/\/doi.org\/10.1109\/TNN.1998.712192","journal-title":"IEEE Trans Neural Netw"},{"key":"6149_CR18","doi-asserted-by":"publisher","unstructured":"Mnih V, Kavukcuoglu K, Silver D, et al (2013) Playing Atari with Deep Reinforcement Learning. arXiv:1312.5602. https:\/\/doi.org\/10.48550\/arXiv.1312.5602","DOI":"10.48550\/arXiv.1312.5602"},{"key":"6149_CR19","doi-asserted-by":"publisher","first-page":"279","DOI":"10.1007\/BF00992698","volume":"8","author":"CJCH Watkins","year":"1992","unstructured":"Watkins CJCH, Dayan P (1992) Q-learning. Mach Learn 8:279\u2013292. https:\/\/doi.org\/10.1007\/BF00992698","journal-title":"Mach Learn"},{"key":"6149_CR20","doi-asserted-by":"publisher","first-page":"19572","DOI":"10.1109\/ACCESS.2022.3151248","volume":"10","author":"A Halbouni","year":"2022","unstructured":"Halbouni A, Gunawan TS, Habaebi MH et al (2022) Machine Learning and Deep Learning Approaches for CyberSecurity: A Review. IEEE Access 10:19572\u201319585. https:\/\/doi.org\/10.1109\/ACCESS.2022.3151248","journal-title":"IEEE Access"},{"key":"6149_CR21","doi-asserted-by":"publisher","first-page":"411","DOI":"10.1146\/annurev-control-042920-020211","volume":"5","author":"L Brunke","year":"2022","unstructured":"Brunke L, Greeff M, Hall AW et al (2022) Safe Learning in Robotics: From Learning-Based Control to Safe Reinforcement Learning. Annual Rev Control, Robot Autonom Syst 5:411\u2013444. https:\/\/doi.org\/10.1146\/annurev-control-042920-020211","journal-title":"Annual Rev Control, Robot Autonom Syst"},{"key":"6149_CR22","doi-asserted-by":"publisher","first-page":"106796","DOI":"10.1016\/j.asoc.2020.106796","volume":"97","author":"A Maoudj","year":"2020","unstructured":"Maoudj A, Hentout A (2020) Optimal path planning approach based on Q-learning algorithm for mobile robots. Appl Soft Comput 97:106796. https:\/\/doi.org\/10.1016\/j.asoc.2020.106796","journal-title":"Appl Soft Comput"},{"key":"6149_CR23","doi-asserted-by":"publisher","unstructured":"Van Hasselt H, Guez A, Silver D (2016) Deep Reinforcement Learning with Double Q-Learning. AAAI 30:. https:\/\/doi.org\/10.1609\/aaai.v30i1.10295","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"6149_CR24","doi-asserted-by":"publisher","unstructured":"Wang Z, Schaul T, Hessel M, et al (2016) Dueling network architectures for deep reinforcement learning. arXiv:1511.06581. https:\/\/doi.org\/10.48550\/arXiv.1511.06581","DOI":"10.48550\/arXiv.1511.06581"},{"key":"6149_CR25","doi-asserted-by":"publisher","unstructured":"Kim K-S, Kim D-E, Lee J-M (2018) Deep learning based on smooth driving for autonomous navigation. In: 2018 IEEE\/ASME International Conference on Advanced Intelligent Mechatronics (AIM). IEEE, Auckland, pp 616\u2013621.\u00a0https:\/\/doi.org\/10.1109\/AIM.2018.8452266","DOI":"10.1109\/AIM.2018.8452266"},{"key":"6149_CR26","doi-asserted-by":"publisher","first-page":"263","DOI":"10.1007\/s11370-019-00310-w","volume":"13","author":"S Wen","year":"2020","unstructured":"Wen S, Zhao Y, Yuan X et al (2020) Path planning for active SLAM based on deep reinforcement learning under unknown environments. Intel Serv Robot 13:263\u2013272. https:\/\/doi.org\/10.1007\/s11370-019-00310-w","journal-title":"Intel Serv Robot"},{"key":"6149_CR27","doi-asserted-by":"publisher","first-page":"1523","DOI":"10.3390\/s24051523","volume":"24","author":"DA Deguale","year":"2024","unstructured":"Deguale DA, Yu L, Sinishaw ML, Li K (2024) Enhancing Stability and Performance in Mobile Robot Path Planning with PMR-Dueling DQN Algorithm. Sens 24:1523. https:\/\/doi.org\/10.3390\/s24051523","journal-title":"Sens"},{"key":"6149_CR28","doi-asserted-by":"publisher","first-page":"249","DOI":"10.1109\/TCDS.2019.2928820","volume":"13","author":"K Wu","year":"2021","unstructured":"Wu K, Wang H, AbolfazliEsfahani M, Yuan S (2021) BND*-DDQN: Learn to Steer Autonomously Through Deep Reinforcement Learning. IEEE Trans Cogn Develop Syst 13:249\u2013261. https:\/\/doi.org\/10.1109\/TCDS.2019.2928820","journal-title":"IEEE Trans Cogn Develop Syst"},{"key":"6149_CR29","doi-asserted-by":"publisher","first-page":"4287","DOI":"10.1007\/s40747-022-00948-7","volume":"9","author":"Y Gu","year":"2023","unstructured":"Gu Y, Zhu Z, Lv J et al (2023) DM-DQN: Dueling Munchausen deep Q network for robot path planning. Complex Intell Syst 9:4287\u20134300. https:\/\/doi.org\/10.1007\/s40747-022-00948-7","journal-title":"Complex Intell Syst"},{"key":"6149_CR30","doi-asserted-by":"publisher","unstructured":"Zhang F, Gu C, Yang F (2022) An improved algorithm of robot path planning in complex environment based on double DQN. In: Yan L, Duan H, Yu X (eds) Advances in Guidance, Navigation and Control. Springer Singapore, Singapore, pp 303\u2013313.\u00a0https:\/\/doi.org\/10.1007\/978-981-15-8155-7_25","DOI":"10.1007\/978-981-15-8155-7_25"},{"key":"6149_CR31","doi-asserted-by":"publisher","first-page":"1570","DOI":"10.1002\/oca.2781","volume":"44","author":"R Huang","year":"2023","unstructured":"Huang R, Qin C, Li JL, Lan X (2023) Path planning of mobile robot in unknown dynamic continuous environment using reward-modified deep Q -network. Optimal Cont Applic Methods 44:1570\u20131587. https:\/\/doi.org\/10.1002\/oca.2781","journal-title":"Optimal Cont Applic Methods"},{"key":"6149_CR32","doi-asserted-by":"publisher","first-page":"297","DOI":"10.1007\/s10846-019-01073-3","volume":"98","author":"C Yan","year":"2020","unstructured":"Yan C, Xiang X, Wang C (2020) Towards Real-Time Path Planning through Deep Reinforcement Learning for a UAV in Dynamic Environments. J Intell Robot Syst 98:297\u2013309. https:\/\/doi.org\/10.1007\/s10846-019-01073-3","journal-title":"J Intell Robot Syst"},{"key":"6149_CR33","doi-asserted-by":"publisher","first-page":"012024","DOI":"10.1088\/1742-6596\/1820\/1\/012024","volume":"1820","author":"M Guan","year":"2021","unstructured":"Guan M, Yang FX, Jiao JC, Chen XP (2021) Research on path planning of mobile robot based on improved Deep Q Network. J Phys: Conf Ser 1820:012024. https:\/\/doi.org\/10.1088\/1742-6596\/1820\/1\/012024","journal-title":"J Phys: Conf Ser"},{"key":"6149_CR34","doi-asserted-by":"publisher","first-page":"107001","DOI":"10.1016\/j.oceaneng.2020.107001","volume":"199","author":"J Woo","year":"2020","unstructured":"Woo J, Kim N (2020) Collision avoidance for an unmanned surface vehicle using deep reinforcement learning. Ocean Eng 199:107001. https:\/\/doi.org\/10.1016\/j.oceaneng.2020.107001","journal-title":"Ocean Eng"},{"key":"6149_CR35","doi-asserted-by":"publisher","first-page":"417","DOI":"10.1007\/s42979-021-00817-z","volume":"2","author":"G Chen","year":"2021","unstructured":"Chen G, Pan L, Chen Y et al (2021) Deep Reinforcement Learning of Map-Based Obstacle Avoidance for Mobile Robot Navigation. SN Comput Sci 2:417. https:\/\/doi.org\/10.1007\/s42979-021-00817-z","journal-title":"SN Comput Sci"},{"key":"6149_CR36","doi-asserted-by":"publisher","first-page":"1795","DOI":"10.1007\/s13042-023-01998-0","volume":"15","author":"S Xu","year":"2024","unstructured":"Xu S, Bi W, Zhang A, Wang Y (2024) A deep reinforcement learning approach incorporating genetic algorithm for missile path planning. Int J Mach Learn Cyb 15:1795\u20131814. https:\/\/doi.org\/10.1007\/s13042-023-01998-0","journal-title":"Int J Mach Learn Cyb"},{"key":"6149_CR37","doi-asserted-by":"publisher","first-page":"056206","DOI":"10.1088\/1361-6501\/ad2663","volume":"35","author":"H Lv","year":"2024","unstructured":"Lv H, Chen Y, Li S et al (2024) Improve exploration in deep reinforcement learning for UAV path planning using state and action entropy. Meas Sci Technol 35:056206. https:\/\/doi.org\/10.1088\/1361-6501\/ad2663","journal-title":"Meas Sci Technol"},{"key":"6149_CR38","doi-asserted-by":"publisher","unstructured":"Han H, Wang J, Kuang L et al (2023) Improved robot path planning method based on deep reinforcement learning. Sensors\u00a023:5622. https:\/\/doi.org\/10.3390\/s23125622","DOI":"10.3390\/s23125622"},{"key":"6149_CR39","doi-asserted-by":"publisher","unstructured":"Schaul T, Quan J, Antonoglou I, Silver D (2016) Prioritized experience replay. arXiv:1511.05952. https:\/\/doi.org\/10.48550\/arXiv.1511.05952","DOI":"10.48550\/arXiv.1511.05952"},{"key":"6149_CR40","doi-asserted-by":"publisher","first-page":"120495","DOI":"10.1016\/j.eswa.2023.120495","volume":"231","author":"AK Shakya","year":"2023","unstructured":"Shakya AK, Pillai G, Chakrabarty S (2023) Reinforcement learning algorithms: A brief survey. Expert Syst Appl 231:120495. https:\/\/doi.org\/10.1016\/j.eswa.2023.120495","journal-title":"Expert Syst Appl"},{"key":"6149_CR41","doi-asserted-by":"publisher","unstructured":"Bai Z, Pang H, He Z, et al (2024) Path Planning of Autonomous Mobile Robot in Comprehensive Unknown Environment Using Deep Reinforcement Learning. IEEE Internet Things J 1\u20131. https:\/\/doi.org\/10.1109\/JIOT.2024.3379361","DOI":"10.1109\/JIOT.2024.3379361"},{"key":"6149_CR42","doi-asserted-by":"publisher","unstructured":"Quinones-Ramirez M, Rios-Martinez J, Uc-Cetina V (2023) Robot path planning using deep reinforcement learning. arXiv:2302.09120. https:\/\/doi.org\/10.48550\/arXiv.2302.09120","DOI":"10.48550\/arXiv.2302.09120"},{"key":"6149_CR43","doi-asserted-by":"publisher","first-page":"105642","DOI":"10.1016\/j.engappai.2022.105642","volume":"118","author":"Y Zhang","year":"2023","unstructured":"Zhang Y, Rao X, Liu C et al (2023) A cooperative EV charging scheduling strategy based on double deep Q-network and Prioritized experience replay. Eng Appl Artif Intell 118:105642. https:\/\/doi.org\/10.1016\/j.engappai.2022.105642","journal-title":"Eng Appl Artif Intell"},{"key":"6149_CR44","doi-asserted-by":"publisher","first-page":"21","DOI":"10.1109\/MRA.2021.3115980","volume":"28","author":"W Yuan","year":"2021","unstructured":"Yuan W, Li Y, Zhuang H et al (2021) Prioritized Experience Replay-Based Deep Q Learning: Multiple-Reward Architecture for Highway Driving Decision Making. IEEE Robot Autom Mag 28:21\u201331. https:\/\/doi.org\/10.1109\/MRA.2021.3115980","journal-title":"IEEE Robot Autom Mag"},{"key":"6149_CR45","doi-asserted-by":"publisher","unstructured":"Khatib O (1985) Real-time obstacle avoidance for manipulators and mobile robots. In: Proceedings. 1985 IEEE International Conference on Robotics and Automation. Institute of Electrical and Electronics Engineers, St. Louis, MO, USA, pp 500\u2013505. https:\/\/doi.org\/10.1109\/ROBOT.1985.1087247","DOI":"10.1109\/ROBOT.1985.1087247"},{"key":"6149_CR46","doi-asserted-by":"publisher","first-page":"108875","DOI":"10.1016\/j.patcog.2022.108875","volume":"131","author":"M Li","year":"2022","unstructured":"Li M, Huang T, Zhu W (2022) Clustering experience replay for the effective exploitation in reinforcement learning. Pattern Recognit 131:108875. https:\/\/doi.org\/10.1016\/j.patcog.2022.108875","journal-title":"Pattern Recognit"},{"key":"6149_CR47","doi-asserted-by":"publisher","unstructured":"Zheng H, Xie W, Feng MB (2022) Variance reduction based experience replay for policy optimization. arXiv:2110.08902. https:\/\/doi.org\/10.48550\/arXiv.2110.08902","DOI":"10.48550\/arXiv.2110.08902"},{"key":"6149_CR48","doi-asserted-by":"publisher","first-page":"2423","DOI":"10.3390\/electronics13122423","volume":"13","author":"W Hu","year":"2024","unstructured":"Hu W, Zhou Y, Ho HW (2024) Mobile Robot Navigation Based on Noisy N-Step Dueling Double Deep Q-Network and Prioritized Experience Replay. Electronics 13:2423. https:\/\/doi.org\/10.3390\/electronics13122423","journal-title":"Electronics"},{"key":"6149_CR49","doi-asserted-by":"publisher","unstructured":"Lim H-D, Lee D (2023) Temporal difference learning with experience replay. arXiv:2306.09746. https:\/\/doi.org\/10.48550\/arXiv.2306.09746","DOI":"10.48550\/arXiv.2306.09746"},{"key":"6149_CR50","doi-asserted-by":"publisher","first-page":"38017","DOI":"10.1109\/ACCESS.2024.3375083","volume":"12","author":"X Luo","year":"2024","unstructured":"Luo X, Wang Q, Gong H, Tang C (2024) UAV Path Planning Based on the Average TD3 Algorithm With Prioritized Experience Replay. IEEE Access 12:38017\u201338029. https:\/\/doi.org\/10.1109\/ACCESS.2024.3375083","journal-title":"IEEE Access"},{"key":"6149_CR51","doi-asserted-by":"publisher","unstructured":"Stentz A (1994) Optimal and efficient path planning for partially-known environments. Proceed 1994 IEEE Int Conf Robot Autom.\u00a0https:\/\/doi.org\/10.1109\/ROBOT.1994.351061","DOI":"10.1109\/ROBOT.1994.351061"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-06149-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-024-06149-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-06149-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,7,15]],"date-time":"2025-07-15T09:33:47Z","timestamp":1752572027000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-024-06149-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1,22]]},"references-count":51,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2025,4]]}},"alternative-id":["6149"],"URL":"https:\/\/doi.org\/10.1007\/s10489-024-06149-8","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1,22]]},"assertion":[{"value":"3 December 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 January 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"This study did not involve human participants, animal subjects, or third-party data. Therefore, ethical approval and informed consent are not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interest"}}],"article-number":"366"}}