{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T17:33:57Z","timestamp":1758303237509,"version":"3.44.0"},"reference-count":53,"publisher":"Springer Science and Business Media LLC","issue":"11","license":[{"start":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:00:00Z","timestamp":1750291200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:00:00Z","timestamp":1750291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1007\/s10489-025-06671-3","type":"journal-article","created":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T08:54:21Z","timestamp":1750323261000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Dynamic obstacle avoidance and grasping planning for mobile robotic arm in complex environment based on improved TD3"],"prefix":"10.1007","volume":"55","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-6534-3333","authenticated-orcid":false,"given":"Yong","family":"Li","sequence":"first","affiliation":[]},{"given":"Linbing","family":"Ke","sequence":"additional","affiliation":[]},{"given":"Chaoxing","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,6,19]]},"reference":[{"issue":"05","key":"6671_CR1","first-page":"3","volume":"51","author":"R Liu","year":"2017","unstructured":"Liu R, Yuan H, Yang Y et al (2017) Research status and development direction of mobile manipulator. 
Tool Eng 51(05):3\u20138","journal-title":"Tool Eng"},{"key":"6671_CR2","doi-asserted-by":"publisher","first-page":"64","DOI":"10.1016\/j.neucom.2022.05.006","volume":"497","author":"P Chen","year":"2022","unstructured":"Chen P, Pei J, Lu W et al (2022) A deep reinforcement learning based method for real-time path planning and dynamic obstacle avoidance. Neurocomputing 497:64\u201375. https:\/\/doi.org\/10.1016\/j.neucom.2022.05.006","journal-title":"Neurocomputing"},{"key":"6671_CR3","doi-asserted-by":"publisher","unstructured":"Li Y, Liu Y (2020) Vision-based obstacle avoidance algorithm for mobile robot. In: Proc. chinese automation congress (CAC), Shanghai, China, pp 1273\u20131278. https:\/\/doi.org\/10.1109\/CAC51589.2020.9326906","DOI":"10.1109\/CAC51589.2020.9326906"},{"issue":"03","key":"6671_CR4","first-page":"95","volume":"42","author":"C Chen","year":"2023","unstructured":"Chen C, Liu Y, Wang G (2023) Obstacle avoidance path planning of mobile manipulator based on aco-rrt algorithm. J Henan Polytech Univ (Nat Sci) 42(03):95\u2013102","journal-title":"J Henan Polytech Univ (Nat Sci)"},{"issue":"8","key":"6671_CR5","doi-asserted-by":"publisher","first-page":"3177","DOI":"10.1007\/s40815-023-01563-5","volume":"25","author":"X Song","year":"2023","unstructured":"Song X, Song Y, Stojanovic V et al (2023) Improved dynamic event-triggered security control for t-s fuzzy lpv-pde systems via pointwise measurements and point control. Int J Fuzzy Syst 25(8):3177\u20133192. https:\/\/doi.org\/10.1007\/s40815-023-01563-5","journal-title":"Int J Fuzzy Syst"},{"issue":"11","key":"6671_CR6","doi-asserted-by":"publisher","first-page":"2015","DOI":"10.1177\/01423312221142564","volume":"45","author":"S Guan","year":"2023","unstructured":"Guan S, Zhuang Z, Tao H, Chen Y (2023) Feedback-aided pd-type iterative learning control for time-varying systems with non-uniform trial lengths. Trans Inst Meas Control 45(11):2015\u20132026. 
https:\/\/doi.org\/10.1177\/01423312221142564","journal-title":"Trans Inst Meas Control"},{"key":"6671_CR7","first-page":"1","volume":"06","author":"J Wu","year":"2023","unstructured":"Wu J, Bi Z, Li Z et al (2023) End-path planning strategy for redundant manipulator based on improved double-tree rrt* algorithm. Comput Appl Res 06:1\u20138","journal-title":"Comput Appl Res"},{"issue":"02","key":"6671_CR8","first-page":"236","volume":"44","author":"J Chen","year":"2022","unstructured":"Chen J, Zheng M (2022) Research on robot operation behavior based on deep reinforcement learning. Robot 44(02):236\u2013256","journal-title":"Robot"},{"key":"6671_CR9","doi-asserted-by":"publisher","unstructured":"Zhu K, Zhang T (2021) Deep reinforcement learning based mobile robot navigation: A review. Tsinghua Sci Technol 26(5):674\u2013691. https:\/\/doi.org\/10.26599\/TST.2021.9010034","DOI":"10.26599\/TST.2021.9010034"},{"issue":"3","key":"6671_CR10","doi-asserted-by":"publisher","first-page":"177","DOI":"10.1049\/trit.2020.0024","volume":"5","author":"Y Yang","year":"2020","unstructured":"Yang Y, Liu J, Pan L (2020) Multi-robot path planning based on a deep reinforcement learning dqn algorithm. CAAI Trans Intell Technol 5(3):177\u2013183. https:\/\/doi.org\/10.1049\/trit.2020.0024","journal-title":"CAAI Trans Intell Technol"},{"issue":"4","key":"6671_CR11","doi-asserted-by":"publisher","first-page":"2393","DOI":"10.1109\/TII.2019.2942191","volume":"16","author":"H Shi","year":"2020","unstructured":"Shi H, Shi L, Xu M, Hwang KS (2020) End-to-end navigation strategy with deep reinforcement learning for mobile robots. IEEE Trans Ind Inform 16(4):2393\u20132402. https:\/\/doi.org\/10.1109\/TII.2019.2942191","journal-title":"IEEE Trans Ind Inform"},{"key":"6671_CR12","doi-asserted-by":"publisher","unstructured":"Tai L, Liu M (2016) A robot exploration strategy based on q-learning network. 
In: Proceedings IEEE International Conference Real-Time Compututing Robotics (RCAR), pp 57\u201362. https:\/\/doi.org\/10.1109\/RCAR.2016.7784001","DOI":"10.1109\/RCAR.2016.7784001"},{"issue":"7\u20139","key":"6671_CR13","doi-asserted-by":"publisher","first-page":"1180","DOI":"10.1016\/j.neucom.2007.11.026","volume":"71","author":"J Peters","year":"2008","unstructured":"Peters J, Schaal S (2008) Natural actor-critic. Neurocomputing 71(7\u20139):1180\u20131190. https:\/\/doi.org\/10.1016\/j.neucom.2007.11.026","journal-title":"Neurocomputing"},{"issue":"5","key":"6671_CR14","doi-asserted-by":"publisher","first-page":"1141","DOI":"10.1109\/TSMC.2012.2229296","volume":"43","author":"A Konar","year":"2013","unstructured":"Konar A, Chakraborty IG, Singh SJ et al (2013) A deterministic improved q-learning for path planning of a mobile robot. IEEE Trans Syst Man Cybern Syst 43(5):1141\u20131153. https:\/\/doi.org\/10.1109\/TSMC.2012.2229296","journal-title":"IEEE Trans Syst Man Cybern Syst"},{"key":"6671_CR15","doi-asserted-by":"publisher","DOI":"10.1016\/j.compeleceng.2024.109870","volume":"121","author":"PN Dao","year":"2025","unstructured":"Dao PN, Phung MH (2025) Nonlinear robust integral based actor-critic reinforcement learning control for a perturbed three-wheeled mobile robot with mecanum wheels. Comput Electr Eng 121:109870. https:\/\/doi.org\/10.1016\/j.compeleceng.2024.109870","journal-title":"Comput Electr Eng"},{"key":"6671_CR16","doi-asserted-by":"publisher","unstructured":"Nguyen H, Dang HB, Dao PN (2024) On-policy and off-policy q-learning strategies for spacecraft systems: An approach for time-varying discrete-time without controllability assumption of augmented system. Aerosp Sci Technol 146:108972. 
https:\/\/doi.org\/10.1016\/j.ast.2024.108972","DOI":"10.1016\/j.ast.2024.108972"},{"issue":"2","key":"6671_CR17","doi-asserted-by":"publisher","first-page":"575","DOI":"10.3390\/app10020575","volume":"10","author":"MS Kim","year":"2020","unstructured":"Kim MS, Han DK, Park JH, Kim JS (2020) Motion planning of robot manipulators for a smoother path using a twin delayed deep deterministic policy gradient with hindsight experience replay. Appl Sci 10(2):575. https:\/\/doi.org\/10.3390\/app10020575","journal-title":"Appl Sci"},{"key":"6671_CR18","doi-asserted-by":"publisher","DOI":"10.1016\/j.asoc.2021.107605","volume":"110","author":"S Wen","year":"2021","unstructured":"Wen S, Wen Z, Zhang D et al (2021) A multi-robot path-planning algorithm for autonomous navigation using meta-reinforcement learning based on transfer learning. Appl Soft Comput 110:107605. https:\/\/doi.org\/10.1016\/j.asoc.2021.107605","journal-title":"Appl Soft Comput"},{"key":"6671_CR19","unstructured":"Haarnoja T, Zhou A, Hartikainen K, et al (2018) Soft actor-critic algorithms and applications. arXiv preprint arXiv:1812.05905"},{"issue":"2","key":"6671_CR20","first-page":"23","volume":"16","author":"JW Mock","year":"2024","unstructured":"Mock JW (2024) Muknahallipatna SS (2024) Sim-to-real: A performance comparison of ppo td3 and sac reinforcement learning algorithms for quadruped walking gait generation. J Intell Learn Syst Appl 16(2):23\u201343","journal-title":"J Intell Learn Syst Appl"},{"issue":"3","key":"6671_CR21","doi-asserted-by":"publisher","first-page":"311","DOI":"10.3390\/electronics11030311","volume":"11","author":"Y Liu","year":"2022","unstructured":"Liu Y, Gao P, Zheng C et al (2022) A deep reinforcement learning strategy combining expert experience guidance for a fruit-picking manipulator. Electr 11(3):311. 
https:\/\/doi.org\/10.3390\/electronics11030311","journal-title":"Electr"},{"key":"6671_CR22","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2024.3371094","author":"H Liu","year":"2024","unstructured":"Liu H, Ying F, Jiang R, Shan Y (2024) Obstacle-avoidable robotic motion planning framework based on deep reinforcement learning. IEEE\/ASME Trans Mechatron. https:\/\/doi.org\/10.1109\/TMECH.2024.3371094","journal-title":"IEEE\/ASME Trans Mechatron"},{"issue":"02","key":"6671_CR23","first-page":"223","volume":"4","author":"Q Zhang","year":"2022","unstructured":"Zhang Q, Wen W, Zhou X et al (2022) Research on intelligent planning method of robot arm based on improved td3 algorithm. J Intell Sci Technol 4(02):223\u2013232","journal-title":"J Intell Sci Technol"},{"issue":"2","key":"6671_CR24","first-page":"17","volume":"4","author":"V Deshpande","year":"2014","unstructured":"Deshpande V, George PM (2014) Kinematic modelling and analysis of 5 dof robotic arm. Int J Robot Res Dev 4(2):17\u201324","journal-title":"Int J Robot Res Dev"},{"key":"6671_CR25","volume-title":"Numerical optimization","author":"SJ Wright","year":"2006","unstructured":"Wright SJ (2006) Numerical optimization. Springer, New York"},{"issue":"7","key":"6671_CR26","doi-asserted-by":"publisher","first-page":"609","DOI":"10.1080\/03052150902736879","volume":"41","author":"MJ Tahk","year":"2009","unstructured":"Tahk MJ, Park MS, Woo HW, Kim HJ (2009) Hessian approximation algorithms for hybrid optimization methods. Eng Optim 41(7):609\u2013633. https:\/\/doi.org\/10.1080\/03052150902736879","journal-title":"Eng Optim"},{"key":"6671_CR27","unstructured":"Mathy CJM, Gonda F, Schmidt D et al (2015) Sparta: Fast global planning of collision-avoiding robot trajectories. 
In: NIPS Workshop on learning inference and control of multi-agent systems"},{"key":"6671_CR28","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511804441","volume-title":"Convex optimization","author":"SP Boyd","year":"2004","unstructured":"Boyd SP, Vandenberghe L (2004) Convex optimization. Cambridge University Press, Cambridge"},{"issue":"13\u201314","key":"6671_CR29","doi-asserted-by":"publisher","first-page":"1474","DOI":"10.1177\/0278364917722399","volume":"36","author":"P Englert","year":"2017","unstructured":"Englert P, Vien NA, Toussaint M (2017) Inverse kkt: Learning cost functions of manipulation tasks from demonstrations. Int J Robot Res 36(13\u201314):1474\u20131488. https:\/\/doi.org\/10.1177\/0278364917722399","journal-title":"Int J Robot Res"},{"issue":"1","key":"6671_CR30","doi-asserted-by":"publisher","first-page":"1365","DOI":"10.1007\/s10107-023-01962-7","volume":"199","author":"X Jia","year":"2023","unstructured":"Jia X, Kanzow C, Mehlitz P et al (2023) An augmented lagrangian method for optimization problems with structured geometric constraints. Math Program 199(1):1365\u20131415. https:\/\/doi.org\/10.1007\/s10107-023-01962-7","journal-title":"Math Program"},{"key":"6671_CR31","doi-asserted-by":"publisher","first-page":"771","DOI":"10.3934\/ces.2018.771","volume":"11","author":"GC Velez","year":"2018","unstructured":"Velez GC, Mesa F, Alzate PPC (2018) Linear search optimization through the armijo rule method. Contemp. Eng Sci 11:771\u2013778. https:\/\/doi.org\/10.3934\/ces.2018.771","journal-title":"Contemp. Eng Sci"},{"key":"6671_CR32","first-page":"3975","volume":"35","author":"C Liu","year":"2022","unstructured":"Liu C, Luo L (2022) Quasi-newton methods for saddle point problems. 
Adv Neural Inf Process Syst 35:3975\u20133987","journal-title":"Adv Neural Inf Process Syst"},{"key":"6671_CR33","volume-title":"Reinforcement learning: An introduction","author":"RS Sutton","year":"2018","unstructured":"Sutton RS, Barto AG (2018) Reinforcement learning: An introduction. MIT Press, Cambridge"},{"key":"6671_CR34","unstructured":"Lillicrap TP, Hunt JJ, Pritzel A et al (2015) Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971"},{"key":"6671_CR35","unstructured":"Fujimoto S, Hoof H, Meger D (2018) Addressing function approximation error in actor-critic methods. In: Proceedings international conference machine learning (ICML), pp 1587\u20131596"},{"issue":"1","key":"6671_CR36","doi-asserted-by":"publisher","first-page":"156","DOI":"10.3390\/pr12010156","volume":"12","author":"D Zhao","year":"2024","unstructured":"Zhao D, Ding Z, Li W et al (2024) Optimization of smart textiles robotic arm path planning: A model-free deep reinforcement learning approach with inverse kinematics. Process 12(1):156. https:\/\/doi.org\/10.3390\/pr12010156","journal-title":"Process"},{"key":"6671_CR37","doi-asserted-by":"publisher","unstructured":"Zhang Y, Shao Z, Guo S (2024) Configuration-aware robotic trajectory planning based on deep deterministic policy gradients for active object tracking. In: Proceedings IEEE international conference mechatronics automations (ICMA), pp 1031\u20131036. https:\/\/doi.org\/10.1109\/ICMA60452.2024.10309974","DOI":"10.1109\/ICMA60452.2024.10309974"},{"issue":"4","key":"6671_CR38","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1007\/s10846-023-01852-2","volume":"107","author":"L Zheng","year":"2023","unstructured":"Zheng L, Wang YH, Yang R et al (2023) An efficiently convergent deep reinforcement learning-based trajectory planning method for manipulators in dynamic environments. J Intell Robot Syst 107(4):50. 
https:\/\/doi.org\/10.1007\/s10846-023-01852-2","journal-title":"J Intell Robot Syst"},{"key":"6671_CR39","doi-asserted-by":"publisher","unstructured":"Cho K, Van\u00a0Merri\u00ebnboer B, Gulcehre C, et al (2014) Learning phrase representations using RNN encoder-decoder for statistical machine translation. arXiv preprint arXiv:1406.1078, https:\/\/doi.org\/10.3115\/v1\/D14-1179","DOI":"10.3115\/v1\/D14-1179"},{"issue":"7","key":"6671_CR40","doi-asserted-by":"publisher","first-page":"1251","DOI":"10.1177\/09596518231153446","volume":"237","author":"Q Xie","year":"2023","unstructured":"Xie Q, Zhang Y, Wang T, Zhu S (2023) Dynamic response prediction of hydraulic soft robotic arms based on lstm neural network. Proc Inst Mech Eng I: J Syst Control Eng 237(7):1251\u20131265. https:\/\/doi.org\/10.1177\/09596518231153446","journal-title":"Proc Inst Mech Eng I: J Syst Control Eng"},{"issue":"10","key":"6671_CR41","doi-asserted-by":"publisher","first-page":"3004","DOI":"10.3390\/s24103004","volume":"24","author":"E Mul\u00e1s-Tejeda","year":"2024","unstructured":"Mul\u00e1s-Tejeda E, G\u00f3mez-Espinosa A, Escobedo Cabello JA et al (2024) Implementation of a long short-term memory neural network-based algorithm for dynamic obstacle avoidance. Sensors 24(10):3004. https:\/\/doi.org\/10.3390\/s24103004","journal-title":"Sensors"},{"issue":"16","key":"6671_CR42","doi-asserted-by":"publisher","first-page":"5625","DOI":"10.3390\/s21165625","volume":"21","author":"K Zarzycki","year":"2021","unstructured":"Zarzycki K, \u0141awry\u0144czuk M (2021) Lstm and gru neural networks as models of dynamical processes used in predictive control: A comparison of models developed for two chemical reactors. Sensors 21(16):5625. 
https:\/\/doi.org\/10.3390\/s21165625","journal-title":"Sensors"},{"key":"6671_CR43","doi-asserted-by":"publisher","unstructured":"Alsanwy S, Chalak\u00a0Qazani M.R, Al-Ashwal W, Shajari A, Nahvandi S, Asadi H (2024) Vehicle trajectory prediction using deep learning for advanced driver assistance systems and autonomous vehicles. In: Proc IEEE Int Syst Conf (SysCon), Montreal QC Canada, pp 1\u20138 (2024). https:\/\/doi.org\/10.1109\/SysCon61195.2024.10553601","DOI":"10.1109\/SysCon61195.2024.10553601"},{"key":"6671_CR44","doi-asserted-by":"publisher","unstructured":"Jia J, Xing X, Chang DE (2022) Gru-attention based td3 network for mobile robot navigation. In: Proc Int Conf Control Autom Syst (ICCAS), Jeju, Korea Republic of, pp 1642\u20131647. https:\/\/doi.org\/10.23919\/ICCAS55662.2022.10003950","DOI":"10.23919\/ICCAS55662.2022.10003950"},{"key":"6671_CR45","doi-asserted-by":"publisher","DOI":"10.1155\/2023\/8145687","author":"X Huang","year":"2023","unstructured":"Huang X, Wang W, Ji Z et al (2023) Representation enhancement-based proximal policy optimization for uav path planning and obstacle avoidance. Int J Aerosp Eng. https:\/\/doi.org\/10.1155\/2023\/8145687","journal-title":"Int J Aerosp Eng"},{"key":"6671_CR46","doi-asserted-by":"publisher","unstructured":"Wang Y, Xi M, Weng Y (2023) Intelligent path planning algorithm of autonomous underwater vehicle based on vision under ocean current. Expert Syst 13399. https:\/\/doi.org\/10.1111\/exsy.13399","DOI":"10.1111\/exsy.13399"},{"key":"6671_CR47","unstructured":"Fedus W, Ramachandran P, Agarwal R et al (2020) Revisiting fundamentals of experience replay. In: Proc Int Conf Mach Learn (ICML), pp 3061\u20133071"},{"key":"6671_CR48","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2023.107560","volume":"132","author":"H Hassani","year":"2024","unstructured":"Hassani H, Nikan S, Shami A (2024) Traffic navigation via reinforcement learning with episodic-guided prioritized experience replay. 
Eng Appl Artif Intell 132:107560. https:\/\/doi.org\/10.1016\/j.engappai.2023.107560","journal-title":"Eng Appl Artif Intell"},{"key":"6671_CR49","doi-asserted-by":"publisher","unstructured":"Jiang Y, Li Z, Qu X, Long F, Zhang R (2024) Modified td3 reinforcement learning-based path following control for an autonomous underwater vehicle. In: China intelligent robotics annual conference, Springer, ???, pp 341\u2013352. https:\/\/doi.org\/10.1007\/978-981-97-2461-3_27","DOI":"10.1007\/978-981-97-2461-3_27"},{"key":"6671_CR50","doi-asserted-by":"publisher","unstructured":"Weishaupt S, Husmann R et al (2024) Comparative analysis of multiple deep reinforcement learning approaches for collision-free path-planning of a 3-dof robot. In: Proc Amer Control Conf (ACC). https:\/\/doi.org\/10.23919\/ACC55779.2024.10238789","DOI":"10.23919\/ACC55779.2024.10238789"},{"key":"6671_CR51","unstructured":"Jiang H (2024) Learning-based robot navigation in dynamic environments: From indoor scenes to human crowd scenes. PhD Thesis Nanyang technological university, Singapore. https:\/\/dr.ntu.edu.sg\/handle\/10356\/172736"},{"key":"6671_CR52","unstructured":"Ivanov S, D\u2019yakonov A (2019) Modern deep reinforcement learning algorithms. arXiv preprint arXiv:1906.10025"},{"key":"6671_CR53","doi-asserted-by":"publisher","first-page":"31","DOI":"10.1007\/s40032-014-0099-z","volume":"95","author":"S Liu","year":"2014","unstructured":"Liu S, Zhang Q, Zhou D (2014) Obstacle avoidance path planning of space manipulator based on improved artificial potential field method. J Inst Eng India Ser C 95:31\u201339. 
https:\/\/doi.org\/10.1007\/s40032-014-0099-z","journal-title":"J Inst Eng India Ser C"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-025-06671-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-025-06671-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-025-06671-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T13:38:49Z","timestamp":1758289129000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-025-06671-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,19]]},"references-count":53,"journal-issue":{"issue":"11","published-print":{"date-parts":[[2025,7]]}},"alternative-id":["6671"],"URL":"https:\/\/doi.org\/10.1007\/s10489-025-06671-3","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"type":"print","value":"0924-669X"},{"type":"electronic","value":"1573-7497"}],"subject":[],"published":{"date-parts":[[2025,6,19]]},"assertion":[{"value":"22 May 2025","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 June 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The manuscript has not been submitted to multiple journals for simultaneous consideration. The submitted work is original. 
The authors declare that the data in this article has not been falsified or tampered with.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval and consent to participate"}},{"value":"Not applicable","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"The authors declare that they have no competing financial interests or personal relationships that could influence the work reported in this paper.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"776"}}