{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T19:32:19Z","timestamp":1772911939103,"version":"3.50.1"},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"11","license":[{"start":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T00:00:00Z","timestamp":1750809600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T00:00:00Z","timestamp":1750809600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100010040","name":"Taishan Scholar Project of Shandong Province","doi-asserted-by":"publisher","award":["tsqn202211129"],"award-info":[{"award-number":["tsqn202211129"]}],"id":[{"id":"10.13039\/501100010040","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62073189"],"award-info":[{"award-number":["62073189"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1007\/s10489-025-06655-3","type":"journal-article","created":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T09:32:14Z","timestamp":1750843934000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Trajectory tracking control for robotic manipulator with disturbances: a double-Q reinforcement learning method"],"prefix":"10.1007","volume":"55","author":[{"given":"Dehai","family":"Yu","sequence":"first","affiliation":[]},{"given":"Weiwei","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Yongshu","family":"Li","sequence":"additional","affiliation":[]},{"given":"Zhuangzhuang","family":"Luan","sequence":"additional","affiliation":[]},{"given":"Zhongcai","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,6,25]]},"reference":[{"issue":"10","key":"6655_CR1","doi-asserted-by":"publisher","first-page":"6439","DOI":"10.1109\/TIE.2016.2569068","volume":"63","author":"B Xiao","year":"2016","unstructured":"Xiao B, Yin S, Kaynak O (2016) Tracking control of robotic manipulators with uncertain kinematics and dynamics. IEEE Trans Ind Electron 63(10):6439\u20136449. https:\/\/doi.org\/10.1109\/TIE.2016.2569068","journal-title":"IEEE Trans Ind Electron"},{"key":"6655_CR2","doi-asserted-by":"publisher","first-page":"142","DOI":"10.1016\/j.conengprac.2017.10.015","volume":"71","author":"L Roveda","year":"2018","unstructured":"Roveda L, Pedrocchi N, Beschi M, Tosatti LM (2018) High-accuracy robotized industrial assembly task control schema with force overshoots avoidance. Control Eng Pract 71:142\u2013153. https:\/\/doi.org\/10.1016\/j.conengprac.2017.10.015","journal-title":"Control Eng Pract"},{"issue":"8","key":"6655_CR3","doi-asserted-by":"publisher","first-page":"3331","DOI":"10.1109\/TNNLS.2021.3051946","volume":"33","author":"W Sun","year":"2022","unstructured":"Sun W, Wu Y, Lv X (2022) Adaptive neural network control for full-state constrained robotic manipulator with actuator saturation and time-varying delays. IEEE Trans Neural Netw Learn Syst 33(8):3331\u20133342. https:\/\/doi.org\/10.1109\/TNNLS.2021.3051946","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"3","key":"6655_CR4","doi-asserted-by":"publisher","first-page":"1299","DOI":"10.1109\/TMECH.2017.2674701","volume":"22","author":"GP Incremona","year":"2017","unstructured":"Incremona GP, Ferrara A, Magni L (2017) MPC for robot manipulators with integral sliding modes generation. IEEE\/ASME Trans Mechatronics 22(3):1299\u20131307. https:\/\/doi.org\/10.1109\/TMECH.2017.2674701","journal-title":"IEEE\/ASME Trans Mechatronics"},{"issue":"8","key":"6655_CR5","doi-asserted-by":"publisher","first-page":"4584","DOI":"10.1109\/TNNLS.2021.3116713","volume":"34","author":"S Cao","year":"2023","unstructured":"Cao S, Sun L, Jiang J, Zuo Z (2023) Reinforcement learning-based fixed-time trajectory tracking control for uncertain robotic manipulators with input saturation. IEEE Trans Neural Netw Learn Syst 34(8):4584\u20134595. https:\/\/doi.org\/10.1109\/TNNLS.2021.3116713","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"1","key":"6655_CR6","doi-asserted-by":"publisher","first-page":"180","DOI":"10.1109\/TRO.2009.2033957","volume":"26","author":"L Bascetta","year":"2010","unstructured":"Bascetta L, Rocco P (2010) Revising the robust-control design for rigid robot manipulators. IEEE Trans Robot 26(1):180\u2013187. https:\/\/doi.org\/10.1109\/TRO.2009.2033957","journal-title":"IEEE Trans Robot"},{"issue":"10","key":"6655_CR7","doi-asserted-by":"publisher","first-page":"10034","DOI":"10.1109\/TII.2022.3232768","volume":"19","author":"H Chen","year":"2024","unstructured":"Chen H, Zong G, Zhao X, Gao F, Shi K (2024) Secure filter design of fuzzy switched CPSs with mismatched modes and application: A multidomain event-triggered strategy. IEEE Trans Ind Informat 19(10):10034\u201310044. https:\/\/doi.org\/10.1109\/TII.2022.3232768","journal-title":"IEEE Trans Ind Informat"},{"issue":"6","key":"6655_CR8","doi-asserted-by":"publisher","first-page":"5155","DOI":"10.1109\/TIE.2020.2992017","volume":"68","author":"X Wu","year":"2021","unstructured":"Wu X, She J, Yu L, Dong H, Zhang W (2021) Contour tracking control of networked motion control system using improved equivalent-input-disturbance approach. IEEE Trans Ind Electron 68(6):5155\u20135165. https:\/\/doi.org\/10.1109\/TIE.2020.2992017","journal-title":"IEEE Trans Ind Electron"},{"key":"6655_CR9","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2024.3353380","author":"H Xie","year":"2024","unstructured":"Xie H, Zhang J, Jing Y, Dimirovski GM, Chen J (2024) Self-adjustable performance-based adaptive tracking control of uncertain nonlinear systems. IEEE Trans Autom Sci Eng. https:\/\/doi.org\/10.1109\/TASE.2024.3353380","journal-title":"IEEE Trans Autom Sci Eng"},{"issue":"2","key":"6655_CR10","doi-asserted-by":"publisher","first-page":"1176","DOI":"10.1109\/TAC.2022.3148384","volume":"68","author":"C Deng","year":"2023","unstructured":"Deng C, Wen C, Wang W, Li X, Yue D (2023) Distributed adaptive tracking control for high-order nonlinear multiagent systems over event-triggered communication. IEEE Trans Autom Control 68(2):1176\u20131183. https:\/\/doi.org\/10.1109\/TAC.2022.3148384","journal-title":"IEEE Trans Autom Control"},{"key":"6655_CR11","doi-asserted-by":"publisher","first-page":"188","DOI":"10.1016\/j.neucom.2018.11.008","volume":"330","author":"W Sun","year":"2019","unstructured":"Sun W, Wu Y, Wang L (2019) Trajectory tracking of constrained robotic systems via a hybrid control strategy. Neurocomputing 330:188\u2013195. https:\/\/doi.org\/10.1016\/j.neucom.2018.11.008","journal-title":"Neurocomputing"},{"key":"6655_CR12","doi-asserted-by":"publisher","first-page":"2363","DOI":"10.1007\/s11071-021-07080-0","volume":"107","author":"K Liu","year":"2022","unstructured":"Liu K, Wang R, Zheng S, Dong S, Sun G (2022) Fixed-time disturbance observer-based robust fault-tolerant tracking control for uncertain quadrotor UAV subject to input delay. Nonlinear Dyn 107:2363\u20132390. https:\/\/doi.org\/10.1007\/s11071-021-07080-0","journal-title":"Nonlinear Dyn"},{"key":"6655_CR13","doi-asserted-by":"publisher","first-page":"298","DOI":"10.1016\/j.ins.2020.08.024","volume":"545","author":"G Cui","year":"2021","unstructured":"Cui G, Yang W, Yu J (2021) Neural network-based finite-time adaptive tracking control of nonstrict-feedback nonlinear systems with actuator failures. Inf Sci 545:298\u2013311. https:\/\/doi.org\/10.1016\/j.ins.2020.08.024","journal-title":"Inf Sci"},{"key":"6655_CR14","doi-asserted-by":"publisher","first-page":"2583","DOI":"10.1007\/s11071-020-06050-2","volume":"102","author":"Z Zhao","year":"2020","unstructured":"Zhao Z, Cao D, Yang J, Wang H (2020) High-order sliding mode observer-based trajectory tracking control for a quadrotor UAV with uncertain dynamics. Nonlinear Dyn 102:2583\u20132596. https:\/\/doi.org\/10.1007\/s11071-020-06050-2","journal-title":"Nonlinear Dyn"},{"issue":"2","key":"6655_CR15","doi-asserted-by":"publisher","first-page":"689","DOI":"10.1109\/TII.2018.2809514","volume":"15","author":"B Xiao","year":"2019","unstructured":"Xiao B, Yin S (2019) Exponential tracking control of robotic manipulators with uncertain dynamics and kinematics. IEEE Trans Ind Informat 15(2):689\u2013698. https:\/\/doi.org\/10.1109\/TII.2018.2809514","journal-title":"IEEE Trans Ind Informat"},{"issue":"9","key":"6655_CR16","doi-asserted-by":"publisher","first-page":"4969","DOI":"10.1109\/TII.2019.2894282","volume":"15","author":"G Wen","year":"2019","unstructured":"Wen G, Chen CLP, Ge SS, Yang H, Liu X (2019) Optimized adaptive nonlinear tracking control using actor-critic reinforcement learning strategy. IEEE Trans Ind Informat 15(9):4969\u20134977. https:\/\/doi.org\/10.1109\/TII.2019.2894282","journal-title":"IEEE Trans Ind Informat"},{"issue":"9","key":"6655_CR17","doi-asserted-by":"publisher","first-page":"9621","DOI":"10.1109\/TCYB.2021.3060736","volume":"52","author":"M Ran","year":"2021","unstructured":"Ran M, Li J, Xie L (2021) Reinforcement-learning-based disturbance rejection control for uncertain nonlinear systems. IEEE Trans Cybern 52(9):9621\u20139633. https:\/\/doi.org\/10.1109\/TCYB.2021.3060736","journal-title":"IEEE Trans Cybern"},{"issue":"9","key":"6655_CR18","doi-asserted-by":"publisher","first-page":"3570","DOI":"10.1109\/TNNLS.2019.2945019","volume":"31","author":"Y Hu","year":"2020","unstructured":"Hu Y, Wang W, Liu H, Liu L (2020) Reinforcement learning tracking control for robotic manipulator with kernel-based dynamic model. IEEE Trans Neural Netw Learn Syst 31(9):3570\u20133578. https:\/\/doi.org\/10.1109\/TNNLS.2019.2945019","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"2","key":"6655_CR19","doi-asserted-by":"publisher","first-page":"913","DOI":"10.1109\/TCYB.2022.3192871","volume":"53","author":"S Zhao","year":"2023","unstructured":"Zhao S, Wang J, Xu H, Wang B (2023) Composite observer-based optimal attitude-tracking control with reinforcement learning for hypersonic vehicles. IEEE Trans Cybern 53(2):913\u2013926. https:\/\/doi.org\/10.1109\/TCYB.2022.3192871","journal-title":"IEEE Trans Cybern"},{"issue":"11","key":"6655_CR20","doi-asserted-by":"publisher","first-page":"4423","DOI":"10.1109\/TAC.2019.2905215","volume":"64","author":"C Chen","year":"2019","unstructured":"Chen C, Modares H, Xie K, Lewis FL, Wan Y, Xie S (2019) Reinforcement learning-based adaptive optimal exponential tracking control of linear systems with unknown dynamics. IEEE Trans Autom Control 64(11):4423\u20134438. https:\/\/doi.org\/10.1109\/TAC.2019.2905215","journal-title":"IEEE Trans Autom Control"},{"issue":"7","key":"6655_CR21","doi-asserted-by":"publisher","first-page":"4844","DOI":"10.1002\/rnc.7236","volume":"34","author":"X Chen","year":"2024","unstructured":"Chen X, Sun W, Gao X, Li Y (2024) Reinforcement learning-based event-triggered optimal control for unknown nonlinear systems with input delay. Int J Robust Nonlinear Control 34(7):4844\u20134863. https:\/\/doi.org\/10.1002\/rnc.7236","journal-title":"Int J Robust Nonlinear Control"},{"key":"6655_CR22","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2022.3227558","volume":"72","author":"FH Panahi","year":"2023","unstructured":"Panahi FH, Panahi FH, Ohtsuki T (2023) A reinforcement learning-based fire warning and suppression system using unmanned aerial vehicles. IEEE Trans Instrum Meas 72:1\u201316. https:\/\/doi.org\/10.1109\/TIM.2022.3227558","journal-title":"IEEE Trans Instrum Meas"},{"key":"6655_CR23","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2023.106785","volume":"126","author":"IML Pataro","year":"2023","unstructured":"Pataro IML, Cunha R, Gil JD, Guzm\u00e1n JL, Berenguel M, Lemos JM (2023) Optimal model-free adaptive control based on reinforcement Q-learning for solar thermal collector fields. Eng Appl Artif Intell 126:106785. https:\/\/doi.org\/10.1016\/j.engappai.2023.106785","journal-title":"Eng Appl Artif Intell"},{"issue":"6","key":"6655_CR24","doi-asserted-by":"publisher","first-page":"4229","DOI":"10.1109\/TII.2020.2990397","volume":"17","author":"M Dabbaghjamanesh","year":"2021","unstructured":"Dabbaghjamanesh M, Moeini A, Kavousi-Fard A (2021) Reinforcement learning-based load forecasting of electric vehicle charging station using Q-learning technique. IEEE Trans Ind Informat 17(6):4229\u20134237. https:\/\/doi.org\/10.1109\/TII.2020.2990397","journal-title":"IEEE Trans Ind Informat"},{"issue":"6","key":"6655_CR25","doi-asserted-by":"publisher","first-page":"877","DOI":"10.1109\/TCAD.2015.2481867","volume":"35","author":"RA Shafik","year":"2016","unstructured":"Shafik RA, Yang S, Das A, Maeda-Nunez LA, Merrett GV, Al-Hashimi BM (2016) Learning transfer-based adaptive energy minimization in embedded systems. IEEE Trans Comput-Aided Design Integr Circuits Syst 35(6):877\u2013890. https:\/\/doi.org\/10.1109\/TCAD.2015.2481867","journal-title":"IEEE Trans Comput-Aided Design Integr Circuits Syst"},{"key":"6655_CR26","unstructured":"Van Hasselt H (2010) Double Q-learning. Proc Adv Neural In Process Syst"},{"key":"6655_CR27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295","author":"H Van Hasselt","year":"2016","unstructured":"Van Hasselt H, Guez A, Silver D (2016) Deep reinforcement learning with double Q-learning. Proc AAAI Conf on Artif Intel. https:\/\/doi.org\/10.1609\/aaai.v30i1.10295","journal-title":"Proc AAAI Conf on Artif Intel"},{"key":"6655_CR28","doi-asserted-by":"publisher","first-page":"292","DOI":"10.1016\/j.eswa.2019.06.066","volume":"137","author":"I Carlucho","year":"2019","unstructured":"Carlucho I, De Paula M, Acosta GG (2019) Double Q-PID algorithm for mobile robot control. Expert Syst Appl 137:292\u2013307. https:\/\/doi.org\/10.1016\/j.eswa.2019.06.066","journal-title":"Expert Syst Appl"},{"key":"6655_CR29","doi-asserted-by":"publisher","DOI":"10.1016\/j.ymssp.2019.106374","volume":"139","author":"L Sun","year":"2020","unstructured":"Sun L, Liu Y (2020) Extended state observer augmented finite-time trajectory tracking control of uncertain mechanical systems. Mech Syst Signal Process 139:106374. https:\/\/doi.org\/10.1016\/j.ymssp.2019.106374","journal-title":"Mech Syst Signal Process"},{"key":"6655_CR30","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2025.3526321","author":"H Chen","year":"2025","unstructured":"Chen H, Zong G, Shen M, Gao F (2025) Finite-time resilient control of networked markov switched nonlinear systems: A relaxed design. IEEE Trans Syst Man Cybern. https:\/\/doi.org\/10.1109\/TSMC.2025.3526321","journal-title":"IEEE Trans Syst Man Cybern"},{"issue":"4","key":"6655_CR31","doi-asserted-by":"publisher","first-page":"3894","DOI":"10.1109\/TIE.2023.3277090","volume":"71","author":"T Xie","year":"2024","unstructured":"Xie T, Xian B, Gu X, Hu J, Liu M (2024) Disturbance observer-based fixed-time tracking control for a tilt trirotor unmanned aerial vehicle. IEEE Trans Ind Electron 71(4):3894\u20133903. https:\/\/doi.org\/10.1109\/TIE.2023.3277090","journal-title":"IEEE Trans Ind Electron"},{"issue":"3","key":"6655_CR32","doi-asserted-by":"publisher","first-page":"6139","DOI":"10.1109\/LRA.2022.3164448","volume":"7","author":"R Xi","year":"2022","unstructured":"Xi R, Xiao X, Ma T, Yang Z (2022) Adaptive sliding mode disturbance observer based robust control for robot manipulators towards assembly assistance. IEEE Robot Automat Lett 7(3):6139\u20136146. https:\/\/doi.org\/10.1109\/LRA.2022.3164448","journal-title":"IEEE Robot Automat Lett"},{"key":"6655_CR33","volume-title":"Modelling and control of robot manipulators","author":"L Sciavicco","year":"2012","unstructured":"Sciavicco L, Siciliano B (2012) Modelling and control of robot manipulators. Springer, Berlin, Germany"},{"key":"6655_CR34","doi-asserted-by":"publisher","DOI":"10.1016\/j.conengprac.2020.104670","volume":"106","author":"D Shi","year":"2021","unstructured":"Shi D, Zhang J, Sun Z, Shen G, Xia Y (2021) Composite trajectory tracking control for robot manipulator with active disturbance rejection. Control Eng Pract 106:104670. https:\/\/doi.org\/10.1016\/j.conengprac.2020.104670","journal-title":"Control Eng Pract"},{"issue":"5","key":"6655_CR35","doi-asserted-by":"publisher","first-page":"3275","DOI":"10.1109\/TMECH.2021.3107150","volume":"27","author":"P Yang","year":"2022","unstructured":"Yang P, Su Y (2022) Proximate fixed-time prescribed performance tracking control of uncertain robot manipulators. IEEE\/ASME Trans Mechatronics 27(5):3275\u20133285. https:\/\/doi.org\/10.1109\/TMECH.2021.3107150","journal-title":"IEEE\/ASME Trans Mechatronics"},{"key":"6655_CR36","volume-title":"Reinforcement learning: An introduction","author":"RS Sutton","year":"2018","unstructured":"Sutton RS, Barto AG (2018) Reinforcement learning: An introduction, 2nd edn. MIT Press, London","edition":"2"},{"key":"6655_CR37","doi-asserted-by":"publisher","DOI":"10.1016\/j.ymssp.2019.106379","volume":"135","author":"Y Su","year":"2020","unstructured":"Su Y, Zheng C, Mercorelli P (2020) Robust approximate fixed-time tracking control for uncertain robot manipulators. Mech Syst Signal Process 135:106379. https:\/\/doi.org\/10.1016\/j.ymssp.2019.106379","journal-title":"Mech Syst Signal Process"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-025-06655-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-025-06655-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-025-06655-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T13:38:41Z","timestamp":1758289121000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-025-06655-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,25]]},"references-count":37,"journal-issue":{"issue":"11","published-print":{"date-parts":[[2025,7]]}},"alternative-id":["6655"],"URL":"https:\/\/doi.org\/10.1007\/s10489-025-06655-3","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,6,25]]},"assertion":[{"value":"13 May 2025","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 June 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they do not have any financial or nonfinancial conflict of interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of interest"}}],"article-number":"818"}}