{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,23]],"date-time":"2026-03-23T22:45:27Z","timestamp":1774305927650,"version":"3.50.1"},"reference-count":48,"publisher":"Tech Science Press","issue":"1","license":[{"start":{"date-parts":[[2025,3,30]],"date-time":"2025-03-30T00:00:00Z","timestamp":1743292800000},"content-version":"vor","delay-in-days":88,"URL":"https:\/\/doi.org\/10.32604\/TSP-CROSSMARKPOLICY"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["CMC"],"published-print":{"date-parts":[[2025]]},"DOI":"10.32604\/cmc.2025.059955","type":"journal-article","created":{"date-parts":[[2025,2,27]],"date-time":"2025-02-27T03:27:22Z","timestamp":1740626842000},"page":"1257-1273","update-policy":"https:\/\/doi.org\/10.32604\/tsp-crossmarkpolicy","source":"Crossref","is-referenced-by-count":1,"title":["A Low-Collision and Efficient Grasping Method for Manipulator Based on Safe Reinforcement Learning"],"prefix":"10.32604","volume":"83","author":[{"given":"Qinglei","family":"Zhang","sequence":"first","affiliation":[]},{"given":"Bai","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Jiyun","family":"Qin","sequence":"additional","affiliation":[]},{"given":"Jianguo","family":"Duan","sequence":"additional","affiliation":[]},{"given":"Ying","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"17807","published-online":{"date-parts":[[2025]]},"reference":[{"key":"ref1","doi-asserted-by":"crossref","first-page":"101360","DOI":"10.1016\/j.aei.2021.101360","article-title":"Deep reinforcement learning-based safe interaction for industrial human-robot collaboration using intrinsic reward function","volume":"49","author":"Liu","year":"2021","journal-title":"Adv Eng Inform"},{"key":"ref2","doi-asserted-by":"crossref","first-page":"102130","DOI":"10.1016\/j.rcim.2021.102130","article-title":"A visual path-following learning approach for industrial robots using DRL","volume":"71","author":"Maldonado-Ramirez","year":"2021","journal-title":"Robot Comput Integr Manuf"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"105657","DOI":"10.1016\/j.ast.2019.105657","article-title":"Reinforcement learning in dual-arm trajectory planning for a free-floating space robot","volume":"98","author":"Wu","year":"2020","journal-title":"Aerosp Sci Technol"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"3273","DOI":"10.1109\/TSMC.2021.3064898","article-title":"Trajectory tracking for a dual-arm free-floating space robot with a class of general nonsingular predefined-time terminal sliding mode","volume":"52","author":"Liu","year":"2022","journal-title":"IEEE Trans Syst Man Cybern-Syst"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"2193","DOI":"10.1109\/TMECH.2019.2932378","article-title":"Strong continuum manipulator for flexible endoscopic surgery","volume":"24","author":"Hwang","year":"2019","journal-title":"IEEE\/ASME Trans Mechatron"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"1511","DOI":"10.1097\/JS9.0000000000000976","article-title":"Comparison of short-term outcomes of robotic-assisted radical colon cancer surgery using the kangduo surgical robotic system and the da vinci si robotic system: a prospective cohort study","volume":"110","author":"Liu","year":"2024","journal-title":"Int J Surg"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"4978","DOI":"10.1109\/LRA.2020.3004787","article-title":"Grasping in the wild: learning 6DoF closed-loop grasping from low-cost demonstrations","volume":"5","author":"Song","year":"2020","journal-title":"IEEE Robot Autom Lett"},{"key":"ref8","doi-asserted-by":"crossref","first-page":"105397","DOI":"10.1016\/j.compag.2020.105397","article-title":"An obstacle separation method for robotic picking of fruits in clusters","volume":"175","author":"Xiong","year":"2020","journal-title":"Comput Electron Agric"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"6899","DOI":"10.1109\/LRA.2021.3096239","article-title":"DDGC: generative deep dexterous grasping in clutter","volume":"6","author":"Lundell","year":"2021","journal-title":"IEEE Robot Autom Lett"},{"key":"ref10","doi-asserted-by":"crossref","first-page":"183","DOI":"10.1177\/0278364919859066","article-title":"Learning robust, real-time, reactive robotic grasping","volume":"39","author":"Morrison","year":"2020","journal-title":"Int J Rob Res"},{"key":"ref11","doi-asserted-by":"crossref","first-page":"2883","DOI":"10.1109\/LRA.2022.3143198","article-title":"Hierarchical policies for cluttered-scene grasping with latent plans","volume":"7","author":"Wang","year":"2022","journal-title":"IEEE Robot Autom Lett"},{"key":"ref12","doi-asserted-by":"crossref","first-page":"eaat8414","DOI":"10.1126\/science.aat8414","article-title":"Trends and challenges in robot manipulation","volume":"364","author":"Billard","year":"2019","journal-title":"Science"},{"key":"ref13","doi-asserted-by":"crossref","first-page":"105","DOI":"10.3390\/robotics10030105","article-title":"Reinforcement learning for pick and place operations in robotics: a survey","volume":"10","author":"Lobbezoo","year":"2021","journal-title":"Robotics"},{"key":"ref14","doi-asserted-by":"crossref","first-page":"90","DOI":"10.1177\/027836498600500106","article-title":"Real-time obstacle avoidance for manipulators and mobile robots","volume":"5","author":"Khatib","year":"1986","journal-title":"Int J Rob Res"},{"key":"ref15","doi-asserted-by":"crossref","first-page":"5440","DOI":"10.1109\/TNNLS.2023.3262109","article-title":"Adaptive hybrid optimization learning-based accurate motion planning of multi-joint arm","volume":"34","author":"Bai","year":"2023","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"ref16","doi-asserted-by":"crossref","first-page":"473","DOI":"10.1109\/TRO.2016.2539377","article-title":"Asymptotically near-optimal RRT for fast, high-quality motion planning","volume":"32","author":"Salzman","year":"2016","journal-title":"IEEE Trans Robot"},{"key":"ref17","doi-asserted-by":"crossref","first-page":"2035","DOI":"10.1109\/TMECH.2022.3175260","article-title":"Motion planning for closed-chain constraints based on probabilistic roadmap with improved connectivity","volume":"27","author":"Jang","year":"2022","journal-title":"IEEE\/ASME Trans Mechatron"},{"key":"ref18","first-page":"4303","article-title":"A path planning algorithm based on improved RRT sampling region","volume":"80","author":"Jiang","year":"2024","journal-title":"Comput Mater Contin"},{"key":"ref19","doi-asserted-by":"crossref","first-page":"eadf7843","DOI":"10.1126\/scirobotics.adf7843","article-title":"Motion planning around obstacles with convex optimization","volume":"8","author":"Marcucci","year":"2023","journal-title":"Sci Robot"},{"key":"ref20","doi-asserted-by":"crossref","first-page":"456","DOI":"10.1109\/TCST.2023.3331525","article-title":"Predictive control of cooperative robots sharing common workspace","volume":"32","author":"Tika","year":"2024","journal-title":"IEEE Trans Contr Syst Technol"},{"key":"ref21","doi-asserted-by":"crossref","first-page":"4643","DOI":"10.1109\/TMECH.2022.3160605","article-title":"Trajectory generation for multiprocess robotic tasks based on nested dual-memory deep deterministic policy gradient","volume":"27","author":"Ying","year":"2022","journal-title":"IEEE\/ASME Trans Mechatron"},{"key":"ref22","doi-asserted-by":"crossref","first-page":"240","DOI":"10.3390\/biomimetics8020240","article-title":"The task decomposition and dedicated reward-system-based reinforcement learning algorithm for pick-and-place","volume":"8","author":"Kim","year":"2023","journal-title":"Biomimetics"},{"key":"ref23","doi-asserted-by":"crossref","first-page":"2476","DOI":"10.1007\/s40815-022-01293-0","article-title":"Fuzzy deep deterministic policy gradient-based motion controller for humanoid robot","volume":"24","author":"Kuo","year":"2022","journal-title":"Int J Fuzzy Syst"},{"key":"ref24","doi-asserted-by":"crossref","first-page":"1151","DOI":"10.1109\/TRO.2022.3226108","article-title":"Grasping living objects with adversarial behaviors using inverse reinforcement learning","volume":"39","author":"Hu","year":"2023","journal-title":"IEEE Trans Robot"},{"key":"ref25","doi-asserted-by":"crossref","first-page":"106446","DOI":"10.1016\/j.ast.2020.106446","volume":"109","author":"Li","year":"2021","journal-title":"Aerosp Sci Technol"},{"key":"ref26","doi-asserted-by":"crossref","first-page":"2759","DOI":"10.1109\/TIE.2022.3172754","article-title":"Solving robotic manipulation with sparse reward reinforcement learning via graph-based diversity and proximity","volume":"70","author":"Bing","year":"2022","journal-title":"IEEE Trans Ind Electron"},{"key":"ref27","doi-asserted-by":"crossref","first-page":"4915","DOI":"10.1109\/LRA.2021.3070252","article-title":"Recovery rl: safe reinforcement learning with learned recovery zones","volume":"6","author":"Thananjeyan","year":"2021","journal-title":"IEEE Robot Autom Lett"},{"key":"ref28","doi-asserted-by":"crossref","first-page":"5435","DOI":"10.1109\/TNNLS.2021.3084685","article-title":"Safe reinforcement learning with stability guarantee for motion planning of autonomous vehicles","volume":"32","author":"Zhang","year":"2021","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"ref29","doi-asserted-by":"crossref","first-page":"103360","DOI":"10.1016\/j.engappai.2019.103360","article-title":"Teaching a humanoid robot to walk faster through safe reinforcement learning","volume":"88","author":"Garc\u00eda","year":"2020","journal-title":"Eng Appl Artif Intell"},{"key":"ref30","doi-asserted-by":"crossref","first-page":"1062","DOI":"10.1109\/TSTE.2022.3148236","article-title":"Deep reinforcement learning from demonstrations to assist service restoration in islanded microgrids","volume":"13","author":"Du","year":"2022","journal-title":"IEEE Trans Sustain Energy"},{"key":"ref31","first-page":"449","article-title":"Safety-constrained multi-agent reinforcement learning for power quality control in distributed renewable energy networks","volume":"79","author":"Zhao","year":"2024","journal-title":"Comput Mater Contin"},{"key":"ref32","doi-asserted-by":"crossref","first-page":"107980","DOI":"10.1016\/j.ast.2022.107980","article-title":"A learning system for motion planning of free-float dual-arm space manipulator towards non-cooperative object","volume":"131","author":"Wang","year":"2022","journal-title":"Aerosp Sci Technol"},{"key":"ref33","doi-asserted-by":"crossref","first-page":"5631","DOI":"10.1109\/LRA.2024.3396368","article-title":"SRL-VIC: a variable stiffness-based safe reinforcement learning for contact-rich robotic tasks","volume":"9","author":"Zhang","year":"2024","journal-title":"IEEE Robot Autom Lett"},{"key":"ref34","doi-asserted-by":"crossref","first-page":"2349","DOI":"10.1109\/TASE.2024.3378915","article-title":"Safe learning by constraint-aware policy optimization for robotic ultrasound imaging","volume":"22","author":"Duan","year":"2024","journal-title":"IEEE Trans Autom Sci Eng"},{"key":"ref35","doi-asserted-by":"crossref","first-page":"5709","DOI":"10.1109\/LRA.2020.3010739","article-title":"Learning force control for contact-rich manipulation tasks with rigid position-controlled robots","volume":"5","author":"Beltran-Hernandez","year":"2020","journal-title":"IEEE Robot Autom Lett"},{"key":"ref36","doi-asserted-by":"crossref","first-page":"110684","DOI":"10.1016\/j.automatica.2022.110684","article-title":"Safe exploration in model-based reinforcement learning using control barrier functions","volume":"147","author":"Cohen","year":"2023","journal-title":"Automatica"},{"key":"ref37","doi-asserted-by":"crossref","first-page":"11216","DOI":"10.1109\/TPAMI.2024.3457538","article-title":"A review of safe reinforcement learning: methods, theories, and applications","volume":"46","author":"Gu","year":"2024","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"ref38","series-title":"2018 IEEE International Conference on Robotics and Automation (ICRA)","first-page":"6236","article-title":"OptLayer\u2014practical constrained optimization for deep reinforcement learning in the real world","author":"Pham","year":"2018 May 21\u201325"},{"key":"ref39","series-title":"The 34th International Conference on Machine Learning","first-page":"22","article-title":"Constrained policy optimization","author":"Achiam","year":"2017 Aug 6\u201311"},{"key":"ref40","unstructured":"Schulman J. Trust region policy optimization. arXiv:150205477. 2015."},{"key":"ref41","doi-asserted-by":"crossref","first-page":"2223","DOI":"10.1109\/TNNLS.2020.3044196","article-title":"An off-policy trust region policy optimization method with monotonic improvement guarantee for deep reinforcement learning","volume":"33","author":"Meng","year":"2021","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"ref42","doi-asserted-by":"crossref","first-page":"4727","DOI":"10.1109\/TNNLS.2021.3059912","article-title":"Hierarchical reinforcement learning with universal policies for multistep robotic manipulation","volume":"33","author":"Yang","year":"2021","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"ref43","doi-asserted-by":"crossref","first-page":"64","DOI":"10.1016\/j.neucom.2022.05.006","article-title":"A deep reinforcement learning based method for real-time path planning and dynamic obstacle avoidance","volume":"497","author":"Chen","year":"2022","journal-title":"Neurocomputing"},{"key":"ref44","unstructured":"Schulman J, Wolski F, Dhariwal P, Radford A, Klimov O. Proximal policy optimization algorithms. arXiv:170706347. 2017."},{"key":"ref45","doi-asserted-by":"crossref","first-page":"750","DOI":"10.1016\/j.ins.2022.07.111","article-title":"Proximal policy optimization via enhanced exploration efficiency","volume":"609","author":"Zhang","year":"2022","journal-title":"Inf Sci"},{"key":"ref46","doi-asserted-by":"crossref","first-page":"1899","DOI":"10.1007\/s40747-021-00366-1","article-title":"Collision-free path planning for welding manipulator via hybrid algorithm of deep reinforcement learning and inverse kinematics","volume":"8","author":"Zhong","year":"2022","journal-title":"Complex Intell Syst"},{"key":"ref47","doi-asserted-by":"crossref","first-page":"1904","DOI":"10.1109\/TCST.2024.3377876","article-title":"Path-following control of unmanned underwater vehicle based on an improved TD3 deep reinforcement learning","volume":"32","author":"Fan","year":"2024","journal-title":"IEEE Trans Contr Syst Technol"},{"key":"ref48","doi-asserted-by":"crossref","first-page":"143","DOI":"10.1016\/j.aml.2018.05.008","article-title":"A remark on Samuelson\u2019s variational principle in economics","volume":"84","author":"Wu","year":"2018","journal-title":"Appl Math Lett"}],"container-title":["Computers, Materials &amp; Continua"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/cdn.techscience.cn\/files\/cmc\/2025\/TSP_CMC-83-1\/TSP_CMC_59955\/TSP_CMC_59955.pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T06:35:26Z","timestamp":1763102126000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.techscience.com\/cmc\/v83n1\/60085"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":48,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2025]]},"published-print":{"date-parts":[[2025]]}},"URL":"https:\/\/doi.org\/10.32604\/cmc.2025.059955","relation":{},"ISSN":["1546-2226"],"issn-type":[{"value":"1546-2226","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"2024-10-21","order":0,"name":"received","label":"Received","group":{"name":"publication_history","label":"Publication History"}},{"value":"2025-02-06","order":1,"name":"accepted","label":"Accepted","group":{"name":"publication_history","label":"Publication History"}},{"value":"2025-03-26","order":2,"name":"published","label":"Published Online","group":{"name":"publication_history","label":"Publication History"}}]}}