{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T09:08:16Z","timestamp":1776157696159,"version":"3.50.1"},"reference-count":73,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of 
China","doi-asserted-by":"publisher","award":["62273343"],"award-info":[{"award-number":["62273343"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Robotics and Computer-Integrated Manufacturing"],"published-print":{"date-parts":[[2026,8]]},"DOI":"10.1016\/j.rcim.2026.103259","type":"journal-article","created":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T11:16:54Z","timestamp":1770895014000},"page":"103259","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Robot assembly using variable admittance control with reinforcement learning from demonstrations in a constrained region"],"prefix":"10.1016","volume":"100","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3865-0493","authenticated-orcid":false,"given":"Jianhua","family":"Su","sequence":"first","affiliation":[]},{"given":"Jinpo","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Liancheng","family":"Shen","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.rcim.2026.103259_bib0001","doi-asserted-by":"crossref","first-page":"552","DOI":"10.1080\/01691864.2015.1130172","article-title":"Guidance algorithm for complex-shape peg-in-hole strategy based on geometrical information and force control","author":"Song","year":"2016","journal-title":"Adv. Robot."},{"key":"10.1016\/j.rcim.2026.103259_bib0002","first-page":"571","article-title":"Interpretation of force and moment signals for compliant peg-in-hole assembly","volume":"1","author":"Newman","year":"2001","journal-title":"IEEE Int. Conf. Robot. Autom."},{"key":"10.1016\/j.rcim.2026.103259_bib0003","series-title":"IEEE\/RSJ Int. Conf. 
on Intelligent Robots and Systems","first-page":"6044","article-title":"Robust, compliant assembly with elastic parts and model uncertainty","author":"Wirnshofer","year":"2019"},{"key":"10.1016\/j.rcim.2026.103259_bib0004","series-title":"IEEE\/RSJ Int. Conf. on Intelligent Robots and Systems","first-page":"2062","article-title":"Deep reinforcement learning for robotic assembly of mixed deformable and rigid objects","author":"Luo","year":"2018"},{"key":"10.1016\/j.rcim.2026.103259_bib0005","series-title":"IEEE\/RSJ Int. Conf. on Intelligent Robots and Systems","first-page":"819","article-title":"Deep reinforcement learning for high precision assembly tasks","author":"Inoue","year":"2017"},{"issue":"2","key":"10.1016\/j.rcim.2026.103259_bib0006","doi-asserted-by":"crossref","first-page":"2225","DOI":"10.1109\/LRA.2021.3061374","article-title":"Learning variable impedance control via inverse reinforcement learning for force-related tasks","volume":"6","author":"Zhang","year":"2021","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"3","key":"10.1016\/j.rcim.2026.103259_bib0007","doi-asserted-by":"crossref","first-page":"1658","DOI":"10.1109\/TII.2018.2868859","article-title":"Feedback deep deterministic policy gradient with fuzzy reward for robotic multiple peg-in-hole assembly tasks","volume":"15","author":"Xu","year":"2019","journal-title":"IEEE Trans. Ind. Inform."},{"key":"10.1016\/j.rcim.2026.103259_bib0008","series-title":"IEEE\/RSJ Int. Conf. on Intelligent Robots and Systems","first-page":"5548","article-title":"Deep reinforcement learning for industrial insertion tasks with visual inputs and natural rewards","author":"Schoettler","year":"2020"},{"issue":"1","key":"10.1016\/j.rcim.2026.103259_bib0009","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1109\/LRA.2020.3028529","article-title":"Stability-guaranteed reinforcement learning for contact-rich manipulation","volume":"6","author":"Khader","year":"2021","journal-title":"IEEE Robot. Autom. 
Lett."},{"issue":"4","key":"10.1016\/j.rcim.2026.103259_bib0010","doi-asserted-by":"crossref","first-page":"5709","DOI":"10.1109\/LRA.2020.3010739","article-title":"Learning force control for contact-rich manipulation tasks with rigid position-controlled robots","volume":"5","author":"Beltran-Hernandez","year":"2020","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"19","key":"10.1016\/j.rcim.2026.103259_bib0011","doi-asserted-by":"crossref","first-page":"6923","DOI":"10.3390\/app10196923","article-title":"Variable compliance control for robotic peg-in-hole assembly: a deep-reinforcement-learning approach","volume":"10","author":"Beltran-Hernandez","year":"2020","journal-title":"Appl. Sci."},{"issue":"7","key":"10.1016\/j.rcim.2026.103259_bib0012","doi-asserted-by":"crossref","first-page":"820","DOI":"10.1177\/0278364911402527","article-title":"Learning variable impedance control","volume":"30","author":"Buchli","year":"2011","journal-title":"Int. J. Robot. Res."},{"issue":"4","key":"10.1016\/j.rcim.2026.103259_bib0013","doi-asserted-by":"crossref","first-page":"6129","DOI":"10.1109\/LRA.2020.3011379","article-title":"Learning variable impedance control for contact sensitive tasks","volume":"5","author":"Bogdanovic","year":"2020","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.rcim.2026.103259_bib0014","doi-asserted-by":"crossref","unstructured":"A.R. Correia and L. Alexandre, L. \u201cA survey of demonstration learning,\u201d ArXiv, abs\/2303.11191, 2023.","DOI":"10.2139\/ssrn.4390650"},{"key":"10.1016\/j.rcim.2026.103259_bib0015","first-page":"451","article-title":"Modeling of the peg-in-hole task based on impedance parameters and HMM","volume":"1","author":"Itabashi","year":"1997","journal-title":"IEEE\/RSJ Int. Conf. Intell. 
Robot Syst."},{"key":"10.1016\/j.rcim.2026.103259_bib0016","doi-asserted-by":"crossref","DOI":"10.1016\/j.rcim.2020.101996","article-title":"A peg-in-hole robot assembly system based on Gauss mixture model","volume":"67","author":"Song","year":"2021","journal-title":"Robot Comput. Integr. Manuf."},{"issue":"2","key":"10.1016\/j.rcim.2026.103259_bib0017","doi-asserted-by":"crossref","first-page":"906","DOI":"10.1109\/TRO.2021.3087317","article-title":"Ergodic exploration using tensor train: applications in insertion tasks","volume":"38","author":"Shetty","year":"2022","journal-title":"IEEE Trans. Robot."},{"issue":"2","key":"10.1016\/j.rcim.2026.103259_bib0018","doi-asserted-by":"crossref","first-page":"3349","DOI":"10.1109\/LRA.2022.3146949","article-title":"Contact state estimation for peg-in-hole assembly using Gaussian Mixture Model","volume":"7","author":"Lee","year":"2022","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.rcim.2026.103259_bib0019","doi-asserted-by":"crossref","first-page":"199","DOI":"10.1007\/s10514-015-9435-2","article-title":"Adaptation of manipulation skills in physical contact with the environment to reference force profiles","volume":"39","author":"Abu-Dakka","year":"2015","journal-title":"Auton. Robot"},{"issue":"3","key":"10.1016\/j.rcim.2026.103259_bib0020","doi-asserted-by":"crossref","first-page":"513","DOI":"10.1109\/TRO.2016.2540623","article-title":"Learning physical collaborative robot behaviors from human demonstrations","volume":"32","author":"Rozo","year":"2016","journal-title":"IEEE Trans. Robot."},{"issue":"5","key":"10.1016\/j.rcim.2026.103259_bib0021","doi-asserted-by":"crossref","first-page":"3132","DOI":"10.1109\/TMECH.2021.3110825","article-title":"Learning to assemble noncylindrical parts using trajectory learning and force tracking","volume":"27","author":"Su","year":"2022","journal-title":"IEEE\/ASME Trans. 
Mechatron."},{"issue":"35","key":"10.1016\/j.rcim.2026.103259_bib0022","doi-asserted-by":"crossref","first-page":"1798","DOI":"10.1109\/TPAMI.2013.50","article-title":"Representation learning: a review and new perspectives","volume":"35","author":"Bengio","year":"2013","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10.1016\/j.rcim.2026.103259_bib0023","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2022.105753","article-title":"Reinforcement learning from expert demonstrations with application to redundant robot control","volume":"119","author":"Ram\u00edrez","year":"2023","journal-title":"Eng. Appl. Artif. Intell."},{"key":"10.1016\/j.rcim.2026.103259_bib0024","series-title":"1984 American Control Conference","first-page":"304","article-title":"Impedance control: an approach to manipulation","author":"Hogan","year":"1984"},{"key":"10.1016\/j.rcim.2026.103259_bib0025","series-title":"IEEE Int. Conf. on Robotics and Automation","first-page":"499","article-title":"Force tracking in impedance control","author":"Seraji","year":"1993"},{"key":"10.1016\/j.rcim.2026.103259_bib0026","doi-asserted-by":"crossref","first-page":"1","DOI":"10.3389\/frobt.2020.590681","article-title":"Variable impedance control and learning - a review","volume":"7","author":"Abu-Dakka","year":"2020","journal-title":"Front. Robot. AI"},{"key":"10.1016\/j.rcim.2026.103259_bib0027","author":"Lillicrap"},{"key":"10.1016\/j.rcim.2026.103259_bib0028","author":"Schulman"},{"issue":"5","key":"10.1016\/j.rcim.2026.103259_bib0029","doi-asserted-by":"crossref","first-page":"2311","DOI":"10.1109\/TMECH.2014.2375638","article-title":"The concept of attractive region in environment and its application in high-precision tasks with low-precision systems","volume":"20","author":"Qiao","year":"2015","journal-title":"IEEE\/ASME Trans. 
Mechatron."},{"issue":"3","key":"10.1016\/j.rcim.2026.103259_bib0030","doi-asserted-by":"crossref","first-page":"6661","DOI":"10.1109\/LRA.2022.3176718","article-title":"Uncertainty-driven spiral trajectory for robotic peg-in-hole assembly","volume":"7","author":"Kang","year":"2022","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.rcim.2026.103259_bib0031","series-title":"IEEE\/RSJ Int. Conf. on Intelligent Robots and Systems","first-page":"5792","article-title":"Quickly inserting pegs into uncertain holes using multi-view images and deep network trained on synthetic data","author":"Triyonoputro","year":"2019"},{"key":"10.1016\/j.rcim.2026.103259_bib0032","first-page":"1465","article-title":"Search strategies for peg-in-hole assemblies with position uncertainty","volume":"3","author":"Chhatpar","year":"2001","journal-title":"IEEE\/RSJ Int. Conf. Intell. Robots Syst."},{"issue":"3","key":"10.1016\/j.rcim.2026.103259_bib0033","doi-asserted-by":"crossref","first-page":"4447","DOI":"10.1109\/LRA.2020.3000428","article-title":"Compliant peg-in-hole assembly using partial spiral force trajectory with tilted peg posture","volume":"5","author":"Park","year":"2020","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"8","key":"10.1016\/j.rcim.2026.103259_bib0034","doi-asserted-by":"crossref","first-page":"8157","DOI":"10.1109\/TIE.2021.3108710","article-title":"Robot precision assembly combining with passive and active compliant motions","volume":"69","author":"Su","year":"2022","journal-title":"IEEE Trans. Ind. Electron."},{"key":"10.1016\/j.rcim.2026.103259_bib0035","first-page":"1547","article-title":"Learning attractor landscapes for learning motor primitives","volume":"15","author":"Ijspeert","year":"2010","journal-title":"Adv. Neural. Inf. 
Process Syst."},{"issue":"2","key":"10.1016\/j.rcim.2026.103259_bib0036","doi-asserted-by":"crossref","first-page":"328","DOI":"10.1162\/NECO_a_00393","article-title":"Dynamical movement primitives: learning attractor models for motor behaviors","volume":"25","author":"Ijspeert","year":"2013","journal-title":"Neural Comput."},{"issue":"4","key":"10.1016\/j.rcim.2026.103259_bib0037","doi-asserted-by":"crossref","first-page":"1418","DOI":"10.1109\/TCST.2019.2913129","article-title":"Dynamic trajectory generation and a robust controller to intercept a moving ball in a game setting","volume":"28","author":"Prakash","year":"2020","journal-title":"IEEE Trans. Control Syst. Technol."},{"issue":"7","key":"10.1016\/j.rcim.2026.103259_bib0038","doi-asserted-by":"crossref","first-page":"1299","DOI":"10.1017\/S0263574720001186","article-title":"Motion adaptation based on learning the manifold of task and dynamic movement primitive parameters","volume":"39","author":"Cohen","year":"2021","journal-title":"Robotica"},{"key":"10.1016\/j.rcim.2026.103259_bib0039","doi-asserted-by":"crossref","DOI":"10.1016\/j.rcim.2019.101863","article-title":"Reinforcement learning based on movement primitives for contact tasks","volume":"62","author":"Kim","year":"2020","journal-title":"Robot Comput. Integr. Manuf."},{"key":"10.1016\/j.rcim.2026.103259_bib0040","series-title":"IEEE Int. Conf. on Robotics and Automation","first-page":"6023","article-title":"Residual reinforcement learning for robot control","author":"Johannink","year":"2019"},{"issue":"2","key":"10.1016\/j.rcim.2026.103259_bib0041","doi-asserted-by":"crossref","first-page":"4488","DOI":"10.1109\/LRA.2022.3150024","article-title":"Residual learning from demonstration: adapting DMPs for contact-rich manipulation","volume":"7","author":"Davchev","year":"2022","journal-title":"IEEE Robot. Autom. 
Lett."},{"key":"10.1016\/j.rcim.2026.103259_bib0042","series-title":"Springer Handbook of Robotics","first-page":"35","article-title":"Dynamics, \u201cchapter 3: dynamics,\u201d","author":"Featherstone","year":"2016"},{"key":"10.1016\/j.rcim.2026.103259_bib0043","doi-asserted-by":"crossref","first-page":"200","DOI":"10.1108\/AA-09-2016-120","article-title":"Force control for a rigid dual peg-in-hole assembly","volume":"37","author":"Zhang","year":"2017","journal-title":"Assem. Autom."},{"issue":"5","key":"10.1016\/j.rcim.2026.103259_bib0044","doi-asserted-by":"crossref","first-page":"469","DOI":"10.1016\/j.robot.2008.10.024","article-title":"A survey of robot learning from demonstration","volume":"57","author":"Argall","year":"2009","journal-title":"Rob Aut. Syst"},{"issue":"4","key":"10.1016\/j.rcim.2026.103259_bib0045","doi-asserted-by":"crossref","first-page":"5581","DOI":"10.1109\/LRA.2020.3009076","article-title":"Robotic deep rolling with iterative learning motion and force control","volume":"5","author":"Chen","year":"2020","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"3","key":"10.1016\/j.rcim.2026.103259_bib0046","doi-asserted-by":"crossref","first-page":"1234","DOI":"10.1109\/TMECH.2022.3217048","article-title":"Robot imitation learning from image-only observation without real-world interaction","volume":"28","author":"Xu","year":"2023","journal-title":"IEEE\/ASME Trans. Mechatron."},{"key":"10.1016\/j.rcim.2026.103259_bib0047","doi-asserted-by":"crossref","first-page":"11166","DOI":"10.1109\/LRA.2022.3196122","article-title":"Learning category\u2013level generalizable object manipulation policy via generative adversarial self-imitation learning from demonstrations","volume":"7","author":"Shen","year":"2022","journal-title":"IEEE Robot. Autom. 
Lett."},{"key":"10.1016\/j.rcim.2026.103259_bib0048","doi-asserted-by":"crossref","first-page":"4307","DOI":"10.1007\/s10462-021-10108-x","article-title":"A survey of inverse reinforcement learning","volume":"55","author":"Adams","year":"2022","journal-title":"Artif. Intell. Rev."},{"issue":"6","key":"10.1016\/j.rcim.2026.103259_bib0049","doi-asserted-by":"crossref","first-page":"3461","DOI":"10.1109\/TSMC.2022.3225381","article-title":"An optimal iterative learning control approach for linear systems with nonuniform trial lengths under input constraints","volume":"53","author":"Zhuang","year":"2023","journal-title":"IEEE Trans. Syst. Man Cybern.: Syst."},{"key":"10.1016\/j.rcim.2026.103259_bib0050","series-title":"IEEE 10th Data Driven Control and Learning Systems Conference","first-page":"1064","article-title":"Adaptive iterative learning control for non-equal length tasks in the presence of initial errors","author":"Sun","year":"2021"},{"key":"10.1016\/j.rcim.2026.103259_bib0051","series-title":"IEEE\/ASME International Conference on Advanced Intelligent Mechatronics (AIM)","first-page":"1185","article-title":"Impedance adaptation by reinforcement learning with contact dynamic movement primitives","author":"Chang","year":"2022"},{"issue":"4","key":"10.1016\/j.rcim.2026.103259_bib0052","doi-asserted-by":"crossref","first-page":"10898","DOI":"10.1109\/LRA.2022.3191070","article-title":"Reinforcement learning of impedance policies for peg-in-hole tasks: role of asymmetric matrices","volume":"7","author":"Kozlovsky","year":"2022","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.rcim.2026.103259_bib0053","doi-asserted-by":"crossref","first-page":"126","DOI":"10.1115\/1.3139652","article-title":"Hybrid position\/force control of manipulators","volume":"105","author":"Raibert","year":"1981","journal-title":"ASME J. Dyn. Syst. Meas. 
Control"},{"key":"10.1016\/j.rcim.2026.103259_bib0054","series-title":"IEEE International Conference on Robotics and Automation","first-page":"554","article-title":"Unified impedance and admittance control","author":"Ott","year":"2010"},{"key":"10.1016\/j.rcim.2026.103259_bib0055","series-title":"Proceedings of 1995 IEEE International Conference on Robotics and Automation","first-page":"1919","article-title":"Peg-on-hole: a model based solution to peg and hole alignment","volume":"2","author":"Bruyninckx","year":"1995"},{"issue":"5","key":"10.1016\/j.rcim.2026.103259_bib0056","doi-asserted-by":"crossref","first-page":"2218","DOI":"10.1109\/TMECH.2017.2705180","article-title":"Condition and strategy analysis for assembly based on attractive region in environment","volume":"22","author":"Li","year":"2017","journal-title":"IEEE\/ASME Trans. Mechatron."},{"key":"10.1016\/j.rcim.2026.103259_bib0057","doi-asserted-by":"crossref","first-page":"54","DOI":"10.1016\/j.robot.2018.01.009","article-title":"Adaptive variable impedance control for dynamic contact force tracking in uncertain environment","volume":"102","author":"Duan","year":"2018","journal-title":"Rob Aut. Syst"},{"issue":"5","key":"10.1016\/j.rcim.2026.103259_bib0058","doi-asserted-by":"crossref","first-page":"1298","DOI":"10.1109\/TRO.2016.2593492","article-title":"Stability considerations for variable impedance control","volume":"32","author":"Kronander","year":"2016","journal-title":"IEEE Trans. 
Robot."},{"key":"10.1016\/j.rcim.2026.103259_bib0059","series-title":"Proceedings of the International Conference on Machine Learning","first-page":"1861","article-title":"Soft actor-critic: off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"Haarnoja","year":"2018"},{"issue":"1","key":"10.1016\/j.rcim.2026.103259_bib0060","doi-asserted-by":"crossref","first-page":"19","DOI":"10.1177\/027836498600500102","article-title":"Using back-projections for fine motion planning with uncertainty","volume":"5","author":"Erdmann","year":"1986","journal-title":"Int. J. Robot. Res."},{"issue":"8","key":"10.1016\/j.rcim.2026.103259_bib0061","doi-asserted-by":"crossref","first-page":"6299","DOI":"10.1109\/TIE.2017.2682002","article-title":"Compliance-based robotic peg-in-hole assembly strategy without force feedback","volume":"64","author":"Park","year":"2017","journal-title":"IEEE Trans. Ind. Electron."},{"issue":"2","key":"10.1016\/j.rcim.2026.103259_bib0062","doi-asserted-by":"crossref","first-page":"409","DOI":"10.1109\/TSMC.2017.2759148","article-title":"A robust adaptive model reference impedance control of a robotic manipulator with actuator saturation","volume":"50","author":"Arefinia","year":"2020","journal-title":"IEEE Trans. Syst. Man Cybern.: Syst."},{"issue":"7","key":"10.1016\/j.rcim.2026.103259_bib0063","doi-asserted-by":"crossref","first-page":"4193","DOI":"10.1109\/TSMC.2019.2930582","article-title":"Stability-guaranteed variable impedance control of robots based on approximate dynamic inversion","volume":"51","author":"Sun","year":"2021","journal-title":"IEEE Trans. Syst. Man Cybern.: Syst."},{"issue":"12","key":"10.1016\/j.rcim.2026.103259_bib0064","doi-asserted-by":"crossref","first-page":"2487","DOI":"10.1109\/TSMC.2017.2767566","article-title":"UDE-based variable impedance control of uncertain robot systems","volume":"49","author":"Dong","year":"2019","journal-title":"IEEE Trans. Syst. 
Man Cybern.: Syst."},{"key":"10.1016\/j.rcim.2026.103259_bib0065","author":"Silver"},{"key":"10.1016\/j.rcim.2026.103259_bib0066","series-title":"Proceedings of the IAS Conference on Robot Learning","article-title":"Reinforcement learning of insertion tasks: a comparison between policy structures","volume":"24","author":"Mulder","year":"2024"},{"key":"10.1016\/j.rcim.2026.103259_bib0067","series-title":"2019 International Conference on Robotics and Automation (ICRA)","first-page":"6023","article-title":"Residual reinforcement learning for robot control","author":"Johannink","year":"2019"},{"key":"10.1016\/j.rcim.2026.103259_bib0068","series-title":"2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","first-page":"5548","article-title":"Deep reinforcement learning for industrial insertion tasks with visual inputs and natural rewards","author":"Schoettler","year":"2020"},{"key":"10.1016\/j.rcim.2026.103259_bib0069","doi-asserted-by":"crossref","first-page":"245","DOI":"10.1016\/j.jmsy.2023.11.008","article-title":"A residual reinforcement learning method for robotic assembly using visual and force information","volume":"72","author":"Zhang","year":"2024","journal-title":"J. Manuf. Syst."},{"key":"10.1016\/j.rcim.2026.103259_bib0070","doi-asserted-by":"crossref","first-page":"4488","DOI":"10.1109\/LRA.2022.3150024","article-title":"Residual learning from demonstration: adapting dmps for contact-rich manipulation","volume":"7","author":"Davchev","year":"2022","journal-title":"IEEE Robot. Autom. 
Lett."},{"key":"10.1016\/j.rcim.2026.103259_bib0071","series-title":"IEEE\/ASME Transactions on Mechatronics","first-page":"122","article-title":"A systematic design procedure of force controllers for industrial robots","volume":"5","author":"Natale","year":"2000"},{"key":"10.1016\/j.rcim.2026.103259_bib0072","doi-asserted-by":"crossref","DOI":"10.1016\/j.rcim.2024.102896","article-title":"Variable impedance control on contact-rich manipulation of a collaborative industrial mobile manipulator: an imitation learning approach","volume":"92","author":"Zhou","year":"2025","journal-title":"Robot Comput. Integr. Manuf."},{"issue":"6","key":"10.1016\/j.rcim.2026.103259_bib0073","doi-asserted-by":"crossref","first-page":"730","DOI":"10.1108\/IR-07-2016-0186","article-title":"Study on dual peg-in-hole insertion using of constraints formed in the environment","volume":"44","author":"Su","year":"2017","journal-title":"Ind. Robot: Int. J."}],"container-title":["Robotics and Computer-Integrated Manufacturing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0736584526000384?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0736584526000384?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T08:16:19Z","timestamp":1776154579000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0736584526000384"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,8]]},"references-count":73,"alternative-id":["S0736584526000384"],"URL":"https:\/\/doi.org\/10.1016\/j.rcim.2026.103259","relation":{},"ISSN":["0736-5845"],"issn-type":[{"value":"0736-5845","type":"print"}],"subject":[],"published":{"date-parts":[[2026,8]]},"assertion":[{"value":"Elsevier"
,"name":"publisher","label":"This article is maintained by"},{"value":"Robot assembly using variable admittance control with reinforcement learning from demonstrations in a constrained region","name":"articletitle","label":"Article Title"},{"value":"Robotics and Computer-Integrated Manufacturing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.rcim.2026.103259","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier Ltd. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"103259"}}