{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T03:47:23Z","timestamp":1769917643720,"version":"3.49.0"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,7,12]],"date-time":"2021-07-12T00:00:00Z","timestamp":1626048000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,7,12]],"date-time":"2021-07-12T00:00:00Z","timestamp":1626048000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,7,12]],"date-time":"2021-07-12T00:00:00Z","timestamp":1626048000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,7,12]]},"DOI":"10.1109\/aim46487.2021.9517356","type":"proceedings-article","created":{"date-parts":[[2021,8,24]],"date-time":"2021-08-24T20:58:48Z","timestamp":1629838728000},"page":"1045-1051","source":"Crossref","is-referenced-by-count":12,"title":["Towards Real-World Force-Sensitive Robotic Assembly through Deep Reinforcement Learning in Simulations"],"prefix":"10.1109","author":[{"given":"Marius","family":"Hebecker","sequence":"first","affiliation":[]},{"given":"Jens","family":"Lambrecht","sequence":"additional","affiliation":[]},{"given":"Markus","family":"Schmitz","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref31","article-title":"Openai gym","author":"brockman","year":"2016"},{"key":"ref30","year":"2020","journal-title":"Universal robots ros driver"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1177\/0278364911402527"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2019.2959445"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341617"},{"key":"ref13","doi-asserted-by":"crossref","first-page":"13","DOI":"10.1109\/37.257890","article-title":"Acquiring robot skills via reinforcement learning","volume":"14","author":"gullapalli","year":"1994","journal-title":"IEEE Control Systems Magazine"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202244"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1108\/IR-07-2014-0363"},{"key":"ref18","first-page":"261","author":"schaal","year":"2006","journal-title":"Dynamic movement primitives - a framework for motor control in humans and humanoid robotics"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8967523"},{"key":"ref28","author":"liang","year":"2017","journal-title":"Ray rllib A composable and scalable reinforcement learning library"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref27","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017"},{"key":"ref3","author":"loh","year":"2019","journal-title":"Activating agile product-life-cycle management in automotive"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/MMAR.2013.6669888"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref5","year":"2019","journal-title":"Solving Rubik&#x2019;s Cube 
with a Robot Hand"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2002.1014236"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CASE48305.2020.9216798"},{"key":"ref2","author":"teulieres","year":"2019","journal-title":"Industrial robotics Insights into the sector&#x2019;s future growth dynamics"},{"key":"ref9","author":"dulac-arnold","year":"2020","journal-title":"An empirical investigation of the challenges of real-world reinforcement learning"},{"key":"ref1","article-title":"Executive Summary World Robotics 2019 Industrial Robots","year":"2019","journal-title":"Tech Rep"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460528"},{"key":"ref22","author":"lillicrap","year":"2015","journal-title":"Continuous control with deep reinforcement learning"},{"key":"ref21","author":"heess","year":"2015","journal-title":"Memory-based control with recurrent neural networks"},{"key":"ref24","author":"kaspar","year":"2020","journal-title":"Sim2real transfer for reinforcement learning without dynamics randomization"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.3390\/app10196923"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/JRA.1987.1087068"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1162\/106365601750190398"}],"event":{"name":"2021 IEEE\/ASME International Conference on Advanced Intelligent Mechatronics (AIM)","location":"Delft, Netherlands","start":{"date-parts":[[2021,7,12]]},"end":{"date-parts":[[2021,7,16]]}},"container-title":["2021 IEEE\/ASME International Conference on Advanced Intelligent Mechatronics (AIM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9517313\/9517081\/09517356.pdf?arnumber=9517356","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T15:45:08Z","timestamp":1652197508000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9517356\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,7,12]]},"references-count":31,"URL":"https:\/\/doi.org\/10.1109\/aim46487.2021.9517356","relation":{},"subject":[],"published":{"date-parts":[[2021,7,12]]}}}