{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,23]],"date-time":"2026-01-23T09:01:33Z","timestamp":1769158893249,"version":"3.49.0"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,10,24]]},"DOI":"10.1109\/iros45743.2020.9341390","type":"proceedings-article","created":{"date-parts":[[2021,2,13]],"date-time":"2021-02-13T02:26:48Z","timestamp":1613183208000},"page":"9080-9087","source":"Crossref","is-referenced-by-count":12,"title":["Sample-Efficient Learning for Industrial Assembly using Qgraph-bounded DDPG"],"prefix":"10.1109","author":[{"given":"Sabrina","family":"Hoppe","sequence":"first","affiliation":[]},{"given":"Markus","family":"Giftthaler","sequence":"additional","affiliation":[]},{"given":"Robert","family":"Krug","sequence":"additional","affiliation":[]},{"given":"Marc","family":"Toussaint","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2928212"},{"key":"ref32","article-title":"Parameter space noise for exploration","author":"plappert","year":"2017"},{"key":"ref31","article-title":"TreeQN and ATreec: Differentiable tree planning for deep reinforcement learning","author":"farquhar","year":"0"},{"key":"ref30","article-title":"Explicit recall for efficient exploration","author":"dong","year":"2018"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/SIMPAR.2018.8376281"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2019.2891177"},{"key":"ref35","article-title":"Pinocchio: fast forward and inverse dynamics for poly-articulated systems","author":"carpentier","year":"2019"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/56.20440"},{"key":"ref10","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v32i1.11694","article-title":"Deep reinforcement learning that matters","author":"henderson","year":"2018","journal-title":"Thirty-Second AAAI Conference on Artificial Intelligence"},{"key":"ref11","article-title":"Towards characterizing divergence in deep q-learning","author":"achiam","year":"2019"},{"key":"ref12","article-title":"Qgraph-bounded q-learning: Stabilizing model-free off-policy deep reinforcement learning","author":"hoppe","year":"2020"},{"key":"ref13","article-title":"Compensating device for a handling unit and handling unit comprising the compensating device","author":"rueb","year":"2018","journal-title":"Patent"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-94-007-2069-5_83"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.1995.525545"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/100.591646"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1023\/A:1022698606139"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1163\/156855305323383767"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1177\/0278364917710318"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460995"},{"key":"ref4","article-title":"When to trust your model: Model-based policy optimization","author":"janner","year":"2019"},{"key":"ref27","article-title":"Deep reinforcement learning for industrial insertion tasks with visual inputs and natural rewards","author":"schoettler","year":"2019"},{"key":"ref3","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202244"},{"key":"ref5","article-title":"Soft actor-critic algorithms and applications","author":"haarnoja","year":"2018"},{"key":"ref8","article-title":"Deep reinforcement learning and the deadly triad","author":"van hasselt","year":"2018"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref2","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-377-6.50013-X"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793542"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913495721"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460696"},{"key":"ref21","first-page":"1334","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"levine","year":"2016","journal-title":"The Journal of Machine Learning Research"},{"key":"ref24","article-title":"Combining learned and analytical models for predicting action effects","author":"kloss","year":"2017"},{"key":"ref23","article-title":"Learning data-efficient rigid-body contact models: Case study of planar impact","author":"fazeli","year":"2017"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794127"},{"key":"ref25","article-title":"Residual policy learning","author":"silver","year":"2018"}],"event":{"name":"2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","location":"Las Vegas, NV, USA","start":{"date-parts":[[2020,10,24]]},"end":{"date-parts":[[2021,1,24]]}},"container-title":["2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9340668\/9340635\/09341390.pdf?arnumber=9341390","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,17]],"date-time":"2022-12-17T04:15:45Z","timestamp":1671250545000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9341390\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10,24]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/iros45743.2020.9341390","relation":{},"subject":[],"published":{"date-parts":[[2020,10,24]]}}}