{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T15:31:23Z","timestamp":1730302283798,"version":"3.28.0"},"reference-count":15,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,6,1]],"date-time":"2019-06-01T00:00:00Z","timestamp":1559347200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,6,1]],"date-time":"2019-06-01T00:00:00Z","timestamp":1559347200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,6,1]],"date-time":"2019-06-01T00:00:00Z","timestamp":1559347200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,6]]},"DOI":"10.1109\/urai.2019.8768564","type":"proceedings-article","created":{"date-parts":[[2019,7,26]],"date-time":"2019-07-26T00:16:33Z","timestamp":1564100193000},"page":"228-235","source":"Crossref","is-referenced-by-count":4,"title":["Effects of Hyper-Parameters for Deep Reinforcement Learning in Robotic Motion Mimicry: A Preliminary Study"],"prefix":"10.1109","author":[{"given":"Taewoo","family":"Kim","sequence":"first","affiliation":[]},{"given":"Joo-Haeng","family":"Lee","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8206046"},{"article-title":"Emergence of locomotion behaviours in rich environments","year":"2017","author":"heess","key":"ref11"},{"key":"ref12","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2013.6696520"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"schulman","key":"ref14"},{"key":"ref15","volume":"135","author":"sutton","year":"1998","journal-title":"Introduction to Reinforcement Learning"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593432"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8594452"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8461249"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8462901"},{"article-title":"Transferring end-to-end visuomotor control from simulation to real world for a multi-stage task","year":"2017","author":"james","key":"ref8"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3197517.3201311"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICHR.2008.4755984"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511489808"},{"article-title":"Learning dexterous in-hand manipulation","year":"2018","author":"andrychowicz","key":"ref9"}],"event":{"name":"2019 16th International Conference on Ubiquitous Robots (UR)","start":{"date-parts":[[2019,6,24]]},"location":"Jeju, Korea (South)","end":{"date-parts":[[2019,6,27]]}},"container-title":["2019 16th International Conference on Ubiquitous Robots (UR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8765202\/8768488\/08768564.pdf?arnumber=8768564","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,19]],"date-time":"2022-07-19T20:20:04Z","timestamp":1658262004000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8768564\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,6]]},"references-count":15,"URL":"https:\/\/doi.org\/10.1109\/urai.2019.8768564","relation":{},"subject":[],"published":{"date-parts":[[2019,6]]}}}