{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T05:03:35Z","timestamp":1725599015225},"reference-count":13,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,5,30]],"date-time":"2021-05-30T00:00:00Z","timestamp":1622332800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,5,30]],"date-time":"2021-05-30T00:00:00Z","timestamp":1622332800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,5,30]]},"DOI":"10.1109\/icra48506.2021.9561874","type":"proceedings-article","created":{"date-parts":[[2021,10,19]],"date-time":"2021-10-19T20:28:35Z","timestamp":1634675315000},"page":"3546-3552","source":"Crossref","is-referenced-by-count":1,"title":["Mesh Based Analysis of Low Fractal Dimension Reinforcement Learning Policies"],"prefix":"10.1109","author":[{"given":"Sean","family":"Gillen","sequence":"first","affiliation":[{"name":"University of California,Electrical and Computer Engineering Department,Santa Barbara,CA,93106"}]},{"given":"Katie","family":"Byl","sequence":"additional","affiliation":[{"name":"University of California,Electrical and Computer Engineering Department,Santa Barbara,CA,93106"}]}],"member":"263","reference":[{"key":"ref10","article-title":"Robust Policies via Meshing for Metastable Rough Terrain Walking","author":"saglam","year":"2015","journal-title":"Robotics Science and Systems"},{"year":"2018","key":"ref11","article-title":"Learning Dexterous In-Hand Manipulation"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2019.8815226"},{"article-title":"Mesh-based Tools to Analyze Deep Reinforcement Learning Policies for Underactuated Biped Locomotion","year":"2019","author":"talele","key":"ref13"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1177\/0278364909340446"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2017.7963802"},{"article-title":"Emergence of Locomotion Behaviours in Rich Environments","year":"2017","author":"heess","key":"ref6"},{"key":"ref5","article-title":"Explicitly Encouraging Low Fractional Dimensional Trajectories Via Reinforcement Learning","author":"gillen","year":"2020","journal-title":"Conference on Robot Learning"},{"article-title":"Robust Recovery Controller for a Quadrupedal Robot using Deep Reinforcement Learning","year":"2019","author":"lee","key":"ref8"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"journal-title":"OpenAI Gym","year":"2016","author":"brockman","key":"ref2"},{"journal-title":"Fractal Dimension Example","article-title":"Brendan Ryan \/ Public domain","year":"2020","key":"ref1"},{"key":"ref9","first-page":"1800","article-title":"Simple random search of static linear policies is competitive for reinforcement learning","author":"mania","year":"2018","journal-title":"Advances in neural information processing systems"}],"event":{"name":"2021 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2021,5,30]]},"location":"Xi'an, China","end":{"date-parts":[[2021,6,5]]}},"container-title":["2021 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9560720\/9560666\/09561874.pdf?arnumber=9561874","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,2]],"date-time":"2022-08-02T19:21:36Z","timestamp":1659468096000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9561874\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,5,30]]},"references-count":13,"URL":"https:\/\/doi.org\/10.1109\/icra48506.2021.9561874","relation":{},"subject":[],"published":{"date-parts":[[2021,5,30]]}}}