{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T05:57:22Z","timestamp":1775109442364,"version":"3.50.1"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,4,12]],"date-time":"2021-04-12T00:00:00Z","timestamp":1618185600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,4,12]],"date-time":"2021-04-12T00:00:00Z","timestamp":1618185600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,4,12]]},"DOI":"10.1109\/robosoft51838.2021.9479340","type":"proceedings-article","created":{"date-parts":[[2021,7,12]],"date-time":"2021-07-12T21:33:31Z","timestamp":1626125611000},"page":"141-148","source":"Crossref","is-referenced-by-count":35,"title":["Model-Free Reinforcement Learning with Ensemble for a Soft Continuum Robot Arm"],"prefix":"10.1109","author":[{"given":"Ryota","family":"Morimoto","sequence":"first","affiliation":[{"name":"The University of Tokyo,Graduate School of Information Science and Technology,Tokyo,Japan"}]},{"given":"Satoshi","family":"Nishikawa","sequence":"additional","affiliation":[{"name":"The University of Tokyo,Graduate School of Information Science and Technology,Tokyo,Japan"}]},{"given":"Ryuma","family":"Niiyama","sequence":"additional","affiliation":[{"name":"The University of Tokyo,Graduate School of Information Science and Technology,Tokyo,Japan"}]},{"given":"Yasuo","family":"Kuniyoshi","sequence":"additional","affiliation":[{"name":"The University of Tokyo,Graduate School of Information Science and Technology,Tokyo,Japan"}]}],"member":"263","reference":[{"key":"ref33","article-title":"UCB Exploration via Q-Ensembles","author":"chen","year":"2017"},{"key":"ref32","first-page":"4033","article-title":"Deep exploration via bootstrapped DQN","author":"osband","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref31","first-page":"2587","article-title":"Addressing Function Approximation Error in Actor-Critic Methods","volume":"4","author":"fujimoto","year":"2018","journal-title":"35th International Conference on Machine Learning (ICML)"},{"key":"ref30","article-title":"Soft Actor-Critic Algorithms and Applications","author":"haarnoja","year":"2018"},{"key":"ref37","article-title":"Proximal Policy Optimization Algorithms","author":"schulman","year":"2017"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO.2017.8324762"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOSOFT.2018.8404919"},{"key":"ref10","article-title":"Learning to Play Table Tennis From Scratch using Muscular Robots","author":"b\u00fcchler","year":"2020"},{"key":"ref11","article-title":"Deep Dynamics Models for Learning Dexterous Manipulation","author":"nagabandi","year":"2019","journal-title":"Conference on Robot Learning (CoRL)"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487156"},{"key":"ref13","article-title":"OpenAI Gym","author":"brockman","year":"2016"},{"key":"ref14","first-page":"7953","article-title":"A game theoretic framework for model based reinforcement learning","author":"rajeswaran","year":"2020","journal-title":"37th International Conference on Machine Learning ICML"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2018.2878318"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO.2018.8665152"},{"key":"ref17","first-page":"1","article-title":"Guided Policy Search","volume":"28","author":"levine","year":"2013","journal-title":"The 30th Int Conf Mach Learning"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793653"},{"key":"ref19","doi-asserted-by":"crossref","first-page":"773","DOI":"10.1109\/TRO.2008.924923","article-title":"Geometrically exact models for soft robotic manipulators","volume":"24","author":"trivedi","year":"2008","journal-title":"IEEE Transactions on Robotics"},{"key":"ref28","article-title":"SUNRISE: A Simple Unified Framework for Ensemble Learning in Deep Reinforcement Learning","author":"lee","year":"2020"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2014.2313741"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8206123"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"45","DOI":"10.1002\/rob.10070","article-title":"Kinematics and the Implementation of an Elephant&#x2019;s Trunk Manipulator and Other Continuum Style Robots","volume":"20","author":"hannan","year":"2003","journal-title":"Journal of Robotic Systems"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1088\/1748-3190\/aa839f"},{"key":"ref29","article-title":"When to Trust Your Model: Model-Based Policy Optimization","author":"janner","year":"2019"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2013.2287890"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1089\/soro.2016.0065"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793766"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1177\/0278364910368147"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.3390\/robotics8010004"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.5402\/2013\/726506"},{"key":"ref20","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2016","journal-title":"International Conference on Learning Representations (ICLR)"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO49542.2019.8961852"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/RoboSoft48309.2020.9116003"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-020-01237-6"},{"key":"ref23","article-title":"Soft Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor","author":"haarnoja","year":"2018"},{"key":"ref26","doi-asserted-by":"crossref","first-page":"173","DOI":"10.1007\/978-3-319-65289-4_17","article-title":"Toward Effective Soft Robot Control via Reinforcement Learning","author":"zhang","year":"2017","journal-title":"Intelligent Robotics and Applications"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCPCCT.2018.8574225"}],"event":{"name":"2021 IEEE 4th International Conference on Soft Robotics (RoboSoft)","location":"New Haven, CT, USA","start":{"date-parts":[[2021,4,12]]},"end":{"date-parts":[[2021,4,16]]}},"container-title":["2021 IEEE 4th International Conference on Soft Robotics (RoboSoft)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9478969\/9479187\/09479340.pdf?arnumber=9479340","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,2]],"date-time":"2022-08-02T23:43:26Z","timestamp":1659483806000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9479340\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,4,12]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/robosoft51838.2021.9479340","relation":{},"subject":[],"published":{"date-parts":[[2021,4,12]]}}}