{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,12]],"date-time":"2026-01-12T21:10:57Z","timestamp":1768252257519,"version":"3.49.0"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,12]]},"DOI":"10.1109\/icarm49381.2020.9195341","type":"proceedings-article","created":{"date-parts":[[2020,9,14]],"date-time":"2020-09-14T21:41:00Z","timestamp":1600119660000},"page":"300-305","source":"Crossref","is-referenced-by-count":10,"title":["Model-Based Reinforcement Learning For Robot Control"],"prefix":"10.1109","author":[{"given":"Xiang","family":"Li","sequence":"first","affiliation":[]},{"given":"Weiwei","family":"Shang","sequence":"additional","affiliation":[]},{"given":"Shuang","family":"Cong","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"1","article-title":"Benchmarking model-based reinforcement learning","author":"wang","year":"2019","journal-title":"ArXiv"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1561\/2300000021"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143845"},{"key":"ref13","first-page":"1071","article-title":"Learning neural network policies with guided policy search under unknown dynamics","volume":"2","author":"levine","year":"2014","journal-title":"Advances in neural information processing systems"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2011.2159412"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8463189"},{"key":"ref16","article-title":"Model-based reinforcement learning via meta-policy optimization","volume":"abs 1809 5214","author":"clavera","year":"2018","journal-title":"ArXiv"},{"key":"ref17","first-page":"1","article-title":"Model-ensemble trust-region policy optimization","volume":"1","author":"kurutach","year":"2018","journal-title":"6th International Conference on Learning Representations ICLR - Conference Track Proceedings"},{"key":"ref18","first-page":"4754","article-title":"Deep reinforcement learning in a handful of trials using probabilistic dynamics models","author":"chua","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/122344.122377"},{"key":"ref4","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"Computer Science"},{"key":"ref3","first-page":"1","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"levine","year":"2015","journal-title":"Journal of Machine Learning Research"},{"key":"ref6","first-page":"465","article-title":"Pilco: a model-based and data-efficient approach to policy search","author":"deisenroth","year":"2011","journal-title":"Proceedings of the 28th International Conference on International Conference on Machine Learning ICML"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/s10339-011-0404-1"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2018.2878318"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2011.VII.008"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913495721"},{"key":"ref1","article-title":"Playing atari with deep reinforcement learning","volume":"abs 1312 5602","author":"mnih","year":"2013","journal-title":"ArXiv"},{"key":"ref20","article-title":"Weight normalization: a simple reparameterization to accelerate training of deep neural networks","author":"salimans","year":"0","journal-title":"Advances in neural information processing systems"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-017-0468-y"},{"key":"ref21","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"Computer Science"},{"key":"ref24","first-page":"6403","article-title":"Simple and scalable predictive uncertainty estimation using deep ensembles","volume":"2017","author":"lakshminarayanan","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref23","article-title":"Model-based value estimation for efficient model-free reinforcement learning","volume":"abs 1803 101","author":"feinberg","year":"2018","journal-title":"ArXiv"}],"event":{"name":"2020 5th International Conference on Advanced Robotics and Mechatronics (ICARM)","location":"Shenzhen, China","start":{"date-parts":[[2020,12,18]]},"end":{"date-parts":[[2020,12,21]]}},"container-title":["2020 5th International Conference on Advanced Robotics and Mechatronics (ICARM)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9180365\/9195268\/09195341.pdf?arnumber=9195341","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:07:32Z","timestamp":1656374852000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9195341\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,12]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icarm49381.2020.9195341","relation":{},"subject":[],"published":{"date-parts":[[2020,12]]}}}