{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,19]],"date-time":"2025-12-19T15:23:28Z","timestamp":1766157808981,"version":"3.28.0"},"reference-count":28,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2014,12]]},"DOI":"10.1109\/adprl.2014.7010608","type":"proceedings-article","created":{"date-parts":[[2015,1,19]],"date-time":"2015-01-19T21:48:03Z","timestamp":1421704083000},"page":"1-8","source":"Crossref","is-referenced-by-count":38,"title":["Approximate real-time optimal control based on sparse Gaussian process models"],"prefix":"10.1109","author":[{"given":"Joschka","family":"Boedecker","sequence":"first","affiliation":[]},{"given":"Jost Tobias","family":"Springenberg","sequence":"additional","affiliation":[]},{"given":"Jan","family":"Wulfing","sequence":"additional","affiliation":[]},{"given":"Martin","family":"Riedmiller","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/MRA.2010.936957"},{"key":"ref11","first-page":"213","article-title":"R-max-A General Polynomial Time Algorithm for Near-optimal Reinforcement Learning","volume":"3","author":"brafman","year":"2003","journal-title":"JMLR"},{"journal-title":"Differential Dynamic Programming","year":"1970","author":"jacobson","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386025"},{"key":"ref14","article-title":"Receding Horizon Differential Dynamic Programming","author":"tassa","year":"2008","journal-title":"NIPS"},{"key":"ref15","article-title":"Real-Time Inverse Dynamics Learning for Musculoskeletal Robots based on Echo State Gaussian Process Regression","author":"hartmann","year":"2013","journal-title":"RSS"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1163\/016918609X12529286896877"},{"key":"ref17","article-title":"Derivative Observations in Gaussian Process Models of Dynamic Systems","author":"solak","year":"2003","journal-title":"NIPS"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1137\/1.9781611970128"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/5.58326"},{"key":"ref28","article-title":"Variance-Based Rewards for Approximate Bayesian Reinforcement Learning","author":"sorg","year":"2010","journal-title":"UAI"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913495721"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2008.4586462"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-011-5235-x"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553508"},{"key":"ref5","first-page":"222","article-title":"Iterative Linear Quadratic Regulator Design for Nonlinear Biological Movement Systems","author":"li","year":"2004","journal-title":"ICINCO"},{"key":"ref8","article-title":"Learning Complex Neural Network Policies with Trajectory Optimization","author":"levine","year":"2014","journal-title":"ICML"},{"key":"ref7","article-title":"Variational Policy Search via Trajectory Optimization","author":"levine","year":"2013","journal-title":"NIPS"},{"key":"ref2","article-title":"PILCO: A Model-Based and Data-Efficient Approach to Policy Search","author":"deisenroth","year":"2011","journal-title":"ICML"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/s10339-011-0404-1"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390175"},{"journal-title":"Gaussian Processes for Machine Learning","year":"2006","author":"rasmussen","key":"ref20"},{"article-title":"Efficient Reinforcement Learning using Gaussian Processes","year":"2010","author":"deisenroth","key":"ref22"},{"key":"ref21","doi-asserted-by":"crossref","DOI":"10.1201\/9781439821091","author":"busoniu","year":"2010","journal-title":"Reinforcement Learning and Dynamic Programming Using Function Approximators"},{"key":"ref24","article-title":"Optimal Control with Adaptive Internal Dynamics Model","author":"mitrovic","year":"2008","journal-title":"ICINCO"},{"key":"ref23","first-page":"12","article-title":"Robot Learning from Demonstration","author":"atkeson","year":"1997","journal-title":"ICML"},{"key":"ref26","article-title":"Gaussian Processes in Reinforcement Learning","author":"kuss","year":"2004","journal-title":"NIPS"},{"key":"ref25","article-title":"Learning Vehicular Dynamics, with Application to Modeling Helicopters","author":"abbeel","year":"2006","journal-title":"NIPS"}],"event":{"name":"2014 IEEE Symposium on Adaptive Dynamic Programming and Reinforcement Learning (ADPRL)","start":{"date-parts":[[2014,12,9]]},"location":"Orlando, FL, USA","end":{"date-parts":[[2014,12,12]]}},"container-title":["2014 IEEE Symposium on Adaptive Dynamic Programming and Reinforcement Learning (ADPRL)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7000183\/7010603\/07010608.pdf?arnumber=7010608","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2017,6,22]],"date-time":"2017-06-22T23:55:02Z","timestamp":1498175702000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/7010608\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2014,12]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/adprl.2014.7010608","relation":{},"subject":[],"published":{"date-parts":[[2014,12]]}}}