{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T14:12:21Z","timestamp":1742911941381,"version":"3.40.3"},"publisher-location":"Berlin, Heidelberg","reference-count":15,"publisher":"Springer Berlin Heidelberg","isbn-type":[{"type":"print","value":"9783642217371"},{"type":"electronic","value":"9783642217388"}],"license":[{"start":{"date-parts":[[2011,1,1]],"date-time":"2011-01-01T00:00:00Z","timestamp":1293840000000},"content-version":"unspecified","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2011]]},"DOI":"10.1007\/978-3-642-21738-8_29","type":"book-chapter","created":{"date-parts":[[2011,6,12]],"date-time":"2011-06-12T11:53:18Z","timestamp":1307879598000},"page":"221-228","source":"Crossref","is-referenced-by-count":2,"title":["Improving Gaussian Process Value Function Approximation in Policy Gradient Algorithms"],"prefix":"10.1007","author":[{"given":"Hunor","family":"Jakab","sequence":"first","affiliation":[]},{"given":"Lehel","family":"Csat\u00f3","sequence":"additional","affiliation":[]}],"member":"297","reference":[{"key":"29_CR1","first-page":"968","volume-title":"NIPS 1998. Advances in Neural Information Processing Systems","author":"L. Baird","year":"1998","unstructured":"Baird, L., Moore, A.: Gradient descent for general reinforcement learning. In: Kearns, M.S., Solla, S.A., Cohn, D.A. (eds.) NIPS 1998. Advances in Neural Information Processing Systems, vol.\u00a011, pp. 968\u2013974. MIT Press, Cambridge (1998)"},{"key":"29_CR2","unstructured":"Csat\u00f3, L.: Gaussian Processes \u2013 Iterative Sparse Approximation. PhD thesis, Neural Computing Research Group\u00a0 (2002)"},{"key":"29_CR3","first-page":"444","volume-title":"NIPS","author":"L. Csat\u00f3","year":"2001","unstructured":"Csat\u00f3, L., Opper, M.: Sparse representation for Gaussian process models. In: Leen, T.K., Dietterich, T.G., Tresp, V. (eds.) NIPS, vol.\u00a013, pp. 444\u2013450. MIT Press, Cambridge (2001)"},{"issue":"7-9","key":"29_CR4","doi-asserted-by":"publisher","first-page":"1508","DOI":"10.1016\/j.neucom.2008.12.019","volume":"72","author":"M.P. Deisenroth","year":"2009","unstructured":"Deisenroth, M.P., Rasmussen, C.E., Peters, J.: Gaussian process dynamic programming. Neurocomputing\u00a072(7-9), 1508\u20131524 (2009)","journal-title":"Neurocomputing"},{"key":"29_CR5","doi-asserted-by":"crossref","unstructured":"Engel, Y., Mannor, S., Meir, R.: Reinforcement learning with Gaussian processes. In: Proceedings of the 22nd International Conference on Machine learning, pp. 201\u2013208, New York (2005)","DOI":"10.1145\/1102351.1102377"},{"key":"29_CR6","first-page":"2115","volume":"11","author":"Y. Fan","year":"2010","unstructured":"Fan, Y., Xu, J., Shelton, C.R.: Importance sampling for continuous time Bayesian networks. Journal of Machine Learning Research\u00a011, 2115\u20132140 (2010)","journal-title":"Journal of Machine Learning Research"},{"key":"29_CR7","first-page":"457","volume-title":"NIPS 2007, Advances in Neural Information Processing Systems","author":"M. Ghavamzadeh","year":"2007","unstructured":"Ghavamzadeh, M., Engel, Y.: Bayesian policy gradient algorithms. In: Sch\u00f6lkopf, B., Platt, J., Hoffman, T. (eds.) NIPS 2007, Advances in Neural Information Processing Systems, vol.\u00a019, pp. 457\u2013464. MIT Press, Cambridge (2007)"},{"key":"29_CR8","unstructured":"Jakab, H.S., Csat\u00f3, L.: Using Gaussian processes for variance reduction in policy gradient algorithms. In: 8th International Conference on Applied Informatics, Eger, pp. 55\u201363 (2010)"},{"issue":"4","key":"29_CR9","doi-asserted-by":"publisher","first-page":"682","DOI":"10.1016\/j.neunet.2008.02.003","volume":"21","author":"J. Peters","year":"2008","unstructured":"Peters, J., Schaal, S.: Reinforcement learning of motor skills with policy gradients. Neural Networks\u00a021(4), 682\u2013697 (2008)","journal-title":"Neural Networks"},{"key":"29_CR10","doi-asserted-by":"crossref","unstructured":"Puterman, M.L.: Markov Decision Processes: Discrete Stochastic Dynamic Programming. John Wiley & Sons, New York (1994)","DOI":"10.1002\/9780470316887"},{"key":"29_CR11","first-page":"751","volume-title":"NIPS 2003, Advances in Neural Information Processing Systems","author":"C.E. Rasmussen","year":"2004","unstructured":"Rasmussen, C.E., Kuss, M.: Gaussian processes in reinforcement learning. In: Saul, L.K., Thrun, S., Schlkopf, B. (eds.) NIPS 2003, Advances in Neural Information Processing Systems, pp. 751\u2013759. MIT Press, Cambridge (2004)"},{"key":"29_CR12","volume-title":"Gaussian Processes for Machine Learning","author":"C.E. Rasmussen","year":"2006","unstructured":"Rasmussen, C.E., Williams, C.: Gaussian Processes for Machine Learning. MIT Press, Cambridge (2006)"},{"key":"29_CR13","doi-asserted-by":"publisher","first-page":"287","DOI":"10.1007\/s10514-008-9095-6","volume":"25","author":"M. Sugiyama","year":"2008","unstructured":"Sugiyama, M., Hachiya, H., Towell, C., Vijayakumar, S.: Geodesic gaussian kernels for value function approximation. Auton. Robots\u00a025, 287\u2013304 (2008)","journal-title":"Auton. Robots"},{"key":"29_CR14","first-page":"1057","volume-title":"NIPS 1999, Advances in Neural Information Processing Systems","author":"R.S. Sutton","year":"1999","unstructured":"Sutton, R.S., McAllester, D.A., Singh, S.P., Mansour, Y.: Policy gradient methods for reinforcement learning with function approximation. In: Solla, S.A., Leen, T.K., M\u00fcller, K.R. (eds.) NIPS 1999, Advances in Neural Information Processing Systems, pp. 1057\u20131063. MIT Press, Cambridge (1999)"},{"key":"29_CR15","first-page":"229","volume":"8","author":"R.J. Williams","year":"1992","unstructured":"Williams, R.J.: Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine Learning\u00a08, 229\u2013256 (1992)","journal-title":"Machine Learning"}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2011"],"original-title":[],"link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-642-21738-8_29","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2018,11,8]],"date-time":"2018-11-08T07:37:55Z","timestamp":1541662675000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-642-21738-8_29"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2011]]},"ISBN":["9783642217371","9783642217388"],"references-count":15,"URL":"https:\/\/doi.org\/10.1007\/978-3-642-21738-8_29","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2011]]}}}