{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,9]],"date-time":"2024-09-09T14:02:41Z","timestamp":1725890561985},"publisher-location":"Berlin, Heidelberg","reference-count":28,"publisher":"Springer Berlin Heidelberg","isbn-type":[{"type":"print","value":"9783540874805"},{"type":"electronic","value":"9783540874812"}],"license":[{"start":{"date-parts":[[2008,1,1]],"date-time":"2008-01-01T00:00:00Z","timestamp":1199145600000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2008]]},"DOI":"10.1007\/978-3-540-87481-2_5","type":"book-chapter","created":{"date-parts":[[2008,8,13]],"date-time":"2008-08-13T23:30:46Z","timestamp":1218670246000},"page":"66-81","source":"Crossref","is-referenced-by-count":10,"title":["Fitted Natural Actor-Critic: A New Algorithm for Continuous State-Action MDPs"],"prefix":"10.1007","author":[{"given":"Francisco S.","family":"Melo","sequence":"first","affiliation":[]},{"given":"Manuel","family":"Lopes","sequence":"additional","affiliation":[]}],"member":"297","reference":[{"doi-asserted-by":"crossref","unstructured":"Peters, J., Vijayakumar, S., Schaal, S.: Natural Actor-Critic. In: Proc. European Conf. Machine Learning, pp. 280\u2013291 (2005)","key":"5_CR1","DOI":"10.1007\/11564096_29"},{"unstructured":"Bertsekas, D., Tsitsiklis, J.: Neuro-Dynamic Programming. Athena Scientific (1996)","key":"5_CR2"},{"issue":"2","key":"5_CR3","doi-asserted-by":"publisher","first-page":"215","DOI":"10.1162\/neco.1994.6.2.215","volume":"6","author":"G. Tesauro","year":"1994","unstructured":"Tesauro, G.: TD-Gammon, a self-teaching backgammon program, achieves master-level play. Neural Computation\u00a06(2), 215\u2013219 (1994)","journal-title":"Neural Computation"},{"doi-asserted-by":"crossref","unstructured":"Baird, L.: Residual algorithms: Reinforcement learning with function approximation. In: Proc. Int. Conf. Machine Learning, pp. 30\u201337 (1995)","key":"5_CR4","DOI":"10.1016\/B978-1-55860-377-6.50013-X"},{"issue":"5","key":"5_CR5","doi-asserted-by":"publisher","first-page":"674","DOI":"10.1109\/9.580874","volume":"42","author":"J. Tsitsiklis","year":"1996","unstructured":"Tsitsiklis, J., Van Roy, B.: An analysis of temporal-difference learning with function approximation. IEEE Trans. Automatic Control\u00a042(5), 674\u2013690 (1996)","journal-title":"IEEE Trans. Automatic Control"},{"doi-asserted-by":"crossref","unstructured":"Sutton, R.: Open theoretical questions in reinforcement learning. In: Proc. European Conf. Computational Learning Theory, pp. 11\u201317 (1999)","key":"5_CR6","DOI":"10.1007\/3-540-49097-3_2"},{"unstructured":"Antos, A., Munos, R., Szepesv\u00e1ri, C.: Fitted Q-iteration in continuous action-space MDPs. In: Adv. Neural Information Proc. Systems, vol.\u00a020 (2007)","key":"5_CR7"},{"unstructured":"Munos, R., Szepesv\u00e1ri, C.: Finite-time bounds for sampling-based fitted value iteration. J. Machine Learning Research (submitted, 2007)","key":"5_CR8"},{"unstructured":"Gordon, G.: Stable fitted reinforcement learning. In: Adv. Neural Information Proc. Systems, vol.\u00a08, pp. 1052\u20131058 (1996)","key":"5_CR9"},{"key":"5_CR10","doi-asserted-by":"publisher","first-page":"161","DOI":"10.1023\/A:1017928328829","volume":"49","author":"D. Ormoneit","year":"2002","unstructured":"Ormoneit, D., Sen, S.: Kernel-based reinforcement learning. Machine Learning\u00a049, 161\u2013178 (2002)","journal-title":"Machine Learning"},{"key":"5_CR11","first-page":"503","volume":"6","author":"D. Ernst","year":"2005","unstructured":"Ernst, D., Geurts, P., Wehenkel, L.: Tree-based batch mode reinforcement learning. J. Machine Learning Research\u00a06, 503\u2013556 (2005)","journal-title":"J. Machine Learning Research"},{"key":"5_CR12","series-title":"Lecture Notes in Artificial Intelligence","doi-asserted-by":"publisher","first-page":"317","DOI":"10.1007\/11564096_32","volume-title":"Machine Learning: ECML 2005","author":"M. Riedmiller","year":"2005","unstructured":"Riedmiller, M.: Neural fitted Q-iteration: First experiences with a data efficient neural reinforcement learning method. In: Gama, J., Camacho, R., Brazdil, P.B., Jorge, A.M., Torgo, L. (eds.) ECML 2005. LNCS (LNAI), vol.\u00a03720, pp. 317\u2013328. Springer, Heidelberg (2005)"},{"unstructured":"Kimura, H., Kobayashi, S.: Reinforcement learning for continuous action using stochastic gradient ascent. In: Proc. Int. Conf. Intelligent Autonomous Systems, pp. 288\u2013295 (1998)","key":"5_CR13"},{"unstructured":"Lazaric, A., Restelli, M., Bonarini, A.: Reinforcement learning in continuous action spaces through sequential Monte Carlo methods. In: Adv. Neural Information Proc. Systems, vol.\u00a020 (2007)","key":"5_CR14"},{"issue":"4","key":"5_CR15","doi-asserted-by":"publisher","first-page":"1143","DOI":"10.1137\/S0363012901385691","volume":"42","author":"V. Konda","year":"2003","unstructured":"Konda, V., Tsitsiklis, J.: On actor-critic algorithms. SIAM J. Control and Optimization\u00a042(4), 1143\u20131166 (2003)","journal-title":"SIAM J. Control and Optimization"},{"issue":"5","key":"5_CR16","doi-asserted-by":"crossref","first-page":"834","DOI":"10.1109\/TSMC.1983.6313077","volume":"13","author":"A. Barto","year":"1983","unstructured":"Barto, A., Sutton, R., Anderson, C.: Neuronlike adaptive elements that can solve difficult learning control problems. IEEE Trans. Systems, Man and Cybernetics\u00a013(5), 834\u2013846 (1983)","journal-title":"IEEE Trans. Systems, Man and Cybernetics"},{"doi-asserted-by":"crossref","unstructured":"van Hasselt, H., Wiering, M.: Reinforcement learning in continuous action spaces. In: Proc. 2007 IEEE Symp. Approx. Dynamic Programming and Reinforcement Learning, pp. 272\u2013279 (2007)","key":"5_CR17","DOI":"10.1109\/ADPRL.2007.368199"},{"unstructured":"Bhatnagar, S., Sutton, R., Ghavamzadeh, M., Lee, M.: Incremental natural actor-critic algorithms. In: Adv. Neural Information Proc. Systems, vol.\u00a020 (2007)","key":"5_CR18"},{"unstructured":"Kakade, S.: A natural policy gradient. In: Adv. Neural Information Proc. Systems, vol.\u00a014, pp. 1531\u20131538 (2001)","key":"5_CR19"},{"key":"5_CR20","doi-asserted-by":"crossref","DOI":"10.1002\/9780470316887","volume-title":"Markov Decision Processes: Discrete Stochastic Dynamic Programming","author":"M. Puterman","year":"1994","unstructured":"Puterman, M.: Markov Decision Processes: Discrete Stochastic Dynamic Programming. John Wiley & Sons, Inc., Chichester (1994)"},{"unstructured":"Sutton, R., McAllester, D., Singh, S., Mansour, Y.: Policy gradient methods for reinforcement learning with function approximation. In: Adv. Neural Information Proc. Systems, vol.\u00a012, pp. 1057\u20131063 (2000)","key":"5_CR21"},{"issue":"2","key":"5_CR22","doi-asserted-by":"publisher","first-page":"191","DOI":"10.1109\/9.905687","volume":"46","author":"P. Marbach","year":"2001","unstructured":"Marbach, P., Tsitsiklis, J.: Simulation-based optimization of Markov reward processes. IEEE Trans. Automatic Control\u00a046(2), 191\u2013209 (2001)","journal-title":"IEEE Trans. Automatic Control"},{"key":"5_CR23","doi-asserted-by":"crossref","DOI":"10.1007\/978-1-4471-3267-7","volume-title":"Markov Chains and Stochastic Stability","author":"S. Meyn","year":"1993","unstructured":"Meyn, S., Tweedie, R.: Markov Chains and Stochastic Stability. Springer, Heidelberg (1993)"},{"unstructured":"Baird, L.: Advantage updating. Tech. Rep. WL-TR-93-1146, Wright Laboratory, Wright-Patterson Air Force Base (1993)","key":"5_CR24"},{"issue":"2","key":"5_CR25","doi-asserted-by":"publisher","first-page":"251","DOI":"10.1162\/089976698300017746","volume":"10","author":"S. Amari","year":"1998","unstructured":"Amari, S.: Natural gradient works efficiently in learning. Neural Computation\u00a010(2), 251\u2013276 (1998)","journal-title":"Neural Computation"},{"key":"5_CR26","doi-asserted-by":"publisher","first-page":"89","DOI":"10.1007\/s10994-007-5038-2","volume":"71","author":"A. Antos","year":"2008","unstructured":"Antos, A., Szepesv\u00e1ri, C., Munos, R.: Learning near-optimal policies with Bellman-residual minimization based fitted policy iteration and a single sample path. Machine Learning\u00a071, 89\u2013129 (2008)","journal-title":"Machine Learning"},{"key":"5_CR27","first-page":"123","volume":"22","author":"S. Singh","year":"1996","unstructured":"Singh, S., Sutton, R.: Reinforcement learning with replacing eligibility traces. Machine Learning\u00a022, 123\u2013158 (1996)","journal-title":"Machine Learning"},{"unstructured":"Munos, R.: Error bounds for approximate policy iteration. In: Proc. Int. Conf. Machine Learning, pp. 560\u2013567 (2003)","key":"5_CR28"}],"container-title":["Lecture Notes in Computer Science","Machine Learning and Knowledge Discovery in Databases"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-540-87481-2_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2019,5,19]],"date-time":"2019-05-19T15:29:21Z","timestamp":1558279761000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-540-87481-2_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2008]]},"ISBN":["9783540874805","9783540874812"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-540-87481-2_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2008]]}}}