{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,24]],"date-time":"2025-11-24T16:29:13Z","timestamp":1764001753544},"reference-count":37,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2011,2,1]],"date-time":"2011-02-01T00:00:00Z","timestamp":1296518400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Syst., Man, Cybern. B"],"published-print":{"date-parts":[[2011,2]]},"DOI":"10.1109\/tsmcb.2010.2050586","type":"journal-article","created":{"date-parts":[[2010,6,25]],"date-time":"2010-06-25T13:02:51Z","timestamp":1277470971000},"page":"196-209","source":"Crossref","is-referenced-by-count":55,"title":["Cross-Entropy Optimization of Control Policies With Adaptive Basis Functions"],"prefix":"10.1109","volume":"41","author":[{"given":"Lucian","family":"Busoniu","sequence":"first","affiliation":[]},{"given":"Damien","family":"Ernst","sequence":"additional","affiliation":[]},{"given":"Bart","family":"De Schutter","sequence":"additional","affiliation":[]},{"given":"Robert","family":"Babuska","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1023\/A:1017928328829"},{"key":"ref32","doi-asserted-by":"crossref","first-page":"237","DOI":"10.1613\/jair.301","article-title":"reinforcement learning: a survey","volume":"4","author":"kaelbling","year":"1996","journal-title":"J Artif Intell Res"},{"key":"ref31","doi-asserted-by":"crossref","first-page":"619","DOI":"10.1016\/S1574-0021(96)01016-7","author":"rust","year":"1996","journal-title":"Handbook of Computational Economics"},{"key":"ref30","first-page":"153","article-title":"Policy search with cross-entropy optimization of basis functions","author":"busoniu","year":"2009","journal-title":"Proc IEEE Int Symp ADPRL"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2006.377527"},{"key":"ref36","doi-asserted-by":"crossref","first-page":"223","DOI":"10.3934\/mbe.2004.1.223","article-title":"dynamic multidrug therapies for hiv: optimal and sti control approaches","volume":"1","author":"adams","year":"2004","journal-title":"Math Biosci Eng"},{"key":"ref35","first-page":"463","article-title":"Learning to drive a bicycle using reinforcement learning and shaping","author":"randlv","year":"1998","journal-title":"Proc 15th ICML"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.orl.2006.11.005"},{"key":"ref10","doi-asserted-by":"crossref","first-page":"111","DOI":"10.1023\/A:1022145020786","article-title":"approximate gradient methods in policy-space optimization of markov reward processes","volume":"13","author":"marbach","year":"2003","journal-title":"Discrete Event Dyn Syst Theory Appl"},{"key":"ref11","first-page":"512","article-title":"The cross-entropy method for fast policy search","author":"mannor","year":"2003","journal-title":"Proc 20th ICML"},{"key":"ref12","first-page":"771","article-title":"policy gradient in continuous time","volume":"7","author":"munos","year":"2006","journal-title":"J Mach Learn Res"},{"key":"ref13","doi-asserted-by":"crossref","DOI":"10.1007\/978-1-84628-690-2","author":"chang","year":"2007","journal-title":"Simulation-Based Algorithms for Markov Decision Processes"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2007.368196"},{"key":"ref15","author":"rubinstein","year":"2004","journal-title":"The Cross Entropy Method A Unified Approach to Combinatorial Optimization Monte-Carlo Simulation and Machine Learning"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-74759-0_128"},{"key":"ref17","first-page":"406","article-title":"PEGASUS: A policy search method for large MDPs and POMDPs","author":"ng","year":"2000","journal-title":"Proc 10th Conf UAI"},{"key":"ref18","first-page":"1","article-title":"epicardial ecg mapping of human ventricular fibrillation","author":"mourad","year":"2006","journal-title":"2006 IET 3rd International Conference On Advances in Medical Signal and Information Processing - MEDSIP 2006 MEDSIP"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1137\/S0363012901385691"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/BF00114724"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.3166\/ejc.11.310-334"},{"key":"ref3","author":"bertsekas","year":"1996","journal-title":"Neuro-Dynamic Programming"},{"key":"ref27","first-page":"424","article-title":"Reinforcement learning as classification: Leveraging modern classifiers","author":"lagoudakis","year":"2003","journal-title":"Proc 20th ICML"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1162\/jmlr.2003.4.6.1107"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1023\/A:1017992615625"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-008-5069-3"},{"key":"ref8","first-page":"503","article-title":"tree-based batch mode reinforcement learning","volume":"6","author":"ernst","year":"2005","journal-title":"J Mach Learn Res"},{"key":"ref7","first-page":"27","author":"busoniu","year":"2008","journal-title":"Adaptive Agents and Multi-Agent Systems"},{"key":"ref2","author":"sutton","year":"1998","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref9","first-page":"2169","article-title":"proto-value functions: a laplacian framework for learning representation and control in markov decision processes","volume":"8","author":"mahadevan","year":"2007","journal-title":"J Mach Learn Res"},{"key":"ref1","author":"bertsekas","year":"2007","journal-title":"Dynamic Programming and Optimal Control"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2007.11.026"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/SSST.1998.660132"},{"key":"ref21","doi-asserted-by":"crossref","first-page":"988","DOI":"10.1109\/TSMCB.2008.922019","article-title":"adaptive critic learning techniques for engine torque and air-fuel ratio control","volume":"38","author":"liu","year":"2008","journal-title":"IEEE Trans Syst Man Cybern B Cybern"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2006.883265"},{"key":"ref23","article-title":"A genetic search in policy space for solving Markov decision processes","author":"barash","year":"1999","journal-title":"Proc AAAI Spring Symp Search Techn Probl Solving Under Uncertainty Incomplete Inf"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2008.926610"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/s10479-005-5732-z"}],"container-title":["IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx5\/3477\/5688133\/05491120.pdf?arnumber=5491120","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,10,11]],"date-time":"2021-10-11T00:44:38Z","timestamp":1633913078000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/5491120\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2011,2]]},"references-count":37,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tsmcb.2010.2050586","relation":{},"ISSN":["1083-4419"],"issn-type":[{"value":"1083-4419","type":"print"}],"subject":[],"published":{"date-parts":[[2011,2]]}}}