{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,28]],"date-time":"2025-03-28T08:07:04Z","timestamp":1743149224844,"version":"3.40.3"},"publisher-location":"Cham","reference-count":11,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783319018539"},{"type":"electronic","value":"9783319018546"}],"license":[{"start":{"date-parts":[[2014,1,1]],"date-time":"2014-01-01T00:00:00Z","timestamp":1388534400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2014,1,1]],"date-time":"2014-01-01T00:00:00Z","timestamp":1388534400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2014]]},"DOI":"10.1007\/978-3-319-01854-6_11","type":"book-chapter","created":{"date-parts":[[2013,8,8]],"date-time":"2013-08-08T06:47:25Z","timestamp":1375944445000},"page":"101-108","source":"Crossref","is-referenced-by-count":0,"title":["Empirical Study of the Sensitivity of CACLA to Sub-optimal Parameter Setting in Learning Feedback Controllers"],"prefix":"10.1007","author":[{"given":"Borja","family":"Fernandez-Gauna","sequence":"first","affiliation":[]},{"given":"Igor","family":"Ansoategui","sequence":"additional","affiliation":[]},{"given":"Ismael","family":"Etxeberria-Agiriano","sequence":"additional","affiliation":[]},{"given":"Manuel","family":"Gra\u00f1a","sequence":"additional","affiliation":[]}],"member":"297","reference":[{"issue":"1-2","key":"11_CR1","doi-asserted-by":"publisher","first-page":"137","DOI":"10.1007\/s10994-011-5235-x","volume":"84","author":"R. Hafner","year":"2011","unstructured":"Hafner, R., Riedmiller, M.: Reinforcement learning in feedback control: Challenges and benchmarks from technical process control. Machine Learning\u00a084(1-2), 137\u2013169 (2011)","journal-title":"Machine Learning"},{"key":"11_CR2","unstructured":"Neumann, G.: The Reinforcement Learning Toolbox, Reinforcement Learning for Optimal Control Tasks. PhD thesis, Technischen Universitaet, Graz (2005)"},{"key":"11_CR3","doi-asserted-by":"crossref","first-page":"32","DOI":"10.1109\/MCAS.2009.933854","volume":"9","author":"F.L. Lewis","year":"2012","unstructured":"Lewis, F.L., Vrabie, D., Vamvoudakis, K.G.: Reinforcement learning and feedback control. IEEE Control Systems Magazine\u00a09, 32\u201350 (2012)","journal-title":"IEEE Control Systems Magazine"},{"key":"11_CR4","doi-asserted-by":"crossref","unstructured":"Lewis, F.L., Liu, D., et al.: Reinforcement Learning and Approximate Dynamic Programming for Feedback Control. Wiley (2013)","DOI":"10.1002\/9781118453988"},{"key":"11_CR5","doi-asserted-by":"crossref","unstructured":"Koren, Y., Lo, C.C.: Advanced controllers for feed drives. Annals of the CIRP\u00a041 (1992)","DOI":"10.1016\/S0007-8506(07)63255-7"},{"key":"11_CR6","doi-asserted-by":"publisher","first-page":"743","DOI":"10.1115\/1.2836818","volume":"119","author":"K. Srinivasan","year":"1997","unstructured":"Srinivasan, K., Tsao, T.C.: Machine feed drives and their control - a survey of the state of the art. 
Journal of Manufacturing Science and Engineering\u00a0119, 743\u2013748 (1997)","journal-title":"Journal of Manufacturing Science and Engineering"},{"key":"11_CR7","doi-asserted-by":"crossref","unstructured":"Fernandez-Gauna, B., Ansoategui, I., Etxeberria-Agiriano, I., Gra\u00f1a, M.: Empirical study of actor-critic methods for feedback controllers. In: Proceedings from IWINAC (2013)","DOI":"10.1007\/978-3-642-38622-0_46"},{"key":"11_CR8","series-title":"ALO","doi-asserted-by":"publisher","first-page":"207","DOI":"10.1007\/978-3-642-27645-3_7","volume-title":"Reinforcement Learning","author":"H. van Hasselt","year":"2012","unstructured":"van Hasselt, H.: Reinforcement Learning in Continuous State and Action Spaces. In: Wiering, M., van Otterlo, M. (eds.) Reinforcement Learning. ALO, vol.\u00a012, pp. 207\u2013251. Springer, Heidelberg (2012)"},{"key":"11_CR9","doi-asserted-by":"crossref","unstructured":"Sutton, R., Barto, A.: Reinforcement Learning: An Introduction. MIT Press (1998)","DOI":"10.1109\/TNN.1998.712192"},{"key":"11_CR10","unstructured":"Busoniu, L., Babuska, R., De Schutter, B., Ernst, D.: Reinforcement Learning and Dynamic Programming using Function Approximation. CRC Press (2010)"},{"key":"11_CR11","first-page":"1","volume":"1","author":"P. Vamplew","year":"2011","unstructured":"Vamplew, P., Dazeley, R., Berry, A., Issabekov, R., Dekker, E.: Empirical evaluation methods for multiobjective reinforcement learning algorithms. Machine Learning\u00a01, 1\u201330 (2011)","journal-title":"Machine Learning"}],"container-title":["Advances in Intelligent Systems and Computing","International Joint Conference SOCO\u201913-CISIS\u201913-ICEUTE\u201913"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-319-01854-6_11","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,2,7]],"date-time":"2023-02-07T17:25:24Z","timestamp":1675790724000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-319-01854-6_11"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2014]]},"ISBN":["9783319018539","9783319018546"],"references-count":11,"URL":"https:\/\/doi.org\/10.1007\/978-3-319-01854-6_11","relation":{},"ISSN":["2194-5357","2194-5365"],"issn-type":[{"type":"print","value":"2194-5357"},{"type":"electronic","value":"2194-5365"}],"subject":[],"published":{"date-parts":[[2014]]}}}
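
The record above is a single "work" message as returned by the public Crossref REST API for DOI 10.1007/978-3-319-01854-6_11. As a minimal sketch of how such a record can be retrieved and read, the Python snippet below fetches the same DOI from api.crossref.org and assembles a compact citation from the title, author, container-title, issued, page, and DOI fields; the helper names fetch_work and format_citation are ad hoc for this illustration, not part of any library.

import requests

DOI = "10.1007/978-3-319-01854-6_11"

def fetch_work(doi: str) -> dict:
    # GET https://api.crossref.org/works/<doi> returns {"status": "ok", ..., "message": {...}}
    resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
    resp.raise_for_status()
    return resp.json()["message"]

def format_citation(work: dict) -> str:
    # Crossref gives "title" and "container-title" as lists and authors as
    # dicts with "given"/"family"; "issued"."date-parts" holds [[year, ...]].
    authors = ", ".join(
        f"{a.get('given', '')} {a.get('family', '')}".strip()
        for a in work.get("author", [])
    )
    title = work["title"][0] if work.get("title") else ""
    container = work["container-title"][-1] if work.get("container-title") else ""
    year = work["issued"]["date-parts"][0][0]
    pages = work.get("page", "")
    return f"{authors}: {title}. In: {container}, pp. {pages} ({year}). doi:{work['DOI']}"

if __name__ == "__main__":
    work = fetch_work(DOI)
    print(format_citation(work))
    print(f"{work.get('references-count', 0)} references deposited by {work.get('publisher', '')}")

For the record above this would print the chapter's authors, title, the SOCO'13-CISIS'13-ICEUTE'13 proceedings volume, pages 101-108, year 2014, and its 11 deposited references.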