{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,19]],"date-time":"2026-03-19T14:47:16Z","timestamp":1773931636492,"version":"3.50.1"},"reference-count":38,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2015,2,1]],"date-time":"2015-02-01T00:00:00Z","timestamp":1422748800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"}],"funder":[{"name":"Indo-U.S. Science and Technology Forum (IUSSTF), New Delhi, INDIA","award":["ECCS-1128050"],"award-info":[{"award-number":["ECCS-1128050"]}]},{"name":"Indo-U.S. Science and Technology Forum (IUSSTF), New Delhi, INDIA","award":["IIS-1208623"],"award-info":[{"award-number":["IIS-1208623"]}]},{"name":"AFOSR EOARD","award":["13-3055"],"award-info":[{"award-number":["13-3055"]}]},{"name":"China NNSF","award":["61120106011"],"award-info":[{"award-number":["61120106011"]}]},{"name":"China Education Ministry Project 111","award":["B08015"],"award-info":[{"award-number":["B08015"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cybern."],"published-print":{"date-parts":[[2015,2]]},"DOI":"10.1109\/tcyb.2014.2322116","type":"journal-article","created":{"date-parts":[[2015,1,13]],"date-time":"2015-01-13T21:46:42Z","timestamp":1421185602000},"page":"165-176","source":"Crossref","is-referenced-by-count":87,"title":["Continuous-Time Q-Learning for Infinite-Horizon Discounted Cost Linear Quadratic Regulator Problems"],"prefix":"10.1109","volume":"45","author":[{"given":"Muthukumar","family":"Palanisamy","sequence":"first","affiliation":[]},{"given":"Hamidreza","family":"Modares","sequence":"additional","affiliation":[]},{"given":"Frank L.","family":"Lewis","sequence":"additional","affiliation":[]},{"given":"Muhammad","family":"Aurangzeb","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref38","first-page":"1884","article-title":"Neural network based online simultaneous policy update algorithm for solving the HJI equation in nonlinear $H_\\infty $ control","volume":"23","author":"wu","year":"2012","journal-title":"IEEE Trans Control Syst Technol"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2007.368195"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2008.08.017"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/37.126844"},{"key":"ref30","author":"sutton","year":"1998","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref37","article-title":"Approximate dynamic programming for real-time control and neural modelling","author":"werbos","year":"1992","journal-title":"Handbook of Intelligent Control"},{"key":"ref36","doi-asserted-by":"crossref","first-page":"67","DOI":"10.7551\/mitpress\/4939.003.0007","article-title":"A menu of designs for reinforcement learning over time","author":"werbos","year":"1991","journal-title":"Neural Networks for Control"},{"key":"ref35","article-title":"Learning from delayed rewards","author":"watkins","year":"1989"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref10","doi-asserted-by":"crossref","first-page":"1167","DOI":"10.1016\/j.automatica.2014.02.015","article-title":"Reinforcement Q-learning for optimal tracking control of linear discrete-time systems with unknown dynamics","volume":"50","author":"kiumarsi","year":"2014","journal-title":"Automatica"},{"key":"ref11","doi-asserted-by":"crossref","first-page":"2850","DOI":"10.1016\/j.automatica.2012.06.008","article-title":"Integral Q-learning and explorized policy iteration for adaptive optimal control of continuous-time linear systems","volume":"48","author":"lee","year":"2012","journal-title":"Automatica"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1049\/iet-cta.2010.0521"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2010.5718015"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2008.925890"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCB.2010.2043839"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1002\/9781118122631"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/MCS.2012.2214134"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-012-1249-y"},{"key":"ref19","article-title":"Data-based approximate policy iteration for nonlinear continuous-time optimal control design","author":"luo","year":"2013"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/9780470544785"},{"key":"ref4","first-page":"393","article-title":"Reinforcement learning methods for continuous-time Markov decision problems","volume":"7","author":"bradtke","year":"1994","journal-title":"Adv Neural Inform Process"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2014.2313655"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2013.2253094"},{"key":"ref6","author":"hairer","year":"2008","journal-title":"Solving Ordinary Differential Equations I Non-Stiff Problems"},{"key":"ref29","author":"smart","year":"1973","journal-title":"Fixed Point Theorems"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.1994.735224"},{"key":"ref8","doi-asserted-by":"crossref","first-page":"237","DOI":"10.1613\/jair.301","article-title":"Reinforcement learning: A survey","volume":"4","author":"kaelbling","year":"1996","journal-title":"J Artif Intell Res"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"2699","DOI":"10.1016\/j.automatica.2012.06.096","article-title":"Computational adaptive optimal control for continuous time linear systems with completely unknown dynamics","volume":"48","author":"jiang","year":"2012","journal-title":"Automatica"},{"key":"ref2","author":"bertsekas","year":"1995","journal-title":"Dynamic Programming and Optimal Control"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.1968.1098829"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICNN.1994.374604"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2009.5399753"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2014.2317301"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2013.09.043"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2001.946342"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCC.2002.801727"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1002\/9780470182963"},{"key":"ref25","article-title":"Numerical schemes for the continuous Q-function of reinforcement learning","author":"pareigis","year":"1997"}],"container-title":["IEEE Transactions on Cybernetics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221036\/7008577\/06822502.pdf?arnumber=6822502","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,6,6]],"date-time":"2024-06-06T15:00:21Z","timestamp":1717686021000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/6822502\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2015,2]]},"references-count":38,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tcyb.2014.2322116","relation":{},"ISSN":["2168-2267","2168-2275"],"issn-type":[{"value":"2168-2267","type":"print"},{"value":"2168-2275","type":"electronic"}],"subject":[],"published":{"date-parts":[[2015,2]]}}}