{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,15]],"date-time":"2026-04-15T18:54:14Z","timestamp":1776279254054,"version":"3.50.1"},"reference-count":191,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2021,4,20]],"date-time":"2021-04-20T00:00:00Z","timestamp":1618876800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,4,20]],"date-time":"2021-04-20T00:00:00Z","timestamp":1618876800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Artif Intell Rev"],"published-print":{"date-parts":[[2022,2]]},"DOI":"10.1007\/s10462-021-09997-9","type":"journal-article","created":{"date-parts":[[2021,4,20]],"date-time":"2021-04-20T08:04:02Z","timestamp":1618905842000},"page":"945-990","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":304,"title":["Reinforcement learning in robotic applications: a comprehensive survey"],"prefix":"10.1007","volume":"55","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2494-0710","authenticated-orcid":false,"given":"Bharat","family":"Singh","sequence":"first","affiliation":[]},{"given":"Rajesh","family":"Kumar","sequence":"additional","affiliation":[]},{"given":"Vinay Pratap","family":"Singh","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,4,20]]},"reference":[{"issue":"4","key":"9997_CR1","doi-asserted-by":"crossref","first-page":"485","DOI":"10.1109\/5326.897075","volume":"30","author":"O Abul","year":"2000","unstructured":"Abul O, Polat F, Alhajj R (2000) Multiagent reinforcement learning using function approximation. IEEE Trans Syst Man Cybern Part C (Appl Rev) 30(4):485\u2013497","journal-title":"IEEE Trans Syst Man Cybern Part C (Appl Rev)"},{"issue":"2","key":"9997_CR2","doi-asserted-by":"crossref","first-page":"201","DOI":"10.1109\/TSMCC.2011.2106494","volume":"42","author":"S Adam","year":"2011","unstructured":"Adam S, Busoniu L, Babuska R (2011) Experience replay for real-time reinforcement learning control. IEEE Trans Syst Man Cybern Part C (Appl Rev) 42(2):201\u2013212","journal-title":"IEEE Trans Syst Man Cybern Part C (Appl Rev)"},{"issue":"1","key":"9997_CR3","doi-asserted-by":"crossref","first-page":"108","DOI":"10.1109\/LRA.2017.2734247","volume":"3","author":"Y Ansari","year":"2017","unstructured":"Ansari Y, Manti M, Falotico E, Cianchetti M, Laschi C (2017) Multiobjective optimization for stiffness and position control in a soft robot arm module. IEEE Robot Autom Lett 3(1):108\u2013115","journal-title":"IEEE Robot Autom Lett"},{"issue":"4","key":"9997_CR4","doi-asserted-by":"crossref","first-page":"763","DOI":"10.1109\/TNNLS.2014.2323247","volume":"26","author":"EA Antonelo","year":"2014","unstructured":"Antonelo EA, Schrauwen B (2014) On learning navigation behaviors for small mobile robots with reservoir computing architectures. IEEE Trans Neural Netw Learn Syst 26(4):763\u2013780","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"6","key":"9997_CR5","doi-asserted-by":"crossref","first-page":"26","DOI":"10.1109\/MSP.2017.2743240","volume":"34","author":"K Arulkumaran","year":"2017","unstructured":"Arulkumaran K, Deisenroth MP, Brundage M, Bharath AA (2017) Deep reinforcement learning: a brief survey. 
IEEE Signal Process Mag 34(6):26\u201338","journal-title":"IEEE Signal Process Mag"},{"issue":"4","key":"9997_CR6","doi-asserted-by":"crossref","first-page":"505","DOI":"10.1038\/nn.4506","volume":"20","author":"BB Averbeck","year":"2017","unstructured":"Averbeck BB, Costa VD (2017) Motivational neural circuits underlying reinforcement learning. Nat Neurosci 20(4):505\u2013512","journal-title":"Nat Neurosci"},{"key":"9997_CR7","doi-asserted-by":"crossref","unstructured":"Baird L (1995) Residual algorithms: reinforcement learning with function approximation. In: Machine learning proceedings 1995, Elsevier, pp 30\u201337","DOI":"10.1016\/B978-1-55860-377-6.50013-X"},{"key":"9997_CR8","unstructured":"Baird\u00a0III LC, Moore AW (1999) Gradient descent for general reinforcement learning. In: Advances in neural information processing systems, pp 968\u2013974"},{"key":"9997_CR9","doi-asserted-by":"crossref","first-page":"834","DOI":"10.1109\/TSMC.1983.6313077","volume":"5","author":"AG Barto","year":"1983","unstructured":"Barto AG, Sutton RS, Anderson CW (1983) Neuron like adaptive elements that can solve difficult learning control problems. IEEE Trans Syst Man Cybern 5:834\u2013846","journal-title":"IEEE Trans Syst Man Cybern"},{"key":"9997_CR10","doi-asserted-by":"crossref","unstructured":"Bejar E, Moran A (2019) A preview neuro-fuzzy controller based on deep reinforcement learning for backing up a truck-trailer vehicle. In: 2019 IEEE canadian conference of electrical and computer engineering (CCECE), IEEE, pp 1\u20134","DOI":"10.1109\/CCECE.2019.8861534"},{"issue":"3","key":"9997_CR11","doi-asserted-by":"crossref","first-page":"464","DOI":"10.1109\/21.364859","volume":"25","author":"HR Beom","year":"1995","unstructured":"Beom HR, Cho HS (1995) A sensor-based navigation for a mobile robot using fuzzy logic and reinforcement learning. IEEE Trans Syst Man Cybern 25(3):464\u2013477","journal-title":"IEEE Trans Syst Man Cybern"},{"key":"9997_CR12","volume-title":"Dynamic programming and optimal control","author":"DP Bertsekas","year":"1995","unstructured":"Bertsekas DP (1995) Dynamic programming and optimal control. Athena scientific, Belmont"},{"issue":"1","key":"9997_CR13","first-page":"1","volume":"6","author":"DP Bertsekas","year":"2018","unstructured":"Bertsekas DP (2018) Feature-based aggregation and deep reinforcement learning: a survey and some new implementations. IEEE\/CAA J Autom Sin 6(1):1\u201331","journal-title":"IEEE\/CAA J Autom Sin"},{"issue":"4","key":"9997_CR14","doi-asserted-by":"crossref","first-page":"353","DOI":"10.1007\/s13218-015-0356-1","volume":"29","author":"W B\u00f6hmer","year":"2015","unstructured":"B\u00f6hmer W, Springenberg JT, Boedecker J, Riedmiller M, Obermayer K (2015) Autonomous learning of state representations for control: an emerging field aims to autonomously learn state representations for reinforcement learning agents from their real-world sensor observations. KI-K\u00fcnstliche Intelligenz 29(4):353\u2013362","journal-title":"KI-K\u00fcnstliche Intelligenz"},{"issue":"3","key":"9997_CR15","doi-asserted-by":"crossref","first-page":"288","DOI":"10.1109\/3477.931510","volume":"31","author":"A Bonarini","year":"2001","unstructured":"Bonarini A, Bonacina C, Matteucci M (2001) An approach to the design of reinforcement functions in real world, agent-based applications. 
IEEE Trans Syst Man Cybern Part B (Cybern) 31(3):288\u2013301","journal-title":"IEEE Trans Syst Man Cybern Part B (Cybern)"},{"key":"9997_CR16","first-page":"1021","volume":"17","author":"M Bowling","year":"2001","unstructured":"Bowling M, Veloso M (2001) Rational and convergent learning in stochastic games. Int Joint Conf Artif Intell 17:1021\u20131026","journal-title":"Int Joint Conf Artif Intell"},{"issue":"2","key":"9997_CR17","doi-asserted-by":"crossref","first-page":"215","DOI":"10.1016\/S0004-3702(02)00121-2","volume":"136","author":"M Bowling","year":"2002","unstructured":"Bowling M, Veloso M (2002) Multiagent learning using a variable learning rate. Artif Intell 136(2):215\u2013250","journal-title":"Artif Intell"},{"issue":"2\u20133","key":"9997_CR18","doi-asserted-by":"crossref","first-page":"233","DOI":"10.1023\/A:1017936530646","volume":"49","author":"JA Boyan","year":"2002","unstructured":"Boyan JA (2002) Technical update: least-squares temporal difference learning. Mach Learn 49(2\u20133):233\u2013246","journal-title":"Mach Learn"},{"key":"9997_CR19","doi-asserted-by":"crossref","unstructured":"Bradtke SJ, Ydstie BE, Barto AG (1994) Adaptive linear quadratic control using policy iteration. In: Proceedings of 1994 American control conference-ACC\u201994, IEEE, vol\u00a03, pp 3475\u20133479","DOI":"10.1109\/ACC.1994.735224"},{"issue":"2","key":"9997_CR20","doi-asserted-by":"crossref","first-page":"1549","DOI":"10.1109\/LRA.2019.2896467","volume":"4","author":"M Breyer","year":"2019","unstructured":"Breyer M, Furrer F, Novkovic T, Siegwart R, Nieto J (2019) Comparing task simplifications to learn closed-loop object picking using deep reinforcement learning. IEEE Robot Autom Lett 4(2):1549\u20131556","journal-title":"IEEE Robot Autom Lett"},{"issue":"2","key":"9997_CR22","doi-asserted-by":"crossref","first-page":"156","DOI":"10.1109\/TSMCC.2007.913919","volume":"38","author":"L Bu","year":"2008","unstructured":"Bu L, Babu R, De Schutter B et al (2008) A comprehensive survey of multiagent reinforcement learning. IEEE Trans Syst Man Cybern Part C (Appl Rev) 38(2):156\u2013172","journal-title":"IEEE Trans Syst Man Cybern Part C (Appl Rev)"},{"issue":"7","key":"9997_CR23","doi-asserted-by":"crossref","first-page":"1457","DOI":"10.1109\/TNNLS.2015.2442233","volume":"27","author":"W Caarls","year":"2015","unstructured":"Caarls W, Schuitema E (2015) Parallel online temporal difference learning for motor control. IEEE Trans Neural Netw Learn Syst 27(7):1457\u20131468","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"9997_CR24","doi-asserted-by":"crossref","first-page":"96549","DOI":"10.1109\/ACCESS.2019.2929120","volume":"7","author":"X Cao","year":"2019","unstructured":"Cao X, Sun C, Yan M (2019) Target search control of auv in underwater environment with deep reinforcement learning. IEEE Access 7:96549\u201396559","journal-title":"IEEE Access"},{"key":"9997_CR25","doi-asserted-by":"crossref","first-page":"183","DOI":"10.1016\/j.eswa.2017.03.002","volume":"80","author":"I Carlucho","year":"2017","unstructured":"Carlucho I, De Paula M, Villar SA, Acosta GG (2017) Incremental q-learning strategy for adaptive pid control of mobile robots. 
Expert Syst Appl 80:183\u2013199","journal-title":"Expert Syst Appl"},{"key":"9997_CR26","doi-asserted-by":"crossref","first-page":"71","DOI":"10.1016\/j.robot.2018.05.016","volume":"107","author":"I Carlucho","year":"2018","unstructured":"Carlucho I, De Paula M, Wang S, Petillot Y, Acosta GG (2018) Adaptive low-level control of autonomous underwater vehicles using deep reinforcement learning. Robot Auton Syst 107:71\u201386","journal-title":"Robot Auton Syst"},{"issue":"4","key":"9997_CR27","doi-asserted-by":"crossref","first-page":"3774","DOI":"10.1109\/LRA.2019.2929996","volume":"4","author":"G Chalvatzaki","year":"2019","unstructured":"Chalvatzaki G, Papageorgiou XS, Maragos P, Tzafestas CS (2019) Learn to adapt to human walking: a model-based reinforcement learning approach for a robotic assistant rollator. IEEE Robot Autom Lett 4(4):3774\u20133781","journal-title":"IEEE Robot Autom Lett"},{"key":"9997_CR28","doi-asserted-by":"crossref","first-page":"63","DOI":"10.1016\/j.neucom.2017.06.066","volume":"272","author":"Y Cheng","year":"2018","unstructured":"Cheng Y, Zhang W (2018) Concise deep reinforcement learning obstacle avoidance for underactuated unmanned marine vessels. Neurocomputing 272:63\u201373","journal-title":"Neurocomputing"},{"issue":"3","key":"9997_CR29","doi-asserted-by":"crossref","first-page":"602","DOI":"10.1109\/TRO.2018.2808924","volume":"34","author":"A Colom\u00e9","year":"2018","unstructured":"Colom\u00e9 A, Torras C (2018) Dimensionality reduction for dynamic movement primitives and application to bimanual manipulation of clothes. IEEE Trans Robot 34(3):602\u2013615","journal-title":"IEEE Trans Robot"},{"issue":"4","key":"9997_CR30","doi-asserted-by":"crossref","first-page":"271","DOI":"10.1109\/TCDS.2016.2543839","volume":"8","author":"F Cruz","year":"2016","unstructured":"Cruz F, Magg S, Weber C, Wermter S (2016) Training agents with interactive reinforcement learning and contextual affordances. IEEE Trans Cogn Dev Syst 8(4):271\u2013284","journal-title":"IEEE Trans Cogn Dev Syst"},{"issue":"3","key":"9997_CR31","doi-asserted-by":"crossref","first-page":"655","DOI":"10.1109\/TRO.2015.2419431","volume":"31","author":"M Cutler","year":"2015","unstructured":"Cutler M, Walsh TJ, How JP (2015) Real-world reinforcement learning via multifidelity simulators. IEEE Trans Robot 31(3):655\u2013671","journal-title":"IEEE Trans Robot"},{"key":"9997_CR32","unstructured":"Da\u00a0Silva B, Konidaris G, Barto A (2012) Learning parameterized skills. Preprint arXiv:12066398"},{"issue":"3","key":"9997_CR33","doi-asserted-by":"crossref","first-page":"285","DOI":"10.1109\/TITS.2005.853698","volume":"6","author":"X Dai","year":"2005","unstructured":"Dai X, Li CK, Rad AB (2005) An approach to tune fuzzy controllers based on reinforcement learning for autonomous vehicle control. IEEE Trans Intell Transp Syst 6(3):285\u2013293","journal-title":"IEEE Trans Intell Transp Syst"},{"issue":"2","key":"9997_CR34","doi-asserted-by":"crossref","first-page":"185","DOI":"10.1016\/j.conb.2008.08.003","volume":"18","author":"P Dayan","year":"2008","unstructured":"Dayan P, Niv Y (2008) Reinforcement learning: the good, the bad and the ugly. 
Curr Opin Neurobiol 18(2):185\u2013196","journal-title":"Curr Opin Neurobiol"},{"issue":"3","key":"9997_CR21","doi-asserted-by":"crossref","first-page":"1394","DOI":"10.1109\/LRA.2018.2800101","volume":"3","author":"T de Bruin","year":"2018","unstructured":"de Bruin T, Kober J, Tuyls K, Babu\u0161ka R (2018) Integrating state representation learning into deep reinforcement learning. IEEE Robot Autom Lett 3(3):1394\u20131401","journal-title":"IEEE Robot Autom Lett"},{"key":"9997_CR35","unstructured":"Deisenroth M, Rasmussen CE (2011) Pilco: a model-based and data-efficient approach to policy search. In: Proceedings of the 28th international conference on machine learning (ICML-11), pp 465\u2013472"},{"issue":"2","key":"9997_CR36","doi-asserted-by":"crossref","first-page":"408","DOI":"10.1109\/TPAMI.2013.218","volume":"37","author":"MP Deisenroth","year":"2013","unstructured":"Deisenroth MP, Fox D, Rasmussen CE (2013a) Gaussian processes for data-efficient learning in robotics and control. IEEE Trans Pattern Anal Mach Intell 37(2):408\u2013423","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"1\u20132","key":"9997_CR37","first-page":"1","volume":"2","author":"MP Deisenroth","year":"2013","unstructured":"Deisenroth MP, Neumann G, Peters J et al (2013b) A survey on policy search for robotics. Found Trends Robot 2(1\u20132):1\u2013142","journal-title":"Found Trends Robot"},{"issue":"1","key":"9997_CR38","doi-asserted-by":"crossref","first-page":"26","DOI":"10.1109\/TCDS.2017.2718938","volume":"11","author":"Z Deng","year":"2017","unstructured":"Deng Z, Guan H, Huang R, Liang H, Zhang L, Zhang J (2017) Combining model-based $$q$$-learning with structural knowledge transfer for robot skill learning. IEEE Trans Cogn Dev Syst 11(1):26\u201335","journal-title":"IEEE Trans Cogn Dev Syst"},{"issue":"1","key":"9997_CR39","doi-asserted-by":"crossref","first-page":"86","DOI":"10.1109\/TMECH.2010.2090896","volume":"17","author":"D Dong","year":"2010","unstructured":"Dong D, Chen C, Chu J, Tarn TJ (2010) Robust quantum-inspired reinforcement learning for robot navigation. IEEE\/ASME Trans Mech 17(1):86\u201397","journal-title":"IEEE\/ASME Trans Mech"},{"issue":"12","key":"9997_CR40","doi-asserted-by":"crossref","first-page":"2719","DOI":"10.1109\/TCYB.2014.2314294","volume":"44","author":"B Doroodgar","year":"2014","unstructured":"Doroodgar B, Liu Y, Nejat G (2014) A learning-based semi-autonomous controller for robotic exploration of unknown disaster scenes while searching for victims. IEEE Trans Cybern 44(12):2719\u20132732","journal-title":"IEEE Trans Cybern"},{"issue":"2","key":"9997_CR41","doi-asserted-by":"crossref","first-page":"394","DOI":"10.1109\/TPAMI.2013.191","volume":"37","author":"F Doshi-Velez","year":"2013","unstructured":"Doshi-Velez F, Pfau D, Wood F, Roy N (2013) Bayesian nonparametric methods for partially-observable reinforcement learning. IEEE Trans Pattern Anal Mach Intell 37(2):394\u2013407","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"3","key":"9997_CR42","doi-asserted-by":"crossref","first-page":"193","DOI":"10.1007\/s10462-011-9244-8","volume":"38","author":"Y Duan","year":"2012","unstructured":"Duan Y, Cui BX, Xu XH (2012) A multi-agent reinforcement learning approach to robot soccer. 
Artif Intell Rev 38(3):193\u2013211","journal-title":"Artif Intell Rev"},{"issue":"3","key":"9997_CR43","doi-asserted-by":"crossref","first-page":"271","DOI":"10.1016\/j.robot.2012.11.009","volume":"61","author":"A El-Fakdi","year":"2013","unstructured":"El-Fakdi A, Carreras M (2013) Two-step gradient-based reinforcement learning for underwater robotics behavior learning. Robot Auton Syst 61(3):271\u2013282","journal-title":"Robot Auton Syst"},{"issue":"3","key":"9997_CR44","doi-asserted-by":"crossref","first-page":"898","DOI":"10.1109\/TIE.2005.847576","volume":"52","author":"MJ Er","year":"2005","unstructured":"Er MJ, Deng C (2005) Obstacle avoidance of a mobile robot using hybrid learning approach. IEEE Trans Ind Electron 52(3):898\u2013905","journal-title":"IEEE Trans Ind Electron"},{"issue":"3","key":"9997_CR45","doi-asserted-by":"crossref","first-page":"1482","DOI":"10.1109\/LRA.2018.2800110","volume":"3","author":"P Falco","year":"2018","unstructured":"Falco P, Attawia A, Saveriano M, Lee D (2018) On policy learning robust to irreversible events: an application to robotic in-hand manipulation. IEEE Robot Autom Lett 3(3):1482\u20131489","journal-title":"IEEE Robot Autom Lett"},{"issue":"1","key":"9997_CR46","doi-asserted-by":"crossref","first-page":"23","DOI":"10.1109\/TEVC.2009.2016216","volume":"14","author":"AM Farahmand","year":"2009","unstructured":"Farahmand AM, Ahmadabadi MN, Lucas C, Araabi BN (2009) Interaction of culture-based learning and cooperative co-evolution and its application to automatic behavior-based system design. IEEE Trans Evol Comput 14(1):23\u201357","journal-title":"IEEE Trans Evol Comput"},{"issue":"3","key":"9997_CR47","doi-asserted-by":"crossref","first-page":"323","DOI":"10.1109\/JAS.2014.7004690","volume":"1","author":"A Faust","year":"2014","unstructured":"Faust A, Ruymgaart P, Salman M, Fierro R, Tapia L (2014) Continuous action reinforcement learning for control-affine systems with unknown dynamics. IEEE\/CAA J Autom Sin 1(3):323\u2013336","journal-title":"IEEE\/CAA J Autom Sin"},{"key":"9997_CR48","doi-asserted-by":"crossref","unstructured":"Foglino F, Christakou CC, Leonetti M (2019) An optimization framework for task sequencing in curriculum learning. In: 2019 Joint IEEE 9th international conference on development and learning and epigenetic robotics (ICDL-EpiRob), IEEE, pp 207\u2013214","DOI":"10.1109\/DEVLRN.2019.8850690"},{"key":"9997_CR49","doi-asserted-by":"crossref","unstructured":"Frost G, Maurelli F, Lane DM (2015) Reinforcement learning in a behaviour-based control architecture for marine archaeology. In: OCEANS 2015-Genova, IEEE, pp 1\u20135","DOI":"10.1109\/OCEANS-Genova.2015.7271619"},{"issue":"5","key":"9997_CR50","doi-asserted-by":"crossref","first-page":"2111","DOI":"10.1109\/TIE.2008.921205","volume":"55","author":"C Fu","year":"2008","unstructured":"Fu C, Chen K (2008) Gait synthesis and sensory control of stair climbing for a humanoid robot. IEEE Trans Ind Electron 55(5):2111\u20132120","journal-title":"IEEE Trans Ind Electron"},{"key":"9997_CR51","doi-asserted-by":"crossref","unstructured":"Gordon GJ (1995) Stable function approximation in dynamic programming. In: Machine learning proceedings 1995, Elsevier, pp 261\u2013268","DOI":"10.1016\/B978-1-55860-377-6.50040-2"},{"issue":"2","key":"9997_CR52","doi-asserted-by":"crossref","first-page":"178","DOI":"10.1287\/ijoc.1080.0305","volume":"21","author":"A Gosavi","year":"2009","unstructured":"Gosavi A (2009) Reinforcement learning: a tutorial survey and recent advances. 
INFORMS J Comput 21(2):178\u2013192","journal-title":"INFORMS J Comput"},{"issue":"4","key":"9997_CR53","doi-asserted-by":"crossref","first-page":"4394","DOI":"10.1109\/LRA.2019.2932575","volume":"4","author":"SK Gottipati","year":"2019","unstructured":"Gottipati SK, Seo K, Bhatt D, Mai V, Murthy K, Paull L (2019) Deep active localization. IEEE Robot Autom Lett 4(4):4394\u20134401","journal-title":"IEEE Robot Autom Lett"},{"key":"9997_CR54","first-page":"242","volume":"3","author":"A Greenwald","year":"2003","unstructured":"Greenwald A, Hall K, Serrano R (2003) Correlated q-learning. ICML 3:242\u2013249","journal-title":"ICML"},{"key":"9997_CR55","doi-asserted-by":"crossref","unstructured":"Grigorescu S, Trasnea B, Marina L, Vasilcoi A, Cocias T (2019) Neurotrajectory: a neuroevolutionary approach to local state trajectory learning for autonomous vehicles. Preprint arXiv:190610971","DOI":"10.1109\/LRA.2019.2926224"},{"issue":"6","key":"9997_CR56","doi-asserted-by":"crossref","first-page":"1291","DOI":"10.1109\/TSMCC.2012.2218595","volume":"42","author":"I Grondman","year":"2012","unstructured":"Grondman I, Busoniu L, Lopes GA, Babuska R (2012) A survey of actor-critic reinforcement learning: standard and natural policy gradients. IEEE Trans Syst Man Cybern Part C (Appl Rev) 42(6):1291\u20131307","journal-title":"IEEE Trans Syst Man Cybern Part C (Appl Rev)"},{"issue":"4","key":"9997_CR57","doi-asserted-by":"crossref","first-page":"670","DOI":"10.1109\/TSMCC.2007.897491","volume":"37","author":"D Gu","year":"2007","unstructured":"Gu D, Hu H (2007) Integration of coordination architecture and behavior fuzzy learning in quadruped walking robots. IEEE Trans Syst Man Cybern Part C (Appl Rev) 37(4):670\u2013681","journal-title":"IEEE Trans Syst Man Cybern Part C (Appl Rev)"},{"issue":"6","key":"9997_CR58","doi-asserted-by":"crossref","first-page":"671","DOI":"10.1016\/0893-6080(90)90056-Q","volume":"3","author":"V Gullapalli","year":"1990","unstructured":"Gullapalli V (1990) A stochastic reinforcement learning algorithm for learning real-valued functions. Neural Netw 3(6):671\u2013692","journal-title":"Neural Netw"},{"issue":"5","key":"9997_CR59","doi-asserted-by":"crossref","first-page":"2140","DOI":"10.1109\/TSMCB.2004.832154","volume":"34","author":"M Guo","year":"2004","unstructured":"Guo M, Liu Y, Malec J (2004) A new q-learning algorithm based on the metropolis criterion. IEEE Trans Syst Man Cybern Part B (Cybern) 34(5):2140\u20132143","journal-title":"IEEE Trans Syst Man Cybern Part B (Cybern)"},{"issue":"2","key":"9997_CR60","doi-asserted-by":"crossref","first-page":"159","DOI":"10.1162\/106365601750190398","volume":"9","author":"N Hansen","year":"2001","unstructured":"Hansen N, Ostermeier A (2001) Completely derandomized self-adaptation in evolution strategies. Evol Comput 9(2):159\u2013195","journal-title":"Evol Comput"},{"issue":"6","key":"9997_CR61","doi-asserted-by":"crossref","first-page":"1123","DOI":"10.1109\/41.807999","volume":"46","author":"Y Hasegawa","year":"1999","unstructured":"Hasegawa Y, Fukuda T, Shimojima K (1999) Self-scaling reinforcement learning for fuzzy logic controller-applications to motion control of two-link brachiation robot. 
IEEE Trans Ind Electron 46(6):1123\u20131131","journal-title":"IEEE Trans Ind Electron"},{"issue":"2","key":"9997_CR62","doi-asserted-by":"crossref","first-page":"2172","DOI":"10.1109\/LRA.2019.2900768","volume":"4","author":"M Hazara","year":"2019","unstructured":"Hazara M, Kyrki V (2019) Transferring generalizable motor primitives from simulation to real world. IEEE Robot Autom Lett 4(2):2172\u20132179","journal-title":"IEEE Robot Autom Lett"},{"issue":"4","key":"9997_CR63","doi-asserted-by":"crossref","first-page":"602","DOI":"10.1109\/JAS.2017.7510604","volume":"4","author":"W He","year":"2017","unstructured":"He W, Li Z, Chen CP (2017) A survey of human-centered intelligent robots: issues and challenges. IEEE\/CAA J Autom Sin 4(4):602\u2013609","journal-title":"IEEE\/CAA J Autom Sin"},{"key":"9997_CR64","doi-asserted-by":"crossref","unstructured":"Heidrich-Meisner V, Igel C (2008) Evolution strategies for direct policy search. In: International conference on parallel problem solving from nature, Springer, pp 428\u2013437","DOI":"10.1007\/978-3-540-87700-4_43"},{"key":"9997_CR65","unstructured":"Ho MK, Littman ML, Cushman F, Austerweil JL (2015) Teaching with rewards and punishments: Reinforcement or communication? In: CogSci"},{"key":"9997_CR66","doi-asserted-by":"crossref","unstructured":"Hu H, Song S, Chen CP (2019) Plume tracing via model-free reinforcement learning method. IEEE Trans Neural Netw Learn Syst","DOI":"10.1109\/TNNLS.2018.2885374"},{"key":"9997_CR67","first-page":"1039","volume":"4","author":"J Hu","year":"2003","unstructured":"Hu J, Wellman MP (2003) Nash q-learning for general-sum stochastic games. J Mach Learn Res 4:1039\u20131069","journal-title":"J Mach Learn Res"},{"key":"9997_CR68","doi-asserted-by":"crossref","unstructured":"Hu J, Zhang H, Song L (2018) Reinforcement learning for decentralized trajectory design in cellular uav networks with sense-and-send protocol. IEEE Internet of Things Journal","DOI":"10.1109\/JIOT.2018.2876513"},{"key":"9997_CR69","doi-asserted-by":"crossref","unstructured":"Huang R, Cheng H, Qiu J, Zhang J (2019) Learning physical human\u2013robot interaction with coupled cooperative primitives for a lower exoskeleton. IEEE Trans Autom Sci Eng","DOI":"10.1109\/TASE.2018.2886376"},{"issue":"4","key":"9997_CR70","doi-asserted-by":"crossref","first-page":"730","DOI":"10.1109\/TSMC.2017.2712561","volume":"49","author":"Z Huang","year":"2017","unstructured":"Huang Z, Xu X, He H, Tan J, Sun Z (2017) Parameterized batch reinforcement learning for longitudinal control of autonomous land vehicles. IEEE Trans Syst Man Cybern Syst 49(4):730\u2013741","journal-title":"IEEE Trans Syst Man Cybern Syst"},{"issue":"1","key":"9997_CR71","doi-asserted-by":"crossref","first-page":"186","DOI":"10.1109\/TCYB.2015.2509646","volume":"47","author":"SM Hung","year":"2016","unstructured":"Hung SM, Givigi SN (2016) A q-learning approach to flocking with uavs in a stochastic environment. IEEE Trans Cybern 47(1):186\u2013197","journal-title":"IEEE Trans Cybern"},{"issue":"8","key":"9997_CR72","doi-asserted-by":"crossref","first-page":"2797","DOI":"10.1109\/TIM.2009.2016301","volume":"58","author":"KS Hwang","year":"2009","unstructured":"Hwang KS, Lo CY, Liu WL (2009) A modular agent architecture for an autonomous robot. 
IEEE Trans Instrum Meas 58(8):2797\u20132806","journal-title":"IEEE Trans Instrum Meas"},{"issue":"12","key":"9997_CR73","doi-asserted-by":"crossref","first-page":"1481","DOI":"10.1109\/TSMC.2015.2418321","volume":"45","author":"KS Hwang","year":"2015","unstructured":"Hwang KS, Lin JL, Yeh KH (2015) Learning to adjust and refine gait patterns for a biped robot. IEEE Trans Syst Man Cybern Syst 45(12):1481\u20131490","journal-title":"IEEE Trans Syst Man Cybern Syst"},{"issue":"4","key":"9997_CR74","doi-asserted-by":"crossref","first-page":"2096","DOI":"10.1109\/LRA.2017.2720851","volume":"2","author":"J Hwangbo","year":"2017","unstructured":"Hwangbo J, Sa I, Siegwart R, Hutter M (2017) Control of a quadrotor with reinforcement learning. IEEE Robot Autom Lett 2(4):2096\u20132103","journal-title":"IEEE Robot Autom Lett"},{"issue":"4","key":"9997_CR75","doi-asserted-by":"crossref","first-page":"792","DOI":"10.1109\/TNN.2004.828760","volume":"15","author":"K Iwata","year":"2004","unstructured":"Iwata K, Ikeda K, Sakai H (2004) A new criterion using information gain for action selection strategy in reinforcement learning. IEEE Trans Neural Netw 15(4):792\u2013799","journal-title":"IEEE Trans Neural Netw"},{"issue":"10","key":"9997_CR76","doi-asserted-by":"crossref","first-page":"3931","DOI":"10.1109\/TIE.2009.2017557","volume":"56","author":"CF Juang","year":"2009","unstructured":"Juang CF, Hsu CH (2009) Reinforcement ant optimized fuzzy controller for mobile-robot wall-following control. IEEE Trans Ind Electron 56(10):3931\u20133940","journal-title":"IEEE Trans Ind Electron"},{"key":"9997_CR77","doi-asserted-by":"crossref","first-page":"237","DOI":"10.1613\/jair.301","volume":"4","author":"LP Kaelbling","year":"1996","unstructured":"Kaelbling LP, Littman ML, Moore AW (1996) Reinforcement learning: a survey. J Artif Intell Res 4:237\u2013285","journal-title":"J Artif Intell Res"},{"issue":"3","key":"9997_CR78","doi-asserted-by":"crossref","first-page":"318","DOI":"10.1109\/TEVC.2005.850290","volume":"9","author":"S Kamio","year":"2005","unstructured":"Kamio S, Iba H (2005) Adaptation technique for integrating genetic programming and reinforcement learning for real robots. IEEE Trans Evol Comput 9(3):318\u2013333","journal-title":"IEEE Trans Evol Comput"},{"issue":"4","key":"9997_CR79","doi-asserted-by":"crossref","first-page":"881","DOI":"10.1109\/TCDS.2018.2843122","volume":"10","author":"M Khamassi","year":"2018","unstructured":"Khamassi M, Velentzas G, Tsitsimis T, Tzafestas C (2018) Robot fast adaptation to changes in human engagement during simulated dynamic social interaction with active exploration in parameterized reinforcement learning. IEEE Trans Cogn Dev Syst 10(4):881\u2013893","journal-title":"IEEE Trans Cogn Dev Syst"},{"issue":"2","key":"9997_CR80","first-page":"433","volume":"40","author":"B Kim","year":"2009","unstructured":"Kim B, Park J, Park S, Kang S (2009) Impedance learning for robotic contact tasks using natural actor-critic algorithm. IEEE Trans Syst Man Cybern Part B (Cybern) 40(2):433\u2013443","journal-title":"IEEE Trans Syst Man Cybern Part B (Cybern)"},{"issue":"6","key":"9997_CR81","doi-asserted-by":"crossref","first-page":"2042","DOI":"10.1109\/TNNLS.2017.2773458","volume":"29","author":"B Kiumarsi","year":"2017","unstructured":"Kiumarsi B, Vamvoudakis KG, Modares H, Lewis FL (2017) Optimal and autonomous control using reinforcement learning: a survey. 
IEEE Trans Neural Netw Learn Syst 29(6):2042\u20132062","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"9997_CR82","doi-asserted-by":"crossref","first-page":"171","DOI":"10.1007\/s10994-010-5223-6","volume":"84","author":"J Kober","year":"2011","unstructured":"Kober J, Peters J (2011) Policy search for motor primitives in robotics. Mach Learn 84:171\u2013203","journal-title":"Mach Learn"},{"issue":"11","key":"9997_CR83","doi-asserted-by":"crossref","first-page":"1238","DOI":"10.1177\/0278364913495721","volume":"32","author":"J Kober","year":"2013","unstructured":"Kober J, Bagnell JA, Peters J (2013) Reinforcement learning in robotics: a survey. Int J Robot Res 32(11):1238\u20131274","journal-title":"Int J Robot Res"},{"issue":"2","key":"9997_CR84","doi-asserted-by":"crossref","first-page":"1784","DOI":"10.1109\/LRA.2019.2896466","volume":"4","author":"O Ko\u00e7","year":"2019","unstructured":"Ko\u00e7 O, Peters J (2019) Learning to serve: an experimental study for a new learning from demonstrations framework. IEEE Robot Autom Lett 4(2):1784\u20131791","journal-title":"IEEE Robot Autom Lett"},{"key":"9997_CR85","unstructured":"Konda VR, Tsitsiklis JN (2000) Actor-critic algorithms. In: Advances in neural information processing systems, pp 1008\u20131014"},{"issue":"3","key":"9997_CR86","doi-asserted-by":"crossref","first-page":"2471","DOI":"10.1109\/LRA.2018.2800106","volume":"3","author":"I Koryakovskiy","year":"2018","unstructured":"Koryakovskiy I, Kudruss M, Vallery H, Babu\u0161ka R, Caarls W (2018) Model-plant mismatch compensation using reinforcement learning. IEEE Robot Autom Lett 3(3):2471\u20132477","journal-title":"IEEE Robot Autom Lett"},{"issue":"1","key":"9997_CR87","first-page":"52","volume":"23","author":"HM La","year":"2014","unstructured":"La HM, Lim R, Sheng W (2014) Multirobot cooperative learning for predator avoidance. IEEE Trans Control Syst Technol 23(1):52\u201363","journal-title":"IEEE Trans Control Syst Technol"},{"issue":"4","key":"9997_CR88","doi-asserted-by":"crossref","first-page":"4224","DOI":"10.1109\/LRA.2019.2930489","volume":"4","author":"NO Lambert","year":"2019","unstructured":"Lambert NO, Drew DS, Yaconelli J, Levine S, Calandra R, Pister KS (2019) Low-level control of a quadrotor with deep model-based reinforcement learning. IEEE Robot Autom Lett 4(4):4224\u20134230","journal-title":"IEEE Robot Autom Lett"},{"issue":"2","key":"9997_CR89","doi-asserted-by":"crossref","first-page":"119","DOI":"10.1007\/s10462-011-9243-9","volume":"38","author":"Y Lasheng","year":"2012","unstructured":"Lasheng Y, Zhongbin J, Kang L (2012) Research on task decomposition and state abstraction in reinforcement learning. Artif Intell Rev 38(2):119\u2013127","journal-title":"Artif Intell Rev"},{"issue":"3","key":"9997_CR90","doi-asserted-by":"crossref","first-page":"2039","DOI":"10.1007\/s10462-017-9579-x","volume":"52","author":"TP Le","year":"2019","unstructured":"Le TP, Ngo VA, Jaramillo PM, Chung T (2019) Importance sampling policy gradient algorithms in reproducing kernel hilbert space. Artif Intell Rev 52(3):2039\u20132059","journal-title":"Artif Intell Rev"},{"key":"9997_CR91","doi-asserted-by":"crossref","unstructured":"Li G, Gomez R, Nakamura K, He B (2019) Human-centered reinforcement learning: a survey. 
IEEE Trans Hum Mach Syst","DOI":"10.1109\/THMS.2019.2912447"},{"issue":"3","key":"9997_CR92","first-page":"736","volume":"41","author":"THS Li","year":"2010","unstructured":"Li THS, Su YT, Lai SW, Hu JJ (2010) Walking motion generation, synthesis, and control for biped robot by using pgrl, lpi, and fuzzy logic. IEEE Trans Syst Man Cybern Part B (Cybern) 41(3):736\u2013748","journal-title":"IEEE Trans Syst Man Cybern Part B (Cybern)"},{"issue":"10","key":"9997_CR93","doi-asserted-by":"crossref","first-page":"8013","DOI":"10.1109\/TIE.2017.2694391","volume":"64","author":"Z Li","year":"2017","unstructured":"Li Z, Liu J, Huang Z, Peng Y, Pu H, Ding L (2017a) Adaptive impedance control of human-robot cooperation using reinforcement learning. IEEE Trans Ind Electron 64(10):8013\u20138022","journal-title":"IEEE Trans Ind Electron"},{"issue":"1","key":"9997_CR94","doi-asserted-by":"crossref","first-page":"121","DOI":"10.1109\/TMECH.2017.2717461","volume":"23","author":"Z Li","year":"2017","unstructured":"Li Z, Zhao T, Chen F, Hu Y, Su CY, Fukuda T (2017b) Reinforcement learning of manipulation and grasping using dynamical movement primitives for a humanoidlike mobile manipulator. IEEE\/ASME Trans Mech 23(1):121\u2013131","journal-title":"IEEE\/ASME Trans Mech"},{"issue":"6","key":"9997_CR95","first-page":"1033","volume":"25","author":"JL Lin","year":"2013","unstructured":"Lin JL, Hwang KS, Wang YL (2013) A simple scheme for formation control based on weighted behavior learning. IEEE Trans Neural Netw Learn Syst 25(6):1033\u20131044","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"2","key":"9997_CR96","doi-asserted-by":"crossref","first-page":"87","DOI":"10.1007\/s10462-011-9204-3","volume":"36","author":"Y Lin","year":"2011","unstructured":"Lin Y, Makedon F, Xu Y (2011) Episodic task learning in markov decision processes. Artif Intell Rev 36(2):87\u201398","journal-title":"Artif Intell Rev"},{"issue":"7553","key":"9997_CR97","doi-asserted-by":"crossref","first-page":"445","DOI":"10.1038\/nature14540","volume":"521","author":"ML Littman","year":"2015","unstructured":"Littman ML (2015) Reinforcement learning improves behaviour from evaluative feedback. Nature 521(7553):445\u2013451","journal-title":"Nature"},{"key":"9997_CR98","doi-asserted-by":"crossref","unstructured":"Liu S, Ngiam KY, Feng M (2019) Deep reinforcement learning for clinical decision support: a brief survey. Preprint arXiv:190709475","DOI":"10.2196\/preprints.18477"},{"key":"9997_CR99","unstructured":"Luo B, Liu D, Huang T, Liu J (2017) Output tracking control based on adaptive dynamic programming with multistep policy evaluation. IEEE Trans Syst Man Cybern Syst"},{"issue":"12","key":"9997_CR100","doi-asserted-by":"crossref","first-page":"3337","DOI":"10.1109\/TCYB.2018.2821369","volume":"48","author":"B Luo","year":"2018","unstructured":"Luo B, Yang Y, Liu D (2018) Adaptive q-learning for data-based optimal output regulation with experience replay. IEEE Trans Cybern 48(12):3337\u20133348","journal-title":"IEEE Trans Cybern"},{"issue":"1","key":"9997_CR101","doi-asserted-by":"crossref","first-page":"76","DOI":"10.1109\/TNNLS.2019.2899594","volume":"31","author":"B Luo","year":"2019","unstructured":"Luo B, Yang Y, Liu D, Wu HN (2019) Event-triggered optimal control with performance guarantees using adaptive dynamic programming. 
IEEE Trans Neural Netw Learn Syst 31(1):76\u201388","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"9997_CR102","doi-asserted-by":"crossref","unstructured":"Lv L, Zhang S, Ding D, Wang Y (2019) Path planning via an improved dqn-based learning policy. IEEE Access","DOI":"10.1109\/ACCESS.2019.2918703"},{"issue":"3\u20134","key":"9997_CR103","doi-asserted-by":"crossref","first-page":"375","DOI":"10.1023\/B:AIRE.0000036264.95672.64","volume":"21","author":"MG Madden","year":"2004","unstructured":"Madden MG, Howley T (2004) Transfer of experience between reinforcement learning environments with progressive difficulty. Artif Intell Rev 21(3\u20134):375\u2013398","journal-title":"Artif Intell Rev"},{"key":"9997_CR104","doi-asserted-by":"crossref","unstructured":"Markova VD, Shopov VK (2019) Knowledge transfer in reinforcement learning agent. In: 2019 international conference on information technologies (InfoTech), IEEE, pp 1\u20134","DOI":"10.1109\/InfoTech.2019.8860881"},{"issue":"1","key":"9997_CR105","doi-asserted-by":"crossref","first-page":"43","DOI":"10.1109\/TCIAIG.2010.2100395","volume":"3","author":"M McPartland","year":"2010","unstructured":"McPartland M, Gallagher M (2010) Reinforcement learning in first person shooter games. IEEE Trans Comput Intell AI Games 3(1):43\u201356","journal-title":"IEEE Trans Comput Intell AI Games"},{"issue":"3","key":"9997_CR106","doi-asserted-by":"crossref","first-page":"474","DOI":"10.1109\/3477.499797","volume":"26","author":"LA Meeden","year":"1996","unstructured":"Meeden LA (1996) An incremental approach to developing intelligent neural network controllers for robots. IEEE Trans Syst Man Cybern Part B (Cybern) 26(3):474\u2013485","journal-title":"IEEE Trans Syst Man Cybern Part B (Cybern)"},{"key":"9997_CR107","doi-asserted-by":"crossref","unstructured":"Melo FS, Meyn SP, Ribeiro MI (2008) An analysis of reinforcement learning with function approximation. In: Proceedings of the 25th international conference on Machine learning, ACM, pp 664\u2013671","DOI":"10.1145\/1390156.1390240"},{"issue":"7540","key":"9997_CR108","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih V, Kavukcuoglu K, Silver D, Rusu AA, Veness J, Bellemare MG, Graves A, Riedmiller M, Fidjeland AK, Ostrovski G et al (2015a) Human-level control through deep reinforcement learning. Nature 518(7540):529","journal-title":"Nature"},{"issue":"7540","key":"9997_CR109","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih V, Kavukcuoglu K, Silver D, Rusu AA, Veness J, Bellemare MG, Graves A, Riedmiller M, Fidjeland AK, Ostrovski G et al (2015b) Human-level control through deep reinforcement learning. Nature 518(7540):529\u2013533","journal-title":"Nature"},{"issue":"3","key":"9997_CR110","doi-asserted-by":"crossref","first-page":"655","DOI":"10.1109\/TCYB.2015.2412554","volume":"46","author":"H Modares","year":"2015","unstructured":"Modares H, Ranatunga I, Lewis FL, Popa DO (2015) Optimized assistive human\u2013robot interaction using reinforcement learning. 
IEEE Trans Cybern 46(3):655\u2013667","journal-title":"IEEE Trans Cybern"},{"issue":"1","key":"9997_CR111","doi-asserted-by":"crossref","first-page":"117","DOI":"10.1109\/TAC.2017.2713339","volume":"63","author":"H Modares","year":"2017","unstructured":"Modares H, Lewis FL, Kang W, Davoudi A (2017) Optimal synchronization of heterogeneous nonlinear systems with unknown dynamics. IEEE Trans Autom Control 63(1):117\u2013131","journal-title":"IEEE Trans Autom Control"},{"key":"9997_CR112","doi-asserted-by":"crossref","unstructured":"Muelling K, Kober J, Peters J (2010) Learning table tennis with a mixture of motor primitives. In: 2010 10th IEEE-RAS international conference on humanoid robots, IEEE, pp 411\u2013416","DOI":"10.1109\/ICHR.2010.5686298"},{"key":"9997_CR113","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-019-0025-4","author":"E Neftci","year":"2019","unstructured":"Neftci E, Averbeck B (2019) Reinforcement learning in artificial and biological systems. Nat Mach Intell. https:\/\/doi.org\/10.1038\/s42256-019-0025-4","journal-title":"Nat Mach Intell"},{"key":"9997_CR114","unstructured":"Neftci EO, Averbeck BB (2019) Reinforcement learning in artificial and biological systems. Nat Mach Intell 1(3):133\u2013143"},{"key":"9997_CR115","first-page":"278","volume":"99","author":"AY Ng","year":"1999","unstructured":"Ng AY, Harada D, Russell S (1999) Policy invariance under reward transformations: theory and application to reward shaping. ICML 99:278\u2013287","journal-title":"ICML"},{"key":"9997_CR116","doi-asserted-by":"crossref","first-page":"27091","DOI":"10.1109\/ACCESS.2017.2777827","volume":"5","author":"ND Nguyen","year":"2017","unstructured":"Nguyen ND, Nguyen T, Nahavandi S (2017) System design perspective for human-level agents using deep reinforcement learning: a survey. IEEE Access 5:27091\u201327102","journal-title":"IEEE Access"},{"key":"9997_CR117","unstructured":"Nguyen TT, Nguyen ND, Nahavandi S (2018) Deep reinforcement learning for multi-agent systems: a review of challenges, solutions and applications. Preprint arXiv:181211794"},{"issue":"1","key":"9997_CR118","doi-asserted-by":"crossref","first-page":"19","DOI":"10.1109\/TASE.2014.2349915","volume":"12","author":"R O\u2019Flaherty","year":"2014","unstructured":"O\u2019Flaherty R, Egerstedt M (2014) Low-dimensional learning for complex robots. IEEE Trans Autom Sci Eng 12(1):19\u201327","journal-title":"IEEE Trans Autom Sci Eng"},{"issue":"5","key":"9997_CR119","doi-asserted-by":"crossref","first-page":"1186","DOI":"10.1109\/TRO.2019.2920206","volume":"35","author":"M Ohnishi","year":"2019","unstructured":"Ohnishi M, Wang L, Notomista G, Egerstedt M (2019) Barrier-certified adaptive reinforcement learning with applications to brushbot navigation. IEEE Trans Robot 35(5):1186\u20131205","journal-title":"IEEE Trans Robot"},{"issue":"4","key":"9997_CR120","doi-asserted-by":"crossref","first-page":"695","DOI":"10.1109\/JOE.2012.2205638","volume":"37","author":"N Palomeras","year":"2012","unstructured":"Palomeras N, El-Fakdi A, Carreras M, Ridao P (2012) Cola2: a control architecture for auvs. IEEE J Ocean Eng 37(4):695\u2013716","journal-title":"IEEE J Ocean Eng"},{"key":"9997_CR121","unstructured":"Parunak HVD (1999) Industrial and practical applications of dai. 
Multiagent systems: a modern approach to distributed artificial intelligence pp 337\u2013421"},{"issue":"4","key":"9997_CR122","doi-asserted-by":"crossref","first-page":"682","DOI":"10.1016\/j.neunet.2008.02.003","volume":"21","author":"J Peters","year":"2008","unstructured":"Peters J, Schaal S (2008) Reinforcement learning of motor skills with policy gradients. Neural netw 21(4):682\u2013697","journal-title":"Neural netw"},{"key":"9997_CR123","doi-asserted-by":"crossref","unstructured":"Peters J, Vijayakumar S, Schaal S (2005) Natural actor-critic. In: European conference on machine learning, Springer, pp 280\u2013291","DOI":"10.1007\/11564096_29"},{"key":"9997_CR124","doi-asserted-by":"crossref","unstructured":"Peters J, Mulling K, Altun Y (2010) Relative entropy policy search. In: Twenty-Fourth AAAI conference on artificial intelligence","DOI":"10.1609\/aaai.v24i1.7727"},{"issue":"9","key":"9997_CR125","doi-asserted-by":"crossref","first-page":"3094","DOI":"10.1109\/TIM.2009.2016880","volume":"58","author":"MG Plaza","year":"2009","unstructured":"Plaza MG, Mart\u00ednez-Mar\u00edn T, Prieto SS, Luna DM (2009) Integration of cell-mapping and reinforcement-learning techniques for motion planning of car-like robots. IEEE Trans Instrum Meas 58(9):3094\u20133103","journal-title":"IEEE Trans Instrum Meas"},{"issue":"2","key":"9997_CR126","doi-asserted-by":"crossref","first-page":"97","DOI":"10.1023\/A:1019935502139","volume":"18","author":"F Polat","year":"2002","unstructured":"Polat F et al (2002) Learning intelligent behavior in a non-stationary and partially observable environment. Artif Intell Rev 18(2):97\u2013115","journal-title":"Artif Intell Rev"},{"key":"9997_CR127","unstructured":"Puterman ML (2014) Markov decision processes: discrete stochastic dynamic programming. John Wiley & Sons"},{"key":"9997_CR128","unstructured":"Rescorla RA, Wagner AR (1972) A theory of Pavlovian conditioning: variations in the effectiveness of reinforcement and nonreinforcement. In: Black AH, Prokasy WF (eds) Classical conditioning II: current research and theory. Appleton-Century-Crofts, New York, pp 64\u201399"},{"issue":"3","key":"9997_CR129","doi-asserted-by":"crossref","first-page":"223","DOI":"10.1023\/A:1015008417172","volume":"17","author":"C Ribeiro","year":"2002","unstructured":"Ribeiro C (2002) Reinforcement learning agents. Artif Intell Rev 17(3):223\u2013250","journal-title":"Artif Intell Rev"},{"key":"9997_CR130","doi-asserted-by":"crossref","unstructured":"Riedmiller M, Peters J, Schaal S (2007) Evaluation of policy gradient methods and variants on the cart-pole benchmark. In: 2007 IEEE international symposium on approximate dynamic programming and reinforcement learning, IEEE, pp 254\u2013261","DOI":"10.1109\/ADPRL.2007.368196"},{"issue":"2","key":"9997_CR131","doi-asserted-by":"crossref","first-page":"569","DOI":"10.1109\/TMECH.2012.2219880","volume":"18","author":"E Rombokas","year":"2012","unstructured":"Rombokas E, Malhotra M, Theodorou EA, Todorov E, Matsuoka Y (2012) Reinforcement learning and synergistic control of the act hand. IEEE\/ASME Trans Mech 18(2):569\u2013577","journal-title":"IEEE\/ASME Trans Mech"},{"issue":"4","key":"9997_CR132","doi-asserted-by":"crossref","first-page":"1753","DOI":"10.1109\/TII.2017.2748236","volume":"14","author":"L Roveda","year":"2017","unstructured":"Roveda L, Pallucca G, Pedrocchi N, Braghin F, Tosatti LM (2017) Iterative learning procedure with reinforcement for high-accuracy force tracking in robotized tasks. 
IEEE Trans Ind Inform 14(4):1753\u20131763","journal-title":"IEEE Trans Ind Inform"},{"key":"9997_CR133","unstructured":"Rummery GA, Niranjan M (1994) On-line Q-learning using connectionist systems, vol 37. University of Cambridge, Department of Engineering Cambridge, England"},{"issue":"6","key":"9997_CR134","doi-asserted-by":"crossref","first-page":"445","DOI":"10.1023\/A:1006567623867","volume":"12","author":"M Rylatt","year":"1998","unstructured":"Rylatt M, Czarnecki C, Routen T (1998) Connectionist learning in behaviour-based mobile robots: a survey. Artif Intell Rev 12(6):445\u2013468","journal-title":"Artif Intell Rev"},{"key":"9997_CR135","doi-asserted-by":"crossref","first-page":"70","DOI":"10.2352\/ISSN.2470-1173.2017.19.AVM-023","volume":"19","author":"AE Sallab","year":"2017","unstructured":"Sallab AE, Abdou M, Perot E, Yogamani S (2017) Deep reinforcement learning framework for autonomous driving. Electron Imaging 19:70\u201376","journal-title":"Electron Imaging"},{"issue":"4","key":"9997_CR136","doi-asserted-by":"crossref","first-page":"1376","DOI":"10.1109\/JSYST.2014.2374334","volume":"9","author":"SRB dos Santos","year":"2015","unstructured":"dos Santos SRB, Givigi SN, Nascimento CL (2015) Autonomous construction of multiple structures using learning automata: description and experimental validation. IEEE Syst J 9(4):1376\u20131387","journal-title":"IEEE Syst J"},{"key":"9997_CR137","doi-asserted-by":"crossref","unstructured":"Santucci VG, Baldassarre G, Cartoni E (2019) Autonomous reinforcement learning of multiple interrelated tasks. Preprint arXiv:190601374","DOI":"10.1109\/DEVLRN.2019.8850713"},{"key":"9997_CR138","unstructured":"Schaul T, Horgan D, Gregor K, Silver D (2015) Universal value function approximators. In: International conference on machine learning, pp 1312\u20131320"},{"issue":"1","key":"9997_CR139","doi-asserted-by":"crossref","first-page":"171","DOI":"10.1109\/TFUZZ.2007.903323","volume":"16","author":"R Sharma","year":"2008","unstructured":"Sharma R, Gopal M (2008) A markov game-adaptive fuzzy controller for robot manipulators. IEEE Trans Fuzzy Syst 16(1):171\u2013186","journal-title":"IEEE Trans Fuzzy Syst"},{"key":"9997_CR140","doi-asserted-by":"crossref","unstructured":"Sharma RS, Nair RR, Agrawal P, Behera L, Subramanian VK (2018) Robust hybrid visual servoing using reinforcement learning and finite-time adaptive fosmc. IEEE Syst J","DOI":"10.1109\/JSYST.2018.2875789"},{"issue":"1","key":"9997_CR141","doi-asserted-by":"crossref","first-page":"241","DOI":"10.1109\/TII.2016.2617464","volume":"14","author":"H Shi","year":"2016","unstructured":"Shi H, Li X, Hwang KS, Pan W, Xu G (2016) Decoupled visual servoing with fuzzy q-learning. IEEE Trans Ind Inform 14(1):241\u2013252","journal-title":"IEEE Trans Ind Inform"},{"key":"9997_CR142","unstructured":"Silver D, Lever G, Heess N, Degris T, Wierstra D, Riedmiller M (2014) Deterministic policy gradient algorithms. In: International conference on machine learning"},{"issue":"1","key":"9997_CR143","doi-asserted-by":"crossref","first-page":"24","DOI":"10.1038\/s42256-018-0006-z","volume":"1","author":"KO Stanley","year":"2019","unstructured":"Stanley KO, Clune J, Lehman J, Miikkulainen R (2019) Designing neural networks through neuroevolution. 
Nat Mach Intell 1(1):24\u201335","journal-title":"Nat Mach Intell"},{"issue":"3","key":"9997_CR144","doi-asserted-by":"crossref","first-page":"345","DOI":"10.1023\/A:1008942012299","volume":"8","author":"P Stone","year":"2000","unstructured":"Stone P, Veloso M (2000) Multiagent systems: a survey from a machine learning perspective. Auton Robots 8(3):345\u2013383","journal-title":"Auton Robots"},{"issue":"4","key":"9997_CR145","doi-asserted-by":"crossref","first-page":"330","DOI":"10.1109\/TAMD.2012.2205924","volume":"4","author":"F Stulp","year":"2012","unstructured":"Stulp F, Buchli J, Ellmer A, Mistry M, Theodorou EA, Schaal S (2012) Model-free reinforcement learning of impedance control in stochastic environments. IEEE Trans Auton Mental Dev 4(4):330\u2013341","journal-title":"IEEE Trans Auton Mental Dev"},{"key":"9997_CR146","unstructured":"Such FP, Madhavan V, Conti E, Lehman J, Stanley KO, Clune J (2017) Deep neuroevolution: genetic algorithms are a competitive alternative for training deep neural networks for reinforcement learning. Preprint arXiv:171206567"},{"issue":"1","key":"9997_CR147","first-page":"9","volume":"3","author":"RS Sutton","year":"1988","unstructured":"Sutton RS (1988) Learning to predict by the methods of temporal differences. Mach Learn 3(1):9\u201344","journal-title":"Mach Learn"},{"key":"9997_CR148","doi-asserted-by":"crossref","unstructured":"Sutton RS (1992) A special issue of machine learning on reinforcement learning. Mach Learn 8","DOI":"10.1007\/BF00992695"},{"key":"9997_CR149","unstructured":"Sutton RS, Barto AG (2018) Reinforcement learning: an introduction. MIT press"},{"key":"9997_CR150","unstructured":"Sutton RS, McAllester DA, Singh SP, Mansour Y (2000) Policy gradient methods for reinforcement learning with function approximation. In: Advances in neural information processing systems, pp 1057\u20131063"},{"key":"9997_CR151","doi-asserted-by":"crossref","unstructured":"Tenorio-Gonzalez AC, Morales EF, Villase\u00f1or-Pineda L (2010) Dynamic reward shaping: training a robot by voice. In: Ibero-American conference on artificial intelligence, Springer, pp 483\u2013492","DOI":"10.1007\/978-3-642-16952-6_49"},{"key":"9997_CR152","first-page":"3137","volume":"11","author":"E Theodorou","year":"2010","unstructured":"Theodorou E, Buchli J, Schaal S (2010) A generalized path integral control approach to reinforcement learning. J Mach Learn Res 11:3137\u20133181","journal-title":"J Mach Learn Res"},{"issue":"6\u20137","key":"9997_CR153","doi-asserted-by":"crossref","first-page":"716","DOI":"10.1016\/j.artint.2007.09.009","volume":"172","author":"AL Thomaz","year":"2008","unstructured":"Thomaz AL, Breazeal C (2008) Teachable robots: understanding human teaching behavior to build more effective robot learners. Artif Intell 172(6\u20137):716\u2013737","journal-title":"Artif Intell"},{"issue":"4","key":"9997_CR154","doi-asserted-by":"crossref","first-page":"1743","DOI":"10.1109\/TASE.2017.2731371","volume":"14","author":"XT Truong","year":"2017","unstructured":"Truong XT, Ngo TD (2017) Toward socially aware robot navigation in dynamic and crowded environments: a proactive social motion model. IEEE Trans Autom Sci Eng 14(4):1743\u20131760","journal-title":"IEEE Trans Autom Sci Eng"},{"issue":"1\u20133","key":"9997_CR155","first-page":"59","volume":"22","author":"JN Tsitsiklis","year":"1996","unstructured":"Tsitsiklis JN, Van Roy B (1996) Feature-based methods for large scale dynamic programming. 
Mach Learn 22(1\u20133):59\u201394","journal-title":"Mach Learn"},{"key":"9997_CR156","unstructured":"Tsitsiklis JN, Van\u00a0Roy B (1997) Analysis of temporal-difference learning with function approximation. In: Advances in neural information processing systems, pp 1075\u20131081"},{"issue":"3","key":"9997_CR157","doi-asserted-by":"crossref","first-page":"3075","DOI":"10.1109\/LRA.2019.2924846","volume":"4","author":"M Turan","year":"2019","unstructured":"Turan M, Almalioglu Y, Gilbert HB, Mahmood F, Durr NJ, Araujo H, Sar\u0131 AE, Ajay A, Sitti M (2019) Learning to navigate endoscopic capsule robots. IEEE Robot Autom Lett 4(3):3075\u20133082","journal-title":"IEEE Robot Autom Lett"},{"issue":"1","key":"9997_CR158","doi-asserted-by":"crossref","first-page":"107","DOI":"10.1109\/3477.979965","volume":"32","author":"SG Tzafestas","year":"2002","unstructured":"Tzafestas SG, Rigatos GG (2002) Fuzzy reinforcement learning control for compliance tasks of robotic manipulators. IEEE Trans Syst Man Cybern Part B (Cybern) 32(1):107\u2013113","journal-title":"IEEE Trans Syst Man Cybern Part B (Cybern)"},{"key":"9997_CR159","doi-asserted-by":"crossref","unstructured":"Van\u00a0Hasselt H, Guez A, Silver D (2016) Deep reinforcement learning with double q-learning. In: Thirtieth AAAI conference on artificial intelligence, IEEE, pp 2094\u20132100","DOI":"10.1609\/aaai.v30i1.10295"},{"issue":"3","key":"9997_CR160","doi-asserted-by":"crossref","first-page":"3059","DOI":"10.1109\/LRA.2019.2924839","volume":"4","author":"A Viseras","year":"2019","unstructured":"Viseras A, Garcia R (2019) Deepig: multi-robot information gathering with deep reinforcement learning. IEEE Robot Autom Lett 4(3):3059\u20133066","journal-title":"IEEE Robot Autom Lett"},{"issue":"1","key":"9997_CR161","first-page":"1","volume":"1","author":"N Vlassis","year":"2007","unstructured":"Vlassis N (2007) A concise introduction to multiagent systems and distributed artificial intelligence. Synth Lect Artif Intell Mach Learn 1(1):1\u201371","journal-title":"Synth Lect Artif Intell Mach Learn"},{"issue":"3","key":"9997_CR162","doi-asserted-by":"crossref","first-page":"2124","DOI":"10.1109\/TVT.2018.2890773","volume":"68","author":"C Wang","year":"2019","unstructured":"Wang C, Wang J, Shen Y, Zhang X (2019) Autonomous navigation of uavs in large-scale complex environments: a deep reinforcement learning approach. IEEE Trans Veh Technol 68(3):2124\u20132136","journal-title":"IEEE Trans Veh Technol"},{"issue":"3","key":"9997_CR163","doi-asserted-by":"crossref","first-page":"1078","DOI":"10.1109\/TCST.2013.2271276","volume":"22","author":"J Wang","year":"2013","unstructured":"Wang J, Xu X, Liu D, Sun Z, Chen Q (2013) Self-learning cruise control using kernel-based least squares policy iteration. IEEE Trans Control Syst Technol 22(3):1078\u20131087","journal-title":"IEEE Trans Control Syst Technol"},{"issue":"4","key":"9997_CR164","doi-asserted-by":"crossref","first-page":"2395","DOI":"10.1109\/TII.2018.2881266","volume":"15","author":"JP Wang","year":"2018","unstructured":"Wang JP, Shi YK, Zhang WS, Thomas I, Duan SH (2018a) Multitask policy adversarial learning for human-level control with large state spaces. 
IEEE Trans Ind Inform 15(4):2395\u20132404","journal-title":"IEEE Trans Ind Inform"},{"issue":"5","key":"9997_CR165","doi-asserted-by":"crossref","first-page":"728","DOI":"10.1109\/TSMCC.2012.2186565","volume":"42","author":"S Wang","year":"2012","unstructured":"Wang S, Chaovalitwongse W, Babuska R (2012) Machine learning algorithms in bipedal robot control. IEEE Trans Syst Man Cybern Part C (Appl Rev) 42(5):728\u2013743","journal-title":"IEEE Trans Syst Man Cybern Part C (Appl Rev)"},{"issue":"5","key":"9997_CR166","doi-asserted-by":"crossref","first-page":"757","DOI":"10.1109\/TMECH.2009.2034740","volume":"15","author":"Y Wang","year":"2010","unstructured":"Wang Y, Lang H, De Silva CW (2010) A hybrid visual servo controller for robust grasping by wheeled mobile robots. IEEE\/ASME Trans Mech 15(5):757\u2013769","journal-title":"IEEE\/ASME Trans Mech"},{"issue":"4","key":"9997_CR167","doi-asserted-by":"crossref","first-page":"400","DOI":"10.1109\/TG.2018.2849942","volume":"10","author":"Y Wang","year":"2018","unstructured":"Wang Y, He H, Sun C (2018b) Learning to navigate through complex dynamic environment with modular deep reinforcement learning. IEEE Trans Games 10(4):400\u2013412","journal-title":"IEEE Trans Games"},{"issue":"3\u20134","key":"9997_CR168","first-page":"279","volume":"8","author":"CJ Watkins","year":"1992","unstructured":"Watkins CJ, Dayan P (1992) Q-learning. Mach Learn 8(3\u20134):279\u2013292","journal-title":"Mach Learn"},{"key":"9997_CR169","unstructured":"Watkins CJCH (1989) Learning from delayed rewards. PhD thesis, King\u2019s College, Cambridge"},{"issue":"6","key":"9997_CR170","doi-asserted-by":"crossref","first-page":"1581","DOI":"10.1109\/TSMCB.2007.907334","volume":"37","author":"AM Whitbrook","year":"2007","unstructured":"Whitbrook AM, Aickelin U, Garibaldi JM (2007) Idiotypic immune networks in mobile-robot control. IEEE Trans Syst Man Cybern Part B (Cybern) 37(6):1581\u20131598","journal-title":"IEEE Trans Syst Man Cybern Part B (Cybern)"},{"issue":"3\u20134","key":"9997_CR171","first-page":"229","volume":"8","author":"RJ Williams","year":"1992","unstructured":"Williams RJ (1992) Simple statistical gradient-following algorithms for connectionist reinforcement learning. Mach Learn 8(3\u20134):229\u2013256","journal-title":"Mach Learn"},{"issue":"4","key":"9997_CR172","doi-asserted-by":"crossref","first-page":"286","DOI":"10.1016\/S0019-9958(77)90354-0","volume":"34","author":"IH Witten","year":"1977","unstructured":"Witten IH (1977) An adaptive optimal controller for discrete-time markov environments. Inf Control 34(4):286\u2013295","journal-title":"Inf Control"},{"key":"9997_CR173","doi-asserted-by":"crossref","first-page":"117227","DOI":"10.1109\/ACCESS.2019.2933002","volume":"7","author":"C Wu","year":"2019","unstructured":"Wu C, Ju B, Wu Y, Lin X, Xiong N, Xu G, Li H, Liang X (2019) Uav autonomous target search based on deep reinforcement learning in complex disaster scene. IEEE Access 7:117227\u2013117245","journal-title":"IEEE Access"},{"issue":"4","key":"9997_CR174","doi-asserted-by":"crossref","first-page":"938","DOI":"10.1109\/JAS.2019.1911567","volume":"6","author":"A Xi","year":"2019","unstructured":"Xi A, Mudiyanselage TW, Tao D, Chen C (2019) Balance control of a biped robot on a rotating platform based on efficient reinforcement learning. 
IEEE\/CAA J Autom Sin 6(4):938\u2013951","journal-title":"IEEE\/CAA J Autom Sin"},{"issue":"4","key":"9997_CR175","doi-asserted-by":"crossref","first-page":"3420","DOI":"10.1109\/TVT.2017.2785414","volume":"67","author":"L Xiao","year":"2017","unstructured":"Xiao L, Xie C, Min M, Zhuang W (2017) User-centric view of unmanned aerial vehicle transmission against smart attacks. IEEE Trans Veh Technol 67(4):3420\u20133430","journal-title":"IEEE Trans Veh Technol"},{"issue":"12","key":"9997_CR176","doi-asserted-by":"crossref","first-page":"1863","DOI":"10.1109\/TNN.2011.2168422","volume":"22","author":"X Xu","year":"2011","unstructured":"Xu X, Liu C, Yang SX, Hu D (2011) Hierarchical approximate policy iteration with binary-tree state space decomposition. IEEE Trans Neural Netw 22(12):1863\u20131877","journal-title":"IEEE Trans Neural Netw"},{"key":"9997_CR177","unstructured":"Yang E, Gu D (2004) Multiagent reinforcement learning for multi-robot systems: a survey. Tech. rep."},{"key":"9997_CR178","unstructured":"Yang X, He H, Liu D (2017) Event-triggered optimal neuro-controller design with reinforcement learning for unknown nonlinear systems. IEEE Trans Syst Man Cybern Syst"},{"issue":"11","key":"9997_CR179","doi-asserted-by":"crossref","first-page":"5174","DOI":"10.1109\/TNNLS.2018.2805379","volume":"29","author":"Z Yang","year":"2018","unstructured":"Yang Z, Merrick K, Jin L, Abbass HA (2018) Hierarchical deep reinforcement learning for continuous action control. IEEE Trans Neural Netw Learn Syst 29(11):5174\u20135184","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"issue":"1","key":"9997_CR180","doi-asserted-by":"crossref","first-page":"17","DOI":"10.1109\/TSMCB.2003.808179","volume":"33","author":"C Ye","year":"2003","unstructured":"Ye C, Yung NH, Wang D (2003) A fuzzy controller with supervised learning assisted reinforcement learning algorithm for obstacle avoidance. IEEE Trans Syst Man Cybern Part B (Cybern) 33(1):17\u201327","journal-title":"IEEE Trans Syst Man Cybern Part B (Cybern)"},{"issue":"8","key":"9997_CR181","doi-asserted-by":"crossref","first-page":"8227","DOI":"10.1109\/TVT.2019.2923214","volume":"68","author":"S Yin","year":"2019","unstructured":"Yin S, Zhao S, Zhao Y, Yu FR (2019) Intelligent trajectory design in UAV-aided communications with reinforcement learning. IEEE Trans Veh Technol 68(8):8227\u20138231","journal-title":"IEEE Trans Veh Technol"},{"issue":"12","key":"9997_CR182","doi-asserted-by":"crossref","first-page":"2853","DOI":"10.1109\/TCYB.2014.2387277","volume":"45","author":"C Yu","year":"2015","unstructured":"Yu C, Zhang M, Ren F, Tan G (2015a) Multiagent learning of coordination in loosely coupled multiagent systems. IEEE Trans Cybern 45(12):2853\u20132867","journal-title":"IEEE Trans Cybern"},{"issue":"2","key":"9997_CR183","doi-asserted-by":"crossref","first-page":"1280","DOI":"10.1109\/TIE.2015.2425359","volume":"63","author":"J Yu","year":"2015","unstructured":"Yu J, Wang C, Xie G (2015b) Coordination of multiple robotic fish with applications to underwater robot competition. IEEE Trans Ind Electron 63(2):1280\u20131288","journal-title":"IEEE Trans Ind Electron"},{"issue":"2","key":"9997_CR184","doi-asserted-by":"crossref","first-page":"314","DOI":"10.1109\/3477.752807","volume":"29","author":"NH Yung","year":"1999","unstructured":"Yung NH, Ye C (1999) An intelligent mobile vehicle navigator based on fuzzy logic and reinforcement learning. 
IEEE Trans Syst Man Cybern Part B (Cybern) 29(2):314\u2013321","journal-title":"IEEE Trans Syst Man Cybern Part B (Cybern)"},{"issue":"1","key":"9997_CR185","doi-asserted-by":"crossref","first-page":"160","DOI":"10.1109\/3468.995537","volume":"32","author":"E Zalama","year":"2002","unstructured":"Zalama E, Gomez J, Paul M, Peran JR (2002) Adaptive behavior navigation of a mobile robot. IEEE Trans Syst Man Cybern Part A Syst Hum 32(1):160\u2013169","journal-title":"IEEE Trans Syst Man Cybern Part A Syst Hum"},{"issue":"5","key":"9997_CR186","doi-asserted-by":"crossref","first-page":"36","DOI":"10.1109\/MCOM.2016.7470933","volume":"54","author":"Y Zeng","year":"2016","unstructured":"Zeng Y, Zhang R, Lim TJ (2016) Wireless communications with unmanned aerial vehicles: opportunities and challenges. IEEE Commun Mag 54(5):36\u201342","journal-title":"IEEE Commun Mag"},{"issue":"5","key":"9997_CR187","doi-asserted-by":"crossref","first-page":"4091","DOI":"10.1109\/TIE.2016.2542134","volume":"64","author":"H Zhang","year":"2016","unstructured":"Zhang H, Jiang H, Luo Y, Xiao G (2016) Data-driven optimal consensus control for discrete-time multi-agent systems with unknown dynamics using reinforcement learning method. IEEE Trans Ind Electron 64(5):4091\u20134100","journal-title":"IEEE Trans Ind Electron"},{"issue":"2","key":"9997_CR188","doi-asserted-by":"crossref","first-page":"1148","DOI":"10.1109\/LRA.2019.2894216","volume":"4","author":"J Zhang","year":"2019","unstructured":"Zhang J, Tai L, Yun P, Xiong Y, Liu M, Boedecker J, Burgard W (2019) VR-goggles for robots: real-to-sim domain adaptation for visual control. IEEE Robot Autom Lett 4(2):1148\u20131155","journal-title":"IEEE Robot Autom Lett"},{"issue":"5","key":"9997_CR189","doi-asserted-by":"crossref","first-page":"1238","DOI":"10.1109\/TCYB.2016.2543238","volume":"47","author":"L Zhou","year":"2016","unstructured":"Zhou L, Yang P, Chen C, Gao Y (2016) Multiagent reinforcement learning with sparse interactions by negotiation and knowledge transfer. IEEE Trans Cybern 47(5):1238\u20131250","journal-title":"IEEE Trans Cybern"},{"issue":"2","key":"9997_CR190","doi-asserted-by":"crossref","first-page":"464","DOI":"10.1109\/TNNLS.2018.2844466","volume":"30","author":"J Zhu","year":"2018","unstructured":"Zhu J, Zhu J, Wang Z, Guo S, Xu C (2018) Hierarchical decision and control for continuous multitarget problem: policy evaluation with action delay. IEEE Trans Neural Netw Learn Syst 30(2):464\u2013473","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"9997_CR191","doi-asserted-by":"crossref","unstructured":"Zhu Y, Mottaghi R, Kolve E, Lim JJ, Gupta A, Fei-Fei L, Farhadi A (2017) Target-driven visual navigation in indoor scenes using deep reinforcement learning. 
In: 2017 IEEE international conference on robotics and automation (ICRA), IEEE, pp 3357\u20133364","DOI":"10.1109\/ICRA.2017.7989381"}],"container-title":["Artificial Intelligence Review"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10462-021-09997-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10462-021-09997-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10462-021-09997-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,24]],"date-time":"2022-12-24T20:17:15Z","timestamp":1671913035000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10462-021-09997-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,4,20]]},"references-count":191,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2022,2]]}},"alternative-id":["9997"],"URL":"https:\/\/doi.org\/10.1007\/s10462-021-09997-9","relation":{},"ISSN":["0269-2821","1573-7462"],"issn-type":[{"value":"0269-2821","type":"print"},{"value":"1573-7462","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,4,20]]},"assertion":[{"value":"29 March 2021","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 April 2021","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}