{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T17:33:31Z","timestamp":1775237611811,"version":"3.50.1"},"reference-count":163,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2018YFC0807500"],"award-info":[{"award-number":["2018YFC0807500"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundations of China","doi-asserted-by":"publisher","award":["61772396"],"award-info":[{"award-number":["61772396"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundations of China","doi-asserted-by":"publisher","award":["61772392"],"award-info":[{"award-number":["61772392"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundations of China","doi-asserted-by":"publisher","award":["61902296"],"award-info":[{"award-number":["61902296"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundations of China","doi-asserted-by":"publisher","award":["61825305"],"award-info":[{"award-number":["61825305"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Xi\u2019an Key Laboratory of Big Data and Intelligent Vision","award":["201805053ZD4CG37"],"award-info":[{"award-number":["201805053ZD4CG37"]}]},{"DOI":"10.13039\/501100007128","name":"National Natural Science Foundation of Shaanxi Province","doi-asserted-by":"publisher","award":["2020JQ-330"],"award-info":[{"award-number":["2020JQ-330"]}],"id":[{"id":"10.13039\/501100007128","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100007128","name":"National Natural Science Foundation of Shaanxi Province","doi-asserted-by":"publisher","award":["2020JM-195"],"award-info":[{"award-number":["2020JM-195"]}],"id":[{"id":"10.13039\/501100007128","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002858","name":"China Postdoctoral Science Foundation","doi-asserted-by":"publisher","award":["2019M663640"],"award-info":[{"award-number":["2019M663640"]}],"id":[{"id":"10.13039\/501100002858","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Guangxi Key Laboratory of Trusted Software","award":["KX202061"],"award-info":[{"award-number":["KX202061"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. 
Learning Syst."],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1109\/tnnls.2022.3207346","type":"journal-article","created":{"date-parts":[[2022,9,28]],"date-time":"2022-09-28T19:49:52Z","timestamp":1664394592000},"page":"5064-5078","source":"Crossref","is-referenced-by-count":664,"title":["Deep Reinforcement Learning: A Survey"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0935-6735","authenticated-orcid":false,"given":"Xu","family":"Wang","sequence":"first","affiliation":[{"name":"Xi&#x2019;an Key Laboratory of Big Data and Intelligent Vision, Xidian University, Xi&#x2019;an, China"}]},{"given":"Sen","family":"Wang","sequence":"additional","affiliation":[{"name":"Xi&#x2019;an Key Laboratory of Big Data and Intelligent Vision, Xidian University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3594-2167","authenticated-orcid":false,"given":"Xingxing","family":"Liang","sequence":"additional","affiliation":[{"name":"Science and Technology on Information Systems Engineering Laboratory, National University of Defense Technology, Changsha, China"}]},{"given":"Dawei","family":"Zhao","sequence":"additional","affiliation":[{"name":"National Innovation Institute of Defense Technology, Beijing, China"}]},{"given":"Jincai","family":"Huang","sequence":"additional","affiliation":[{"name":"Science and Technology on Information Systems Engineering Laboratory, National University of Defense Technology, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3238-745X","authenticated-orcid":false,"given":"Xin","family":"Xu","sequence":"additional","affiliation":[{"name":"College of Intelligence Science, National University of Defense Technology (NUDT), Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9405-2626","authenticated-orcid":false,"given":"Bin","family":"Dai","sequence":"additional","affiliation":[{"name":"National Innovation Institute of Defense Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6766-8310","authenticated-orcid":false,"given":"Qiguang","family":"Miao","sequence":"additional","affiliation":[{"name":"Xi&#x2019;an Key Laboratory of Big Data and Intelligent Vision, Xidian University, Xi&#x2019;an, China"}]}],"member":"263","reference":[{"key":"ref1","volume-title":"Markov Decision Processes: Discrete Stochastic Dynamic Programming","author":"Puterman","year":"2014"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JRPROC.1961.287775"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/tnn.1998.712192"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1126\/science.153.3731.34"},{"key":"ref5","volume-title":"Dynamic Programming and Optimal Control","volume":"1","author":"Bertsekas","year":"2000"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1147\/rd.33.0210"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1023\/A:1017992615625"},{"key":"ref8","article-title":"Playing Atari with deep reinforcement learning","author":"Mnih","year":"2013","journal-title":"arXiv:1312.5602"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"ref10","first-page":"1097","article-title":"ImageNet classification with deep convolutional neural networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst. 
(NIPS)","volume":"25","author":"Krizhevsky"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1561\/2000000039"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/0893-6080(89)90020-8"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/BF02551274"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/0893-6080(90)90005-6"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460968"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202134"},{"key":"ref18","article-title":"Curiosity-driven exploration for mapless navigation with deep reinforcement learning","author":"Zhelo","year":"2018","journal-title":"arXiv:1804.00456"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1126\/science.aar6404"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref24","article-title":"Dota 2 with large scale deep reinforcement learning","author":"Berner","year":"2019","journal-title":"arXiv:1912.06680"},{"key":"ref25","first-page":"1","article-title":"Compositional attention networks for machine reasoning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Hudson"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00443"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1397"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.5220\/0007520300002108"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2018.00062"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2020.03.105"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3178876.3185994"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/3289600.3290999"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.148"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1126\/science.aar6170"},{"key":"ref35","first-page":"153","article-title":"Learning to see physics via visual de-animation","volume-title":"Proc. 
NIPS","author":"Wu"},{"key":"ref36","volume-title":"OpenAI Baselines","author":"Dhariwal","year":"2017"},{"key":"ref37","volume-title":"Reinforcement learning coach","author":"Caspi","year":"2017"},{"key":"ref38","volume-title":"TF-Agents: A Library for Reinforcement Learning in Tensorflow","author":"Guadarrama","year":"2018"},{"key":"ref39","volume-title":"Tianshou","author":"Weng","year":"2020"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2743240"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1561\/2200000071"},{"key":"ref42","article-title":"Deep reinforcement learning: An overview","author":"Li","year":"2017","journal-title":"arXiv:1701.07274"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1631\/fitee.1900533"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1512\/iumj.1957.6.56038"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(98)00023-X"},{"key":"ref46","volume-title":"Dynamic Programming and Markov Processes","author":"Howard","year":"1960"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/BF00115009"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.2200\/s00268ed1v01y201005aim009"},{"key":"ref49","volume-title":"On-line Q-learning using connectionist systems","volume":"37","author":"Rummery","year":"1994"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref51","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume-title":"Proc. NIPs","volume":"99","author":"Sutton"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"ref53","first-page":"1","article-title":"Continuous control with deep reinforcement learning","volume-title":"Proc. ICLR","author":"Lillicrap"},{"key":"ref54","article-title":"End-to-end deep reinforcement learning for lane keeping assist","author":"Sallab","year":"2016","journal-title":"arXiv:1612.04340"},{"key":"ref55","volume-title":"Deep Learning","volume":"1","author":"Goodfellow","year":"2016"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.5555\/2999792.2999959"},{"key":"ref59","first-page":"1","article-title":"Efficient estimation of word representations in vector space","volume-title":"Proc. 1st Int. Conf. Learn. Represent.","author":"Mikolov"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1406.1078"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1162"},{"key":"ref62","article-title":"A sensitivity analysis of (and practitioners\u2019 guide to) convolutional neural networks for sentence classification","author":"Zhang","year":"2015","journal-title":"arXiv:1510.03820"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.5555\/2969033.2969125"},{"key":"ref64","first-page":"2204","article-title":"Recurrent models of visual attention","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Mnih"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1146\/annurev.neuro.26.041002.131047"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref67","first-page":"3320","article-title":"How transferable are features in deep neural networks?","volume-title":"Proc. 27th Int. Conf. Neural Inf. Process. 
Syst.","volume":"2","author":"Yosinski"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1037\/h0042519"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-84858-7"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref71","first-page":"255","article-title":"Issues in using function approximation for reinforcement learning","volume-title":"Proc. 4th Connectionist Models Summer School","author":"Thrun"},{"key":"ref72","first-page":"2613","article-title":"Double Q-learning","volume-title":"Proc. Neural Inf. Process. Syst. (NIPS)","volume":"23","author":"Hasselt"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref74","first-page":"1","article-title":"Prioritized experience replay","volume-title":"Proc. ICLR","author":"Schaul"},{"key":"ref75","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wang"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.21236\/ADA280862"},{"key":"ref77","first-page":"1","article-title":"Noisy networks for exploration","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Fortunato"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11757"},{"key":"ref79","first-page":"449","article-title":"A distributional perspective on reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Bellemare"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1214\/aos\/1176345637"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11791"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1257\/jep.15.4.143"},{"key":"ref83","first-page":"1","article-title":"Deep recurrent Q-learning for partially observable MDPs","volume-title":"Proc. AAAI Fall Symp. Ser.","author":"Hausknecht"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.2307\/2332286"},{"key":"ref85","first-page":"4033","article-title":"Deep exploration via bootstrapped DQN","volume-title":"Proc. 30th Int. Conf. Neural Inf. Process. Syst.","author":"Osband"},{"key":"ref86","first-page":"1","article-title":"Provably efficient Q-learning with function approximation via distribution shift error checking Oracle","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Du"},{"key":"ref87","first-page":"1","article-title":"Recurrent experience replay in distributed reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Kapturowski"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11796"},{"key":"ref89","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih"},{"key":"ref90","volume-title":"Learning from delayed rewards","author":"Watkins","year":"1989"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50035-0"},{"key":"ref92","article-title":"Sample efficient actor-critic with experience replay","author":"Wang","year":"2016","journal-title":"arXiv:1611.01224"},{"key":"ref93","first-page":"1054","article-title":"Safe and efficient off-policy reinforcement learning","volume-title":"Proc. 30th Int. Conf. Neural Inf. Process. Syst.","author":"Munos"},{"key":"ref94","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Schulman"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729694"},{"key":"ref96","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.6144"},{"key":"ref98","first-page":"5285","article-title":"Scalable trust-region method for deep reinforcement learning using Kronecker-factored approximation","volume-title":"Proc. 31st Int. Conf. Neural Inf. Process. Syst.","author":"Wu"},{"key":"ref99","first-page":"573","article-title":"A Kronecker-factored approximate Fisher matrix for convolution layers","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Grosse"},{"key":"ref100","first-page":"2408","article-title":"Optimizing neural networks with Kronecker-factored approximate curvature","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Martens"},{"key":"ref101","first-page":"1","article-title":"A natural policy gradient","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"14","author":"Kakade"},{"key":"ref102","first-page":"1","article-title":"Trust-PCL: An off-policy trust region method for continuous control","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Nachum"},{"key":"ref103","first-page":"387","article-title":"Deterministic policy gradient algorithms","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Silver"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1103\/PhysRev.36.823"},{"key":"ref105","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto"},{"key":"ref106","first-page":"1352","article-title":"Reinforcement learning with deep energy-based policies","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"issue":"8","key":"ref107","first-page":"1063","article-title":"Reinforcement learning with factored states and actions","volume":"5","author":"Sallans","year":"2004","journal-title":"J. Mach. Learn. Res."},{"key":"ref108","article-title":"Combining policy gradient and Q-learning","author":"O\u2019Donoghue","year":"2016","journal-title":"arXiv:1611.01626"},{"key":"ref109","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","volume-title":"Proc. AAAI","volume":"8","author":"Ziebart"},{"key":"ref110","first-page":"182","article-title":"Relative entropy inverse reinforcement learning","volume-title":"Proc. 14th Int. Conf. Artif. Intell. Statist.","author":"Boularias"},{"key":"ref111","first-page":"1","article-title":"Algorithms for inverse reinforcement learning","volume-title":"Proc. ICML","volume":"1","author":"Ng"},{"key":"ref112","first-page":"2772","article-title":"Bridging the gap between value and policy based reinforcement learning","volume-title":"Proc. 31st Int. Conf. Neural Inf. Process. Syst.","author":"Nachum"},{"key":"ref113","article-title":"Equivalence between policy gradients and soft Q-learning","author":"Schulman","year":"2017","journal-title":"arXiv:1704.06440"},{"key":"ref114","first-page":"1","article-title":"Multiagent soft Q-learning","volume-title":"Proc. AAAI Spring Symp. Ser.","author":"Wei"},{"key":"ref115","first-page":"6382","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","volume-title":"Proc. 31st Int. Conf. Neural Inf. Process. 
Syst.","author":"Lowe"},{"key":"ref116","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref117","first-page":"1","article-title":"Stabilizing off-policy Q-learning via bootstrapping error reduction","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kumar"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390278"},{"key":"ref119","first-page":"213","article-title":"R-MAX\u2014A general polynomial time algorithm for near-optimal reinforcement learning","volume":"3","author":"Brafman","year":"2002","journal-title":"J. Mach. Learn. Res."},{"key":"ref120","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-017-0468-y"},{"key":"ref121","article-title":"Model-based reinforcement learning for Atari","author":"Kaiser","year":"2019","journal-title":"arXiv:1903.00374"},{"key":"ref122","first-page":"2829","article-title":"Continuous deep Q-learning with model-based acceleration","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Gu"},{"key":"ref123","first-page":"1","article-title":"When to trust your model: Model-based policy optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Janner"},{"key":"ref124","first-page":"1","article-title":"Dream to control: Learning behaviors by latent imagination","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Hafner"},{"key":"ref125","first-page":"3540","article-title":"Feudal networks for hierarchical reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Vezhnevets"},{"key":"ref126","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref127","first-page":"1","article-title":"Hierarchical deep reinforcement learning: Integrating temporal abstraction and intrinsic motivation","volume-title":"Proc. NIPS","author":"Kulkarni"},{"key":"ref128","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10916"},{"key":"ref129","first-page":"5055","article-title":"Hindsight experience replay","volume-title":"Proc. 31st Int. Conf. Neural Inf. Process. Syst.","author":"Andrychowicz"},{"key":"ref130","first-page":"1","article-title":"Diversity is all you need: Learning skills without a reward function","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Eysenbach"},{"key":"ref131","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-14435-6_7"},{"key":"ref133","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref134","first-page":"2085","article-title":"Value-decomposition networks for cooperative multi-agent learning based on team reward","volume-title":"Proc. 17th Int. Conf. Auton. Agents MultiAgent Syst.","author":"Sunehag"},{"key":"ref135","first-page":"4295","article-title":"QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rashid"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.1016\/S1364-6613(99)01327-3"},{"key":"ref137","doi-asserted-by":"publisher","DOI":"10.1145\/1015330.1015430"},{"key":"ref138","first-page":"1","article-title":"Inverse reinforcement learning through structured classification","volume-title":"Proc. 
NIPS","author":"Klein"},{"key":"ref139","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143936"},{"key":"ref140","article-title":"A connection between generative adversarial networks, inverse reinforcement learning, and energy-based models","author":"Finn","year":"2016","journal-title":"arXiv:1611.03852"},{"key":"ref141","first-page":"1","article-title":"Learning robust rewards with adverserial inverse reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Fu"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.1561\/2300000053"},{"key":"ref143","first-page":"627","article-title":"A reduction of imitation learning and structured prediction to no-regret online learning","volume-title":"Proc. 14th Int. Conf. Artif. Intell. Statist.","author":"Ross"},{"key":"ref144","first-page":"2859","article-title":"Learning from limited demonstrations","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kim"},{"key":"ref145","doi-asserted-by":"publisher","DOI":"10.1145\/3054912"},{"key":"ref146","first-page":"4572","article-title":"Generative adversarial imitation learning","volume-title":"Proc. 30th Int. Conf. Neural Inf. Process. Syst.","author":"Ho"},{"key":"ref147","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2022.04.017"},{"key":"ref148","first-page":"77","article-title":"A perspective view and survey of meta-learning","volume":"18","author":"Ricardo","year":"2001","journal-title":"Artif. Intell. Rev."},{"key":"ref149","doi-asserted-by":"publisher","DOI":"10.1016\/S0893-6080(02)00228-9"},{"key":"ref150","first-page":"5331","article-title":"Efficient off-policy meta-reinforcement learning via probabilistic context variables","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rakelly"},{"key":"ref151","doi-asserted-by":"publisher","DOI":"10.1038\/s41593-018-0147-8"},{"key":"ref152","first-page":"5307","article-title":"Meta-reinforcement learning of structured exploration strategies","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Gupta"},{"key":"ref153","article-title":"Learning to adapt in dynamic, real-world environments through meta-reinforcement learning","author":"Nagabandi","year":"2018","journal-title":"arXiv:1803.11347"},{"key":"ref154","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"Levine","year":"2020","journal-title":"arXiv:2005.01643"},{"key":"ref155","first-page":"1379","article-title":"Trajectory-wise control variates for variance reduction in policy gradient methods","volume-title":"Proc. Conf. Robot Learn.","author":"Cheng"},{"key":"ref156","article-title":"Conservative Q-learning for offline reinforcement learning","author":"Kumar","year":"2020","journal-title":"arXiv:2006.04779"},{"key":"ref157","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3057023"},{"key":"ref158","first-page":"1","article-title":"Deep imitative models for flexible inference, planning, and control","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Rhinehart"},{"key":"ref159","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2009.191"},{"issue":"7","key":"ref160","first-page":"1633","article-title":"Transfer learning for reinforcement learning domains: A survey","volume":"10","author":"Taylor","year":"2009","journal-title":"J. Mach. Learn. 
Res."},{"key":"ref161","article-title":"Transfer learning in deep reinforcement learning: A survey","author":"Zhu","year":"2020","journal-title":"arXiv:2009.07888"},{"key":"ref162","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2022.108221"},{"key":"ref163","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-84529-2_22"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10492491\/09904958.pdf?arnumber=9904958","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,4,9]],"date-time":"2024-04-09T19:45:28Z","timestamp":1712691928000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9904958\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4]]},"references-count":163,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2022.3207346","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4]]}}}