{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,17]],"date-time":"2026-04-17T09:52:02Z","timestamp":1776419522706,"version":"3.51.2"},"reference-count":100,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2017,11,1]],"date-time":"2017-11-01T00:00:00Z","timestamp":1509494400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Signal Process. Mag."],"published-print":{"date-parts":[[2017,11]]},"DOI":"10.1109\/msp.2017.2743240","type":"journal-article","created":{"date-parts":[[2017,11,9]],"date-time":"2017-11-09T16:36:42Z","timestamp":1510245402000},"page":"26-38","source":"Crossref","is-referenced-by-count":3769,"title":["Deep Reinforcement Learning: A Brief Survey"],"prefix":"10.1109","volume":"34","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0459-892X","authenticated-orcid":false,"given":"Kai","family":"Arulkumaran","sequence":"first","affiliation":[]},{"given":"Marc Peter","family":"Deisenroth","sequence":"additional","affiliation":[]},{"given":"Miles","family":"Brundage","sequence":"additional","affiliation":[]},{"given":"Anil Anthony","family":"Bharath","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2012.6252823"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/2463372.2463509"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1137\/S0363012901385691"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2004.1307456"},{"key":"ref30","article-title":"Auto-encoding variational bayes","author":"kingma","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref37","first-page":"1","article-title":"Building machines that learn and think like people","author":"lake","year":"2016","journal-title":"Behavioral Brain Sci"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/0196-8858(85)90002-8"},{"key":"ref35","article-title":"Deep successor reinforcement learning","author":"kulkarni","year":"0","journal-title":"Deep Reinforcement Learning Workshop NIPS"},{"key":"ref34","first-page":"3675","article-title":"Hierarchical deep reinforcement learning: Integrating temporal abstraction and intrinsic motivation","author":"kulkarni","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref28","first-page":"1531","article-title":"A natural policy gradient","author":"kakade","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(98)00023-X"},{"key":"ref29","first-page":"1809","article-title":"Schema networks: zero-shot transfer with a generative causal model of intuitive physics","author":"kansky","year":"0","journal-title":"Proc Int Conf Machine Learning"},{"key":"ref20","article-title":"Deep recurrent Q-learning for partially observable MDPs","author":"hausknecht","year":"0","journal-title":"Association for the Advancement of Artificial Intelligence Fall Symp Series"},{"key":"ref22","first-page":"2944","article-title":"Learning continuous control policies by stochastic value gradients","author":"heess","year":"0","journal-title":"Proc Neural 
Information Processing Systems"},{"key":"ref21","article-title":"Memory-based control with recurrent neural networks","author":"heess","year":"0","journal-title":"Deep Reinforcement Learning Workshop NIPS"},{"key":"ref24","first-page":"4565","article-title":"Generative adversarial imitation learning","author":"ho","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref23","author":"hester","year":"2017","journal-title":"Learning from demonstrations for real world reinforcement learning"},{"key":"ref26","article-title":"Reinforcement learning with unsupervised auxiliary tasks","author":"jaderberg","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989381"},{"key":"ref25","first-page":"1109","article-title":"VIME: Variational information maximizing exploration","author":"houthooft","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref50","author":"nachum","year":"2017","journal-title":"Bridging the gap between value and policy based reinforcement learning"},{"key":"ref51","article-title":"Massively parallel methods for deep reinforcement learning","author":"nair","year":"0","journal-title":"Proc ICML Deep Learn Workshop"},{"key":"ref59","first-page":"305","article-title":"ALVINN, an autonomous land vehicle in a neural network","author":"pomerleau","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref58","author":"peng","year":"2017","journal-title":"Multiagent bidirectionally-coordinated nets for learning to play StarCraft combat games"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.70"},{"key":"ref56","first-page":"4026","article-title":"Deep exploration via bootstrapped DQN","author":"osband","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref55","first-page":"2863","article-title":"Action-conditional video prediction using deep networks in Atari games","author":"oh","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref54","article-title":"PGQ: Combining policy gradient and Q-learning","author":"o'donoghue","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1007\/11552246_35"},{"key":"ref52","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","author":"ng","year":"0","journal-title":"Proc Int Conf Machine Learning"},{"key":"ref40","article-title":"Guided policy search","author":"levine","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref4","first-page":"1471","article-title":"Unifying count-based exploration and intrinsic motivation","author":"bellemare","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21236\/ADA280862"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"716","DOI":"10.1073\/pnas.38.8.716","article-title":"On the theory of dynamic programming","volume":"38","author":"bellman","year":"0","journal-title":"Proc Nat Acad Sci"},{"key":"ref5","first-page":"253","article-title":"The arcade learning environment: an evaluation platform for general agents","author":"bellemare","year":"0","journal-title":"Proc Int Joint Conf Artificial Intelligence"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCC.2007.913919"},{"key":"ref49","first-page":"2125","article-title":"Variational information 
maximisation for intrinsically motivated reinforcement learning","author":"mohamed","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2013.50"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(01)00129-1"},{"key":"ref46","article-title":"Learning to navigate in complex environments","author":"mirowski","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992699"},{"key":"ref48","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1177\/0278364917710318"},{"key":"ref41","first-page":"1","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"levine","year":"2016","journal-title":"J Mach Learning Res"},{"key":"ref44","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref43","author":"li","year":"2017","journal-title":"Deep reinforcement learning An overview"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"key":"ref72","first-page":"387","article-title":"Deterministic policy gradient algorithms","author":"silver","year":"0","journal-title":"Proc Int Conf Machine Learning"},{"key":"ref71","article-title":"High-dimensional continuous control using generalized advantage estimation","author":"schulman","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref70","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"0","journal-title":"Proc Int Conf Machine Learning"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143955"},{"key":"ref77","first-page":"2244","article-title":"Learning multiagent communication with backpropagation","author":"sukhbaatar","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref74","doi-asserted-by":"crossref","first-page":"105","DOI":"10.1613\/jair.859","article-title":"Optimizing dialogue management with reinforcement learning: Experiments with the NJFun system","volume":"16","author":"singh","year":"2002","journal-title":"J Artificial Intell Res"},{"key":"ref75","article-title":"Incentivizing exploration in reinforcement learning with deep predictive models","author":"stadie","year":"0","journal-title":"Deep Reinforcement Learning Workshop NIPS"},{"key":"ref78","author":"sutton","year":"1998","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref60","first-page":"1278","article-title":"Stochastic backpropagation and approximate inference in deep generative models","author":"rezende","year":"0","journal-title":"Proc Int Conf Machine Learning"},{"key":"ref62","article-title":"On-line Q-learning using connectionist systems","author":"rummery","year":"1994","journal-title":"Technical Report CUED\/F-INFENG\/TR291"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/11564096_32"},{"key":"ref63","author":"rusu","year":"2016","journal-title":"Progressive neural networks"},{"key":"ref64","author":"rusu","year":"2016","journal-title":"Sim-to-real robot 
learning from pixels with progressive nets"},{"key":"ref65","author":"salimans","year":"2017","journal-title":"Evolution strategies as a scalable alternative to reinforcement learning"},{"key":"ref66","first-page":"1312","article-title":"Universal value function approximators","author":"schaul","year":"0","journal-title":"Proc Int Conf Machine Learning"},{"key":"ref67","article-title":"Prioritized experience replay","author":"schaul","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref68","first-page":"222","article-title":"A possibility for implementing curiosity and boredom in model-building neural controllers","author":"schmidhuber","year":"0","journal-title":"Proc 2nd Int Conf on Simulation of Adaptive Behavior"},{"key":"ref2","first-page":"1726","article-title":"The option-critic architecture","author":"bacon","year":"0","journal-title":"Association for the Advancement of Artificial Intelligence"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1142\/S012906579100011X"},{"key":"ref1","article-title":"Classifying options for deep reinforcement learning","author":"arulkumaran","year":"0","journal-title":"the Workshop on Deep Reinforcement Learning Frontiers and Challenges in IJCAI' 16"},{"key":"ref95","first-page":"2746","article-title":"Embed to control: A locally linear latent dynamics model for control from raw images","author":"watter","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref93","article-title":"Sample efficient actor-critic with experience replay","author":"wang","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref92","article-title":"Dueling network architectures for deep reinforcement learning","author":"wang","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref91","article-title":"From pixels to torques: policy learning with deep dynamical models","author":"wahlstr\u00f6m","year":"0","journal-title":"Proc ICML Deep Learn Workshop"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2015.12.271"},{"key":"ref98","article-title":"Maximum entropy deep inverse reinforcement learning","author":"wulfmeier","year":"0","journal-title":"Deep Reinforcement Learning Workshop NIPS"},{"key":"ref99","first-page":"2048","article-title":"Show, attend and tell: Neural image caption generation with visual attention","author":"xu","year":"0","journal-title":"Proc Int Conf Machine Learning"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1093\/jigpal\/jzp049"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"ref10","article-title":"Recurrent environment simulators","author":"chiappa","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref11","author":"christiano","year":"2016","journal-title":"Transfer from simulation to real world through learning deep inverse dynamics model"},{"key":"ref12","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1561\/2300000021","article-title":"A survey on policy search for robotics","volume":"2","author":"deisenroth","year":"2013","journal-title":"Foundations and Trends in Robotics"},{"key":"ref13","article-title":"Learning to perform physics experiments via deep reinforcement learning","author":"denil","year":"0","journal-title":"Proc Int Conf Learning 
Representations"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487173"},{"key":"ref15","first-page":"2137","article-title":"Learning to communicate with deep multi-agent reinforcement learning","author":"foerster","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref16","article-title":"Towards deep symbolic reinforcement learning","author":"garnelo","year":"0","journal-title":"Deep Reinforcement Learning Workshop NIPS"},{"key":"ref82","first-page":"1553","article-title":"A deep hierarchical approach to lifelong learning in Minecraft","author":"tessler","year":"0","journal-title":"Association for the Advancement of Artificial Intelligence"},{"key":"ref17","first-page":"383","article-title":"Evolving modular fast-weight networks for control","author":"gomez","year":"0","journal-title":"Proc Int Conf Artificial Neural Networks"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1145\/203330.203343"},{"key":"ref18","first-page":"2672","article-title":"Generative adversarial nets","author":"goodfellow","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref84","article-title":"Towards adapting deep visuomotor representations from simulated to real environments","author":"tzeng","year":"0","journal-title":"The First Workshop on the Algorithmic Foundations of Robotics"},{"key":"ref19","article-title":"Continuous deep Q-learning with model-based acceleration","author":"gu","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/9.580874"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/700"},{"key":"ref89","first-page":"3540","article-title":"FeUdal networks for hierarchical reinforcement learning","author":"vezhnevets","year":"0","journal-title":"Proc Int Conf Machine Learning"},{"key":"ref85","article-title":"Episodic exploration for deep deterministic policies: An application to StarCraft micromanagement tasks","author":"usunier","year":"0","journal-title":"Proc Int Conf Learning Representations"},{"key":"ref86","first-page":"2613","article-title":"Double Q-learning","author":"van hasselt","year":"0","journal-title":"Proc Neural Information Processing Systems"},{"key":"ref87","first-page":"2094","article-title":"Deep reinforcement learning with double Q-learning","author":"van hasselt","year":"0","journal-title":"Proc Association for the Advancement of Artificial Intelligence"},{"key":"ref88","first-page":"3486","article-title":"Strategic attentive writer for learning macro-actions","author":"vezhnevets","year":"0","journal-title":"Proc Neural Information Processing Systems"}],"container-title":["IEEE Signal Processing Magazine"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/79\/8103076\/08103164.pdf?arnumber=8103164","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,12]],"date-time":"2022-01-12T11:39:29Z","timestamp":1641987569000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/8103164\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,11]]},"references-count":100,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/msp.2017.2743240","relation":{},"ISSN":["1053-5888"],"issn-type":[{"value":"1053-5888","type":"print"}],"subject":[],"published":{"date-parts":[[2017,11]]}}}