{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,27]],"date-time":"2026-02-27T02:30:41Z","timestamp":1772159441731,"version":"3.50.1"},"reference-count":44,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/100010661","name":"Horizon 2020 Framework Programme","doi-asserted-by":"publisher","award":["871571"],"award-info":[{"award-number":["871571"]}],"id":[{"id":"10.13039\/100010661","id-type":"DOI","asserted-by":"publisher"}]},{"name":"P2020 Mobilizador","award":["POCI-01-0247-FEDER-046079"],"award-info":[{"award-number":["POCI-01-0247-FEDER-046079"]}]},{"name":"DynamiCITY","award":["NORTE-01-0145-FEDER-000073"],"award-info":[{"award-number":["NORTE-01-0145-FEDER-000073"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/access.2023.3279729","type":"journal-article","created":{"date-parts":[[2023,5,24]],"date-time":"2023-05-24T13:45:18Z","timestamp":1684935918000},"page":"1-1","source":"Crossref","is-referenced-by-count":1,"title":["Decoding Reinforcement Learning for newcomers"],"prefix":"10.1109","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4366-6227","authenticated-orcid":false,"given":"Francisco S.","family":"Neves","sequence":"first","affiliation":[{"name":"Centre for Robotics and Autonomous Systems, INESC TEC, Porto, Portugal"}]},{"given":"Gustavo A.","family":"Andrade","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, Universidade do Porto, Porto, Portugal"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9958-5958","authenticated-orcid":false,"given":"Matheus F.","family":"Reis","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, Universidade do Porto, Porto, Portugal"}]},{"given":"A.","family":"Pedro Aguiar","sequence":"additional","affiliation":[{"name":"Department of Electrical and Computer Engineering, Universidade do Porto, Porto, Portugal"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2465-5813","authenticated-orcid":false,"given":"Andry M.","family":"Pinto","sequence":"additional","affiliation":[{"name":"Centre for Robotics and Autonomous Systems, INESC TEC, Porto, Portugal"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref35","article-title":"Deep reinforcement learning for dexterous manipulation with concept networks","author":"gudimella","year":"2017","journal-title":"arXiv 1709 06977"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2941229"},{"key":"ref34","first-page":"469","author":"sutton","year":"2018","journal-title":"Chapter 17 Frontiers"},{"key":"ref15","article-title":"EAGERx: Engine agnostic graph environments for robotics","author":"van der heijden","year":"2022"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2019.XV.073"},{"key":"ref14","article-title":"DeepTraffic: Crowdsourced hyperparameter tuning of deep reinforcement learning systems for multi-agent dense traffic navigation","author":"fridman","year":"2018","journal-title":"arXiv 1801 02805"},{"key":"ref36","first-page":"1","article-title":"Deep reinforcement learning 
from human preferences","volume":"30","author":"christiano","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref31","author":"neves","year":"2022","journal-title":"RL2D"},{"key":"ref30","article-title":"Model-based value estimation for efficient model-free reinforcement learning","author":"feinberg","year":"2018","journal-title":"arXiv 1803 00101"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref33","first-page":"54","author":"sutton","year":"2018","journal-title":"Rewards and Episodes"},{"key":"ref10","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref32","first-page":"47","author":"sutton","year":"2018","journal-title":"The Agent-Environment Interface"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3082697"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IEEECONF38699.2020.9389349"},{"key":"ref17","article-title":"Playing Atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"arXiv 1312 5602"},{"key":"ref39","first-page":"131","author":"sutton","year":"2018","journal-title":"Q-Learning Off-Policy TD Control"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-82544-7_27"},{"key":"ref38","first-page":"40","article-title":"Few-shot goal inference for visuomotor learning and planning","author":"xie","year":"2018","journal-title":"Proc Conf Robot Learn"},{"key":"ref19","article-title":"Distributional reinforcement learning with quantile regression","author":"dabney","year":"2017","journal-title":"arXiv 1710 10044"},{"key":"ref18","article-title":"Deep reinforcement learning with double Q-learning","author":"van hasselt","year":"2015","journal-title":"arXiv 1509 06461 [cs]"},{"key":"ref24","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"arXiv 1502 05477 [cs]"},{"key":"ref23","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref26","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"arXiv 1707 06347"},{"key":"ref25","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"2018","journal-title":"arXiv 1801 01290"},{"key":"ref20","article-title":"Implicit quantile networks for distributional reinforcement learning","author":"dabney","year":"2018","journal-title":"arXiv 1806 06923"},{"key":"ref42","first-page":"131","author":"sutton","year":"2018","journal-title":"SARSA On-Policy TD Control"},{"key":"ref41","first-page":"91","author":"sutton","year":"2018","journal-title":"Monte-Carlo Methods"},{"key":"ref22","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref44","article-title":"Robbins&#x2013;Monro conditions for persistent exploration learning strategies","author":"rokhlin","year":"2018","journal-title":"arXiv 1808 00245"},{"key":"ref21","article-title":"A distributional perspective on reinforcement learning","author":"bellemare","year":"2017","journal-title":"arXiv 1707 06887"},{"key":"ref43","author":"silver","year":"2022","journal-title":"UCL Course on RL"},{"key":"ref28","article-title":"IMPALA: Scalable distributed deep-RL with importance weighted actor-learner 
architectures","author":"espeholt","year":"2018","journal-title":"arXiv 1802 01561"},{"key":"ref27","article-title":"Addressing function approximation error in actor-critic methods","author":"fujimoto","year":"2018","journal-title":"arXiv 1802 09477"},{"key":"ref29","article-title":"Imagination-augmented agents for deep reinforcement learning","author":"weber","year":"2017","journal-title":"arXiv 1707 06203"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2022.3153585"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2929120"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1177\/0278364920987859"},{"key":"ref4","article-title":"Learning to walk via deep reinforcement learning","author":"haarnoja","year":"2018","journal-title":"arXiv 1812 11103"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3070694"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2953326"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3050338"},{"key":"ref40","first-page":"119","author":"sutton","year":"2018","journal-title":"Temporal-Difference Learning"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/6514899\/10132462.pdf?arnumber=10132462","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,6,6]],"date-time":"2023-06-06T21:08:51Z","timestamp":1686085731000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10132462\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/access.2023.3279729","relation":{"has-preprint":[{"id-type":"doi","id":"10.36227\/techrxiv.21583893","asserted-by":"object"},{"id-type":"doi","id":"10.36227\/techrxiv.21583893.v1","asserted-by":"object"}]},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}