{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T15:58:55Z","timestamp":1774022335279,"version":"3.50.1"},"reference-count":11,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T00:00:00Z","timestamp":1572566400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T00:00:00Z","timestamp":1572566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,11,1]],"date-time":"2019-11-01T00:00:00Z","timestamp":1572566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,11]]},"DOI":"10.1109\/la-cci47412.2019.9036763","type":"proceedings-article","created":{"date-parts":[[2020,3,20]],"date-time":"2020-03-20T08:23:56Z","timestamp":1584692636000},"page":"1-4","source":"Crossref","is-referenced-by-count":21,"title":["Performing Deep Recurrent Double Q-Learning for Atari Games"],"prefix":"10.1109","author":[{"given":"Felipe","family":"Moreno-Vera","sequence":"first","affiliation":[]}],"member":"263","reference":[{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref3","article-title":"Reinforcement Learning Architectures","author":"sutton","year":"0"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3912"},{"key":"ref6","article-title":"Deep Recurrent Q-Learning for Partially Observable MDPs","author":"hausknecht","year":"0","journal-title":"2015 AAAI Fall Symposium Series"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref5","article-title":"Playing Atari with Deep 
Reinforcement Learning","author":"mnih","year":"0","journal-title":"NIPS Deep Learning Workshop 2013"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref7","article-title":"Deep Q-Learning with Recurrent Neural Networks","author":"chen","year":"0"},{"key":"ref2","article-title":"Sistema de Monitoreo de Autos por Mini-Robot inteligente utilizando Técnicas de Visión Computacional en Garaje Subterráneo","author":"león-vera","year":"2018","journal-title":"LACCEI The Latin American and Caribbean Consortium for Engineering Institutions"},{"key":"ref9","article-title":"Deep Reinforcement Learning with Double Q-learning","author":"van hasselt","year":"0"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-11680-4_18"}],"event":{"name":"2019 IEEE Latin American Conference on Computational Intelligence (LA-CCI)","location":"Guayaquil, Ecuador","start":{"date-parts":[[2019,11,11]]},"end":{"date-parts":[[2019,11,15]]}},"container-title":["2019 IEEE Latin American Conference on Computational Intelligence (LA-CCI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9028002\/9036753\/09036763.pdf?arnumber=9036763","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T14:49:39Z","timestamp":1658155779000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9036763\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,11]]},"references-count":11,"URL":"https:\/\/doi.org\/10.1109\/la-cci47412.2019.9036763","relation":{},"subject":[],"published":{"date-parts":[[2019,11]]}}}