{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,9]],"date-time":"2026-01-09T22:43:25Z","timestamp":1767998605827,"version":"3.49.0"},"reference-count":48,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/100014440","name":"Ministerio de Ciencia, Innovaci\u00f3n y Universidades","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100014440","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3508139","type":"journal-article","created":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T18:41:02Z","timestamp":1733164862000},"page":"180146-180160","source":"Crossref","is-referenced-by-count":5,"title":["Deep Reinforcement Learning for the Biologically Inspired Social Behaviour of Autonomous Robots Acting in Dynamic Environments"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9576-1731","authenticated-orcid":false,"given":"Marcos","family":"Maroto-G\u00f3mez","sequence":"first","affiliation":[{"name":"Systems Engineering and Automation Department, Carlos III University of Madrid, Legan&#x00E9;s, Spain"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2317-3329","authenticated-orcid":false,"given":"Mar\u00eda","family":"Malfaz","sequence":"additional","affiliation":[{"name":"Systems Engineering and Automation Department, Carlos III University of Madrid, Legan&#x00E9;s, 
Spain"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5189-0002","authenticated-orcid":false,"given":"\u00c1lvaro","family":"Castro-Gonz\u00e1lez","sequence":"additional","affiliation":[{"name":"Systems Engineering and Automation Department, Carlos III University of Madrid, Legan&#x00E9;s, Spain"}]},{"given":"Sof\u00eda","family":"\u00c1lvarez Arias","sequence":"additional","affiliation":[{"name":"Systems Engineering and Automation Department, Carlos III University of Madrid, Legan&#x00E9;s, Spain"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0263-6606","authenticated-orcid":false,"given":"Miguel","family":"\u00c1ngel Salichs","sequence":"additional","affiliation":[{"name":"Systems Engineering and Automation Department, Carlos III University of Madrid, Legan&#x00E9;s, Spain"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s12369-023-00977-3"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3563659"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-42307-0_2"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/s12369-021-00811-8"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1080\/10447318.2020.1801172"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s12369-020-00687-0"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.3390\/s18082691"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3095392"},{"key":"ref9","article-title":"Playing Atari with deep reinforcement learning","author":"Mnih","year":"2013","journal-title":"arXiv:1312.5602"},{"key":"ref10","first-page":"1","article-title":"Human learning in Atari","volume-title":"Proc. AAAI Spring Symp. 
Ser.","author":"Tsividis"},{"key":"ref11","article-title":"A survey of deep reinforcement learning in video games","author":"Shao","year":"2019","journal-title":"arXiv:1912.10944"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref14","first-page":"1995","article-title":"Dueling network architectures for deep reinforcement learning","volume-title":"Proc. Int. Conf. Int. Conf. Mach. Learn.","author":"Wang"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-13-8285-7_8"},{"key":"ref16","article-title":"Soft actor-critic for discrete action settings","author":"Christodoulou","year":"2019","journal-title":"arXiv:1910.07207"},{"key":"ref17","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"key":"ref18","first-page":"449","article-title":"A distributional perspective on reinforcement learning","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Bellemare"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11796"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aat5954"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.3102\/0034654318821286"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.2196\/13322"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.23919\/SOFTCOM.2019.8903630"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.3390\/s21041292"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2016.7803357"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2018.03.014"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2020.2974688"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.patrec.2018.05.023"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/RO-MAN46459.2019.8956444"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-10-2585-3_8"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2018.09.104"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794134"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341540"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3071954"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2018.2878977"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-022-03191-2"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1080\/01691864.2022.2043184"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1561\/2200000071"},{"key":"ref39","volume-title":"Deep Learning","author":"Goodfellow","year":"2016"},{"key":"ref40","volume-title":"Reinforcement Learning: An 
Introduction","author":"Sutton","year":"2018"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_1"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1162\/cpsy_a_00025"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-7091-3671-3"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/3122.003.0031"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/s11257-022-09321-2"},{"key":"ref46","volume-title":"Hands-on Reinforcement Learning With Python: Master Reinforcement and Deep Reinforcement Learning Using OpenAI Gym and TensorFlow","author":"Ravichandiran","year":"2018"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1002\/aisy.202200288"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/s40747-023-01077-5"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10380310\/10772320.pdf?arnumber=10772320","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,12]],"date-time":"2024-12-12T06:34:59Z","timestamp":1733985299000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10772320\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":48,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3508139","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}