{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T19:06:40Z","timestamp":1771960000141,"version":"3.50.1"},"reference-count":54,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2020,10,1]],"date-time":"2020-10-01T00:00:00Z","timestamp":1601510400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,10,1]],"date-time":"2020-10-01T00:00:00Z","timestamp":1601510400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,10,1]],"date-time":"2020-10-01T00:00:00Z","timestamp":1601510400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100010607","name":"Universit\u00e0 degli Studi di Perugia","doi-asserted-by":"publisher","award":["RICBA17MRF"],"award-info":[{"award-number":["RICBA17MRF"]}],"id":[{"id":"10.13039\/501100010607","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100010607","name":"Universit\u00e0 degli Studi di Perugia","doi-asserted-by":"publisher","award":["RICBA18MF"],"award-info":[{"award-number":["RICBA18MF"]}],"id":[{"id":"10.13039\/501100010607","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Robot."],"published-print":{"date-parts":[[2020,10]]},"DOI":"10.1109\/tro.2020.2994002","type":"journal-article","created":{"date-parts":[[2020,5,27]],"date-time":"2020-05-27T20:48:57Z","timestamp":1590612537000},"page":"1546-1561","source":"Crossref","is-referenced-by-count":92,"title":["Towards Generalization in Target-Driven Visual Navigation by Using Deep Reinforcement Learning"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7522-6264","authenticated-orcid":false,"given":"Alessandro","family":"Devo","sequence":"first","affiliation":[]},{"given":"Giacomo","family":"Mezzetti","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8417-9372","authenticated-orcid":false,"given":"Gabriele","family":"Costante","sequence":"additional","affiliation":[]},{"given":"Mario L.","family":"Fravolini","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0486-7678","authenticated-orcid":false,"given":"Paolo","family":"Valigi","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","article-title":"Visual semantic navigation using scene priors","author":"yang","year":"2018"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.60"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460934"},{"key":"ref32","first-page":"1334","article-title":"End-to-end training of deep visuomotor policies","volume":"17","author":"levine","year":"2016","journal-title":"J Mach Learn Res"},{"key":"ref31","article-title":"Cad2rl: Real single-image flight without a single real image","author":"sadeghi","year":"2016"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8594249"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793493"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2930426"},{"key":"ref35","article-title":"One-shot reinforcement learning for robot navigation with interactive replay","author":"bruce","year":"2017"},{"key":"ref34","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref28","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1023\/A:1007634325138"},{"key":"ref29","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/70.88137"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/21.44033"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1177\/0278364919887447"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593720"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989381"},{"key":"ref24","article-title":"Assessing generalization in deep reinforcement learning","author":"packer","year":"2018"},{"key":"ref23","article-title":"Generalization and regularization in DQN","author":"farebrother","year":"2018"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1145\/203330.203343"},{"key":"ref25","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460528"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/WACV.2018.00097"},{"key":"ref54","article-title":"Finding and visualizing weaknesses of deep reinforcement learning agents","author":"rupprecht","year":"2019"},{"key":"ref53","first-page":"1787","article-title":"Visualizing and understanding Atari agents","author":"greydanus","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref52","article-title":"On evaluation of embodied navigation agents","author":"anderson","year":"2018"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460655"},{"key":"ref40","article-title":"Semi-supervised classification with graph convolutional networks","author":"kipf","year":"2016"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.769"},{"key":"ref13","article-title":"Reinforcement learning with unsupervised auxiliary tasks","author":"jaderberg","year":"2016"},{"key":"ref14","article-title":"Learning to navigate in complex environments","author":"mirowski","year":"2016"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2965857"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913495721"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989385"},{"key":"ref18","first-page":"651","article-title":"Scalable deep reinforcement learning for vision-based robotic manipulation","author":"kalashnikov","year":"0","journal-title":"Proc Conf Robot Learn"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2018.2878318"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.1995.525695"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1023\/A:1008824626321"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/70.938381"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2017.2705103"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2016.2624754"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-012-9365-8"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202133"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.91"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref48","article-title":"Sample efficient actor-critic with experience replay","author":"wang","year":"2016"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46466-4_15"},{"key":"ref42","article-title":"Efficient parallel methods for deep reinforcement learning","author":"clemente","year":"2017"},{"key":"ref41","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013"},{"key":"ref44","first-page":"1407","article-title":"Impala: Scalable distributed deep-rl with importance weighted actor-learner architectures","author":"espeholt","year":"2018","journal-title":"Int Conf Mach Learn"},{"key":"ref43","article-title":"Ga3c: GPU-based a3c for deep reinforcement learning","author":"babaeizadeh","year":"2016"}],"container-title":["IEEE Transactions on Robotics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8860\/9210910\/09102361.pdf?arnumber=9102361","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T16:48:00Z","timestamp":1651078080000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9102361\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10]]},"references-count":54,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/tro.2020.2994002","relation":{},"ISSN":["1552-3098","1941-0468"],"issn-type":[{"value":"1552-3098","type":"print"},{"value":"1941-0468","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,10]]}}}