{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,28]],"date-time":"2026-01-28T21:06:08Z","timestamp":1769634368698,"version":"3.49.0"},"reference-count":20,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,12,12]],"date-time":"2024-12-12T00:00:00Z","timestamp":1733961600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,12]],"date-time":"2024-12-12T00:00:00Z","timestamp":1733961600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000780","name":"European Union","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100000780","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,12,12]]},"DOI":"10.1109\/icarcv63323.2024.10821548","type":"proceedings-article","created":{"date-parts":[[2025,1,9]],"date-time":"2025-01-09T19:36:27Z","timestamp":1736451387000},"page":"460-466","source":"Crossref","is-referenced-by-count":1,"title":["An Energy-Aware Decision-Making Scheme for Mobile Robots on a Graph Map Based on Deep Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Gabriele","family":"Gemignani","sequence":"first","affiliation":[{"name":"DAuSy National Programme for Polytechnic of Bari,Dept. of Electrical and Information Engineering,Bari,Italy,70125"}]},{"given":"Margherita","family":"Bongiorni","sequence":"additional","affiliation":[{"name":"University of Pisa"}]},{"given":"Lorenzo","family":"Pollini","sequence":"additional","affiliation":[{"name":"University of Pisa,Automatic Control"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.3390\/drones6050126"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/tnn.1998.712192"},{"issue":"6","key":"ref3","first-page":"26","article-title":"Deep Reinforcement Learning: A Brief Survey","volume-title":"IEEE Signal Processing Magazine","volume":"34","author":"Arulkumaran","year":"2017"},{"key":"ref4","author":"Mnih","year":"2013","journal-title":"Playing Atari with Deep Reinforcement Learning"},{"issue":"7676","key":"ref5","doi-asserted-by":"crossref","first-page":"354","DOI":"10.1038\/nature24270","article-title":"Mastering the game of Go without human knowledge","volume":"550","author":"Silver","year":"2017","journal-title":"Nature"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-019-01073-3"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2022.3153352"},{"issue":"7","key":"ref8","doi-asserted-by":"crossref","first-page":"6180","DOI":"10.1109\/JIOT.2020.2973193","article-title":"Deep-Reinforcement-Learning-Based Autonomous UAV Navigation With Sparse Rewards","volume":"7","author":"Wang","year":"2020","journal-title":"IEEE Internet of Things Journal"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/DASC50938.2020.9256455"},{"key":"ref10","first-page":"1","article-title":"A Deep Reinforcement Learning Framework for UAV Navigation in Indoor Environments","volume-title":"2019 IEEE Aerospace Conference. Big Sky","author":"Walker"},{"key":"ref11","doi-asserted-by":"crossref","first-page":"59486","DOI":"10.1109\/ACCESS.2021.3073704","article-title":"UAV Path Planning Based on Multi-Layer Reinforcement Learning Technique","volume":"9","author":"Cui","year":"2021","journal-title":"IEEE Access"},{"issue":"18","key":"ref12","doi-asserted-by":"crossref","first-page":"3840","DOI":"10.3390\/electronics12183840","article-title":"Deep Reinforcement Learning-Based 2.5D Multi-Objective Path Planning for Ground Vehicles: Considering Distance and Energy Consumption","volume":"12","author":"Wu","year":"2023","journal-title":"Electronics"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICARCV.2016.7838739"},{"key":"ref14","first-page":"1","article-title":"A DDPG-based Approach for Energy-aware UAV Navigation in Obstacle-constrained Environment","volume-title":"2020 IEEE 6th World Forum on Internet of Things (WF-IoT)","author":"Bouhamed"},{"key":"ref15","author":"Towers","year":"2023","journal-title":"Gymnasium"},{"issue":"267","key":"ref16","first-page":"1","article-title":"Tianshou: A highly modularized deep reinforcement learning library","volume":"23","author":"Weng","year":"2022","journal-title":"Journal of Machine Learning Research"},{"issue":"1","key":"ref17","article-title":"Deep Reinforcement Learning with Double Q-Learning","volume-title":"Proceedings of the AAAI Conference on Artificial Intelligence","volume":"30","author":"Hasselt"},{"key":"ref18","volume-title":"Reinforcement learning for robots using neural networks","author":"Lin","year":"1992"},{"key":"ref19","author":"Schaul","year":"2016","journal-title":"Prioritized Experience Replay"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.3390\/robotics11050109"}],"event":{"name":"2024 18th International Conference on Control, Automation, Robotics and Vision (ICARCV)","location":"Dubai, United Arab Emirates","start":{"date-parts":[[2024,12,12]]},"end":{"date-parts":[[2024,12,15]]}},"container-title":["2024 18th International Conference on Control, Automation, Robotics and Vision (ICARCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10821514\/10821497\/10821548.pdf?arnumber=10821548","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,10]],"date-time":"2025-01-10T05:52:42Z","timestamp":1736488362000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10821548\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,12]]},"references-count":20,"URL":"https:\/\/doi.org\/10.1109\/icarcv63323.2024.10821548","relation":{},"subject":[],"published":{"date-parts":[[2024,12,12]]}}}