{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T07:46:47Z","timestamp":1767340007530},"reference-count":16,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,6,1]],"date-time":"2020-06-01T00:00:00Z","timestamp":1590969600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,6,1]],"date-time":"2020-06-01T00:00:00Z","timestamp":1590969600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,6,1]],"date-time":"2020-06-01T00:00:00Z","timestamp":1590969600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,6]]},"DOI":"10.1109\/icc40277.2020.9148608","type":"proceedings-article","created":{"date-parts":[[2020,7,27]],"date-time":"2020-07-27T22:26:45Z","timestamp":1595888805000},"page":"1-6","source":"Crossref","is-referenced-by-count":18,"title":["A Double Q-Learning Approach for Navigation of Aerial Vehicles with Connectivity Constraint"],"prefix":"10.1109","author":[{"given":"Behzad","family":"Khamidehi","sequence":"first","affiliation":[]},{"given":"Elvino S.","family":"Sousa","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCW.2018.8403623"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/SPAWC.2019.8815469"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/LWC.2014.2342736"},{"journal-title":"Introduction to Reinforcement Learning","year":"1998","author":"sutton","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref15","first-page":"2613","article-title":"Double Q-learning","author":"hasselt","year":"2010","journal-title":"Proc NeurIPS"},{"key":"ref16","first-page":"2094","article-title":"Deep Reinforcement Learning with Double Q-Learning","author":"hasselt","year":"2016","journal-title":"Proc 13th AAAI Conf Artif Intell"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TWC.2017.2789293"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.2016.1600178CM"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/WCNC.2018.8377340"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/PIMRC.2019.8904880"},{"journal-title":"Mobile Edge Computing for CellularConnected UAV Computation Offloading and Trajectory Optimization","year":"0","author":"cao","key":"ref8"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/PIMRC.2019.8904357"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2018.2880468"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.2016.7470933"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICC.2019.8761259"}],"event":{"name":"ICC 2020 - 2020 IEEE International Conference on Communications (ICC)","start":{"date-parts":[[2020,6,7]]},"location":"Dublin, Ireland","end":{"date-parts":[[2020,6,11]]}},"container-title":["ICC 2020 - 2020 IEEE International Conference on Communications (ICC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9141367\/9148588\/09148608.pdf?arnumber=9148608","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:08:53Z","timestamp":1656374933000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9148608\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,6]]},"references-count":16,"URL":"https:\/\/doi.org\/10.1109\/icc40277.2020.9148608","relation":{},"subject":[],"published":{"date-parts":[[2020,6]]}}}