{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,13]],"date-time":"2026-02-13T10:15:24Z","timestamp":1770977724580,"version":"3.50.1"},"reference-count":19,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,11,4]],"date-time":"2020-11-04T00:00:00Z","timestamp":1604448000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,11,4]],"date-time":"2020-11-04T00:00:00Z","timestamp":1604448000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,11,4]],"date-time":"2020-11-04T00:00:00Z","timestamp":1604448000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,11,4]]},"DOI":"10.1109\/ssrr50563.2020.9292613","type":"proceedings-article","created":{"date-parts":[[2022,2,1]],"date-time":"2022-02-01T21:54:06Z","timestamp":1643752446000},"page":"102-107","source":"Crossref","is-referenced-by-count":13,"title":["Wilderness Search and Rescue Missions using Deep Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Ashley","family":"Peake","sequence":"first","affiliation":[]},{"given":"Joe","family":"McCalmon","sequence":"additional","affiliation":[]},{"given":"Yixin","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Benjamin","family":"Raiford","sequence":"additional","affiliation":[]},{"given":"Sarra","family":"Alqahtani","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","author":"van hasselt","year":"2015","journal-title":"Deep reinforcement learning with double q-learning"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/SPAWC.2018.8445768"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICARCV.2016.7838739"},{"key":"ref13","first-page":"1","article-title":"Re-inforcement learning for autonomous uav navigation using function approximation","author":"pham","year":"0","journal-title":"2018 IEEE International Symposium on Safety Security and Rescue Robotics (SSRR)"},{"key":"ref14","first-page":"1","article-title":"A fully-autonomous aerial robot for search and rescue applications in indoor environments using learning-based techniques","volume":"95","author":"sampedro p\u00e9rez","year":"2018","journal-title":"Journal of Intelligent Robotic Systems"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8206247"},{"key":"ref16","doi-asserted-by":"crossref","first-page":"3542","DOI":"10.3390\/s19163542","article-title":"Unsupervised human detection with an embedded vision system on a fully autonomous uav for search and rescue operations","volume":"19","author":"lygouras","year":"2019","journal-title":"SENSORS"},{"key":"ref17","article-title":"Online deep reinforcement learning for autonomous uav navigation and exploration of outdoor environments","volume":"abs 1912 5684","author":"maciel-pearson","year":"2019","journal-title":"ArXiv"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICUAS.2018.8453382"},{"key":"ref19","article-title":"Multi-agent reinforcement learning for cooperative adaptive cruise control","author":"peake","year":"0","journal-title":"IEEE International Conference on Tools with Artificial Intelligence (ICTAI)"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"452","DOI":"10.3390\/electronics8040452","article-title":"Autonomous control of unmanned aerial vehicles","volume":"8","author":"becerra","year":"2019","journal-title":"Electronics"},{"key":"ref3","author":"adams","year":"2011","journal-title":"A survey of unmanned aerial vehicle (UAV) usage for imagery collection in disaster research and management"},{"key":"ref6","doi-asserted-by":"crossref","first-page":"325","DOI":"10.1007\/978-4-431-55879-8_23","article-title":"Discof: Cooperative pathfinding in distributed systems with limited sensing and communication range","author":"zhang","year":"2016","journal-title":"Distributed Autonomous Robotic Systems"},{"key":"ref5","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref8","author":"hausknecht","year":"2015","journal-title":"Deep recurrent q-learning for partially observable mdps"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"283","DOI":"10.1109\/TRO.2008.918056","article-title":"A complete and scalable strategy for coordinating multiple robots within roadmaps","volume":"24","author":"peasgood","year":"2008","journal-title":"IEEE Transactions on Robotics"},{"key":"ref2","article-title":"Forestry applications of uavs in europe: a review","author":"torresan","year":"2016","journal-title":"International Journal of Remote Sensing"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487281"},{"key":"ref9","author":"burda","year":"2018","journal-title":"Large-Scale Study of Curiosity-Driven Learning[C]"}],"event":{"name":"2020 IEEE International Symposium on Safety, Security, and Rescue Robotics (SSRR)","location":"Abu Dhabi, United Arab Emirates","start":{"date-parts":[[2020,11,4]]},"end":{"date-parts":[[2020,11,6]]}},"container-title":["2020 IEEE International Symposium on Safety, Security, and Rescue Robotics (SSRR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9292568\/9292569\/09292613.pdf?arnumber=9292613","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T01:05:02Z","timestamp":1656637502000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9292613\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,11,4]]},"references-count":19,"URL":"https:\/\/doi.org\/10.1109\/ssrr50563.2020.9292613","relation":{},"subject":[],"published":{"date-parts":[[2020,11,4]]}}}