{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,16]],"date-time":"2026-04-16T00:29:10Z","timestamp":1776299350785,"version":"3.50.1"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,7]]},"DOI":"10.1109\/icme46284.2020.9102966","type":"proceedings-article","created":{"date-parts":[[2020,6,9]],"date-time":"2020-06-09T17:40:07Z","timestamp":1591724407000},"page":"1-6","source":"Crossref","is-referenced-by-count":6,"title":["Leveraging Deep Reinforcement Learning For Active Shooting Under Open-World Setting"],"prefix":"10.1109","author":[{"given":"A.","family":"Tzimas","sequence":"first","affiliation":[]},{"given":"N.","family":"Passalis","sequence":"additional","affiliation":[]},{"given":"A.","family":"Tefas","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","first-page":"809","article-title":"Learning a deep compact image representation for visual tracking","author":"wang","year":"2013","journal-title":"Proceedings of the Advances in Neural Information Processing Systems"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref12","article-title":"Emergent tool use from multi-agent autocurricula","author":"baker","year":"2019","journal-title":"arXiv preprint arXiv 1909 01771"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00493"},{"key":"ref14","article-title":"Gimbal control for vision-based target tracking","author":"cunha","year":"2019","journal-title":"Proceedings of the European Conference on Signal Processing"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2019.01.046"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-019-04330-6"},{"key":"ref17","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume":"99","author":"ng","year":"1999","journal-title":"Proceedings of the International Conference on Machine Learning"},{"key":"ref18","article-title":"Airsim: High-fidelity visual and physical simulation for autonomous vehicles","author":"shah","year":"2017","journal-title":"Field and Service Robotics"},{"key":"ref19","article-title":"Deep reinforcement learning with double q-learning","author":"hasselt","year":"2016","journal-title":"THIRTIETH AAAI Conference on Artificial Intelligence"},{"key":"ref4","first-page":"4241","article-title":"Toward low-flying autonomous mav trail navigation using deep neural networks for environmental awareness","author":"smolyanskiy","year":"2017","journal-title":"Proceedings of the IEEE\/RSJ International Conference on Intelligent Robots and Systems"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2017.7995703"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073712"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1186\/s13049-016-0313-5"},{"key":"ref8","volume":"461","author":"johan \u00e5str\u00f6m","year":"2006","journal-title":"Advanced PID Control"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ISCAS.2018.8351050"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.2352\/ISSN.2470-1173.2017.19.AVM-023"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487175"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.91"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/RTAS.2017.3"},{"key":"ref22","article-title":"Dueling network architectures for deep reinforcement learning","author":"wang","year":"2015","journal-title":"arXiv preprint arXiv 1511 05271"},{"key":"ref21","first-page":"315","article-title":"Deep sparse rectifier neural networks","author":"glorot","year":"2011","journal-title":"Proceedings of the Fourteenth International Conference on Artificial Intelligence and Statistics"},{"key":"ref24","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv preprint arXiv 1412 6980"},{"key":"ref23","article-title":"Prioritized experience replay","author":"schaul","year":"2015","journal-title":"arXiv preprint arXiv 1511 05952"}],"event":{"name":"2020 IEEE International Conference on Multimedia and Expo (ICME)","location":"London, United Kingdom","start":{"date-parts":[[2020,7,6]]},"end":{"date-parts":[[2020,7,10]]}},"container-title":["2020 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9099125\/9102711\/09102966.pdf?arnumber=9102966","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,27]],"date-time":"2022-06-27T20:26:51Z","timestamp":1656361611000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9102966\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,7]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icme46284.2020.9102966","relation":{},"subject":[],"published":{"date-parts":[[2020,7]]}}}