{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,21]],"date-time":"2026-04-21T15:06:59Z","timestamp":1776784019023,"version":"3.51.2"},"reference-count":12,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/cog51982.2022.9893546","type":"proceedings-article","created":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T19:33:31Z","timestamp":1663702411000},"page":"377-384","source":"Crossref","is-referenced-by-count":2,"title":["Mitigating Cowardice for Reinforcement Learning Agents in Combat Scenarios"],"prefix":"10.1109","author":[{"given":"Steve","family":"Bakos","sequence":"first","affiliation":[{"name":"Ontario Tech University,Oshawa,Canada"}]},{"given":"Heidar","family":"Davoudi","sequence":"additional","affiliation":[{"name":"Ontario Tech University,Oshawa,Canada"}]}],"member":"263","reference":[{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CoG47356.2020.9231639"},{"key":"ref3","article-title":"Creating prolevel ai for a real-time fighting game using deep reinforcement learning","author":"oh","year":"2020"},{"key":"ref10","article-title":"Openai gym","author":"brockman","year":"2016"},{"key":"ref6","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013"},{"key":"ref11","article-title":"Stable baselines3","author":"raffin","year":"2019"},{"key":"ref5","doi-asserted-by":"crossref","DOI":"10.1109\/SBGames.2015.25","article-title":"Simulating human behavior in fighting games using reinforcement learning and artificial neural networks","author":"mendon\u00e7a","year":"2015"},{"key":"ref12","article-title":"Dota 2 with large scale deep reinforcement learning","author":"berner","year":"2019"},{"key":"ref8","article-title":"Gotta learn fast: A new benchmark for generalization in rl","author":"nichol","year":"2018","journal-title":"arXiv preprint arXiv 1804 03583"},{"key":"ref7","article-title":"Starcraft micromanagement with reinforcement learning and curriculum transfer learning","author":"shao","year":"2018"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.5772\/6603"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"key":"ref1","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017"}],"event":{"name":"2022 IEEE Conference on Games (CoG)","location":"Beijing, China","start":{"date-parts":[[2022,8,21]]},"end":{"date-parts":[[2022,8,24]]}},"container-title":["2022 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9893561\/9893544\/09893546.pdf?arnumber=9893546","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,10]],"date-time":"2022-10-10T20:25:16Z","timestamp":1665433516000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9893546\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":12,"URL":"https:\/\/doi.org\/10.1109\/cog51982.2022.9893546","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}