{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T12:09:25Z","timestamp":1730203765688,"version":"3.28.0"},"reference-count":10,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,9,23]],"date-time":"2020-09-23T00:00:00Z","timestamp":1600819200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,9,23]],"date-time":"2020-09-23T00:00:00Z","timestamp":1600819200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,9,23]],"date-time":"2020-09-23T00:00:00Z","timestamp":1600819200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,9,23]]},"DOI":"10.1109\/ccs49175.2020.9231455","type":"proceedings-article","created":{"date-parts":[[2020,10,21]],"date-time":"2020-10-21T18:04:34Z","timestamp":1603303474000},"page":"1-6","source":"Crossref","is-referenced-by-count":2,"title":["Apply Deep Reinforcement Learning to NS-SHAFT Game Control"],"prefix":"10.1109","author":[{"given":"BoYu","family":"Lin","sequence":"first","affiliation":[]},{"given":"ChingLung","family":"Chang","sequence":"additional","affiliation":[]},{"given":"Chuan-Yu","family":"Chang","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/nature16961"},{"article-title":"Playing Atari with Deep Reinforcement Learning","year":"2013","author":"mnih","key":"ref3"},{"journal-title":"How to match DeepMind\u2019s Deep Q-Learning score in Breakout","year":"0","key":"ref10"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1515\/9781400874651"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"journal-title":"Playing atari with deep reinforcement learning","year":"0","key":"ref8"},{"article-title":"Playing flappy bird with deep reinforcement learning","year":"2018","author":"appiah","key":"ref7"},{"key":"ref2","doi-asserted-by":"crossref","first-page":"279","DOI":"10.1007\/BF00992698","article-title":"Q-learning","volume":"8","author":"watkins","year":"1992","journal-title":"Machine Learning"},{"journal-title":"Human-level control through deep reinforcement learning","year":"0","key":"ref9"},{"journal-title":"Kusanagi Akihiko NS-SHAFT 1 3J","year":"0","key":"ref1"}],"event":{"name":"2020 International Symposium on Community-centric Systems (CcS)","start":{"date-parts":[[2020,9,23]]},"location":"Hachioji, Tokyo, Japan","end":{"date-parts":[[2020,9,26]]}},"container-title":["2020 International Symposium on Community-centric Systems (CcS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9226399\/9231308\/09231455.pdf?arnumber=9231455","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,27]],"date-time":"2022-06-27T15:56:55Z","timestamp":1656345415000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9231455\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,9,23]]},"references-count":10,"URL":"https:\/\/doi.org\/10.1109\/ccs49175.2020.9231455","relation":{},"subject":[],"published":{"date-parts":[[2020,9,23]]}}}