{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T08:13:23Z","timestamp":1730276003954,"version":"3.28.0"},"reference-count":7,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,11,16]],"date-time":"2021-11-16T00:00:00Z","timestamp":1637020800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,11,16]],"date-time":"2021-11-16T00:00:00Z","timestamp":1637020800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,11,16]]},"DOI":"10.1109\/ispacs51563.2021.9651084","type":"proceedings-article","created":{"date-parts":[[2021,12,28]],"date-time":"2021-12-28T21:29:56Z","timestamp":1640726996000},"page":"1-2","source":"Crossref","is-referenced-by-count":0,"title":["Parameters Optimization for Reinforcement Learning with Nonlinear Time-Varying Strategy by Using Uniform Experiment Design"],"prefix":"10.1109","author":[{"given":"Tien-En","family":"Lin","sequence":"first","affiliation":[{"name":"National Kaohsiung University of Science and Technology,dept. of Electrical Engineering,Kaohsiung,Taiwan"}]},{"given":"Po-Yuan","family":"Yang","sequence":"additional","affiliation":[{"name":"National Pingtung University,dept. of Intelligent Robotics,Pingtung,Taiwan"}]},{"given":"Fu-I","family":"Chou","sequence":"additional","affiliation":[{"name":"National Kaohsiung University of Science and Technology,dept. of Electrical Engineering,Kaohsiung,Taiwan"}]},{"given":"Chia-Wei","family":"Chuang","sequence":"additional","affiliation":[{"name":"National Kaohsiung University of Science and Technology,dept. of Electrical Engineering,Kaohsiung,Taiwan"}]},{"given":"Jyh-Horng","family":"Chou","sequence":"additional","affiliation":[{"name":"National Kaohsiung University of Science and Technology,dept. of Electrical Engineering,Kaohsiung,Taiwan"}]}],"member":"263","reference":[{"article-title":"Playing atari with deep reinforcement learning","year":"2013","author":"mnih","key":"ref4"},{"key":"ref3","doi-asserted-by":"crossref","first-page":"279","DOI":"10.1007\/BF00992698","article-title":"Q-learning","volume":"8","author":"watkins","year":"1992","journal-title":"Machine Learning"},{"key":"ref6","first-page":"485","article-title":"A note on uniform distribution and experimental design","volume":"26","author":"wang","year":"1981","journal-title":"Chin Sci Bull"},{"article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","year":"2017","author":"lowe","key":"ref5"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2007.914879"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1287\/mnsc.1050.0393"},{"journal-title":"Reinforcement Learning An Introduction","year":"2018","author":"sutton","key":"ref1"}],"event":{"name":"2021 International Symposium on Intelligent Signal Processing and Communication Systems (ISPACS)","start":{"date-parts":[[2021,11,16]]},"location":"Hualien City, Taiwan","end":{"date-parts":[[2021,11,19]]}},"container-title":["2021 International Symposium on Intelligent Signal Processing and Communication Systems (ISPACS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9650918\/9650920\/09651084.pdf?arnumber=9651084","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,6]],"date-time":"2022-06-06T20:23:57Z","timestamp":1654547037000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9651084\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,11,16]]},"references-count":7,"URL":"https:\/\/doi.org\/10.1109\/ispacs51563.2021.9651084","relation":{},"subject":[],"published":{"date-parts":[[2021,11,16]]}}}