{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,5]],"date-time":"2024-09-05T09:24:46Z","timestamp":1725528286223},"reference-count":17,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2017,11]]},"DOI":"10.1109\/ssci.2017.8280911","type":"proceedings-article","created":{"date-parts":[[2018,2,7]],"date-time":"2018-02-07T21:44:37Z","timestamp":1518039877000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Efficient actor-critic algorithm with dual piecewise model learning"],"prefix":"10.1109","author":[{"given":"Shan","family":"Zhong","sequence":"first","affiliation":[]},{"given":"Quan","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Shengrong","family":"Gong","sequence":"additional","affiliation":[]},{"given":"Qiming","family":"Fu","sequence":"additional","affiliation":[]},{"given":"Jin","family":"Xu","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1177\/105971239300100403"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/BF00993104"},{"key":"ref12","first-page":"528","article-title":"Dyna-style planning with linear function approximation and prioritized sweeping","author":"sutton","year":"2008","journal-title":"UAI"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2011.09.008"},{"journal-title":"Reinforcement Learning and Dynamic Programming Using Function Approximators","year":"2010","author":"busoniu","key":"ref14"},{"key":"ref15","article-title":"Approximate policy iteration with linear action models","author":"yao","year":"2012","journal-title":"AAAI"},{"key":"ref16","article-title":"Linear options","author":"sorg","year":"2010","journal-title":"AAMAS"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CDC.2012.6426427"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1023\/A:1017936530646"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/BF00114723"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992699"},{"key":"ref5","first-page":"528","article-title":"On the rate of the convergence and error bounds for LSTD($\\lambda$)","author":"tagorti","year":"2015","journal-title":"ICML"},{"key":"ref8","first-page":"692","article-title":"A deeper look at planning as learning from replay","author":"van seijen","year":"2015","journal-title":"ICML"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCC.2011.2106494"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"journal-title":"Reinforcement Learning An Introduction","year":"1998","key":"ref1"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/B978-1-55860-141-3.50030-4"}],"event":{"name":"2017 IEEE Symposium Series on Computational Intelligence (SSCI)","start":{"date-parts":[[2017,11,27]]},"location":"Honolulu, HI","end":{"date-parts":[[2017,12,1]]}},"container-title":["2017 IEEE Symposium Series on Computational Intelligence (SSCI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8267146\/8280782\/08280911.pdf?arnumber=8280911","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2018,3,16]],"date-time":"2018-03-16T16:01:53Z","timestamp":1521216113000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/8280911\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2017,11]]},"references-count":17,"URL":"https:\/\/doi.org\/10.1109\/ssci.2017.8280911","relation":{},"subject":[],"published":{"date-parts":[[2017,11]]}}}