{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T14:36:15Z","timestamp":1730298975969,"version":"3.28.0"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,12,1]],"date-time":"2020-12-01T00:00:00Z","timestamp":1606780800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,12,1]]},"DOI":"10.1109\/ssci47803.2020.9308175","type":"proceedings-article","created":{"date-parts":[[2021,1,5]],"date-time":"2021-01-05T23:12:38Z","timestamp":1609888358000},"page":"266-275","source":"Crossref","is-referenced-by-count":0,"title":["The True Online Continuous Learning Automation (TOCLA) in a continuous control benchmarking of actor-critic algorithms"],"prefix":"10.1109","author":[{"given":"Gordon","family":"Frost","sequence":"first","affiliation":[]},{"given":"Marta","family":"Vallejo","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"article-title":"Openai gym","year":"2016","author":"brockman","key":"ref10"},{"key":"ref11","article-title":"Genetic algorithms+ data structures= evolution programs","author":"michalewicz","year":"2013","journal-title":"Springer Science & Business Media"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1613\/jair.135"},{"year":"2020","key":"ref13","article-title":"Generalised genetic algorithm (gega) github repository"},{"year":"2020","key":"ref14","article-title":"Autonomous learning library (all) reinforcement learning framework"},{"year":"2020","key":"ref15","article-title":"Allagents - an extension to the autonomous learning library with forward td() actor-critic agents"},{"key":"ref16","first-page":"369","article-title":"Generalization in reinforcement learning: Safely approximating the value function","author":"boyan","year":"1995","journal-title":"Advances in neural information processing systems"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-585-33656-5_7"},{"key":"ref18","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume":"12","author":"sutton","year":"2000","journal-title":"Advances in neural information processing systems"},{"key":"ref19","first-page":"8024","article-title":"Bradbury. Pytorch: An imperative style, high-performance deep learning library","volume":"32","author":"paszke","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/BF00115009"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref6","article-title":"Effective multi-step temporal-difference learning for non-linear function approximation","author":"seijen","year":"2016","journal-title":"arXiv preprint arXiv 1608 05151"},{"key":"ref5","first-page":"692","article-title":"True online td (lambda)","author":"seijen","year":"2014","journal-title":"Proceedings of the 31st International Conference on Machine Learning"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ADPRL.2007.368199"},{"key":"ref7","first-page":"556","article-title":"Forward actor-critic for nonlinear function approximation in reinforcement learning","author":"veeriah","year":"2017","journal-title":"Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1017\/CBO9780511810329"},{"journal-title":"Guidance and Control of Ocean Vehicles","year":"1994","author":"fossen","key":"ref1"},{"key":"ref9","first-page":"1","article-title":"Auv pipeline following using reinforcement learning","author":"fjerdingen","year":"2010","journal-title":"41st International Symposium on Robotics (ISR) and 6th German Conference on Robotics (ROBOTIK)"},{"key":"ref20","first-page":"249","article-title":"Understanding the difficulty of training deep feedforward neural networks","author":"glorot","year":"2010","journal-title":"Proceedings of the 13th International Conference on Artificial Intelligence and Statistics"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/SSCI.2016.7849959"},{"key":"ref21","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv preprint arXiv 1412 6980"}],"event":{"name":"2020 IEEE Symposium Series on Computational Intelligence (SSCI)","start":{"date-parts":[[2020,12,1]]},"location":"Canberra, ACT, Australia","end":{"date-parts":[[2020,12,4]]}},"container-title":["2020 IEEE Symposium Series on Computational Intelligence (SSCI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9308061\/9308107\/09308175.pdf?arnumber=9308175","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,30]],"date-time":"2022-06-30T15:16:02Z","timestamp":1656602162000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9308175\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,12,1]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/ssci47803.2020.9308175","relation":{},"subject":[],"published":{"date-parts":[[2020,12,1]]}}}