{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,2]],"date-time":"2025-08-02T04:13:47Z","timestamp":1754108027048,"version":"3.28.0"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,7]]},"DOI":"10.1109\/ijcnn48605.2020.9207518","type":"proceedings-article","created":{"date-parts":[[2020,9,29]],"date-time":"2020-09-29T20:40:33Z","timestamp":1601412033000},"page":"1-8","source":"Crossref","is-referenced-by-count":3,"title":["Learning to Play Precision Ball Sports from scratch: a Deep Reinforcement Learning Approach"],"prefix":"10.1109","author":[{"given":"Liliana","family":"Antao","sequence":"first","affiliation":[]},{"given":"Armando","family":"Sousa","sequence":"additional","affiliation":[]},{"given":"Luis Paulo","family":"Reis","sequence":"additional","affiliation":[]},{"given":"Gil","family":"Goncalves","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"article-title":"Learning dexterous in-hand manipulation","year":"2018","author":"andrychowicz","key":"ref11"},{"key":"ref12","first-page":"9333","article-title":"Hardware conditioned policies for multi-robot transfer learning","author":"chen","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref13","article-title":"Reinforcement learning to adjust robot movements to new situations","author":"kober","year":"2011","journal-title":"Twenty-Second International Joint Conference on Artificial Intelligence"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386047"},{"article-title":"Bias-reduced hindsight experience replay with virtual goal prioritization","year":"2019","author":"manela","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2008.4650953"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICHR.2010.5686841"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1177\/0278364912472380"},{"key":"ref19","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v24i1.7727","article-title":"Relative entropy policy search","author":"peters","year":"2010","journal-title":"Twenty-Fourth AAAI Conference on Artificial Intelligence"},{"article-title":"Openai gym","year":"2016","author":"brockman","key":"ref4"},{"key":"ref3","article-title":"Openai baselines","author":"dhariwal","year":"2017","journal-title":"GitHub repository GitHub"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2006.282564"},{"article-title":"Reinforcement learning for robotic manipulation using simulated locomotion demonstrations","year":"2019","author":"kilinc","key":"ref5"},{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"schulman","key":"ref8"},{"key":"ref7","article-title":"Off-policy temporal-difference learning with function approximation","author":"precup","year":"2001","journal-title":"ICML"},{"key":"ref2","article-title":"A robot soccer team as a strategy to develop educational iniciatives","author":"calderon","year":"2012","journal-title":"Latin American and Caribbean Conference for Engineering and Technology"},{"article-title":"Continuous control with deep reinforcement learning","year":"2015","author":"lillicrap","key":"ref9"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1177\/0278364913495721"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICHR.2006.321319"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICARSC.2019.8733632"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2010.5509181"},{"article-title":"Multi-goal reinforcement learning: Challenging robotics environments and request for research","year":"2018","author":"plappert","key":"ref24"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref25","first-page":"5048","article-title":"Hindsight experience replay","author":"andrychowicz","year":"2017","journal-title":"Advances in neural information processing systems"}],"event":{"name":"2020 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2020,7,19]]},"location":"Glasgow, United Kingdom","end":{"date-parts":[[2020,7,24]]}},"container-title":["2020 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9200848\/9206590\/09207518.pdf?arnumber=9207518","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,21]],"date-time":"2022-11-21T01:23:32Z","timestamp":1668993812000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9207518\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,7]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/ijcnn48605.2020.9207518","relation":{},"subject":[],"published":{"date-parts":[[2020,7]]}}}