{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T02:39:30Z","timestamp":1730255970921,"version":"3.28.0"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,5,30]],"date-time":"2021-05-30T00:00:00Z","timestamp":1622332800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,5,30]],"date-time":"2021-05-30T00:00:00Z","timestamp":1622332800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,5,30]],"date-time":"2021-05-30T00:00:00Z","timestamp":1622332800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,5,30]]},"DOI":"10.1109\/icra48506.2021.9560856","type":"proceedings-article","created":{"date-parts":[[2021,10,20]],"date-time":"2021-10-20T00:28:35Z","timestamp":1634689715000},"page":"8416-8421","source":"Crossref","is-referenced-by-count":7,"title":["Proximal Policy Optimization with Relative Pearson Divergence"],"prefix":"10.1109","author":[{"given":"Taisuke","family":"Kobayashi","sequence":"first","affiliation":[]}],"member":"263","reference":[{"article-title":"Proximal policy optimization algorithms","year":"2017","author":"schulman","key":"ref10"},{"key":"ref11","first-page":"113","article-title":"Truly proximal policy optimization","author":"wang","year":"2020","journal-title":"Uncertainty in Artificial Intelligence"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1162\/NECO_a_00442"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.5626\/JCSE.2013.7.2.99"},{"key":"ref14","article-title":"Pybullet, a python module for physics simulation for games, robotics and machine learning","author":"coumans","year":"2016","journal-title":"Github Repository"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3041755"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2006.881731"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2020.12.023"},{"article-title":"Adaptive and multiple time-scale eligibility traces for online deep reinforcement learning","year":"2020","author":"kobayashi","key":"ref19"},{"key":"ref4","doi-asserted-by":"crossref","first-page":"436","DOI":"10.1038\/nature14539","article-title":"Deep learning","volume":"521","author":"lecun","year":"2015","journal-title":"Nature"},{"key":"ref27","article-title":"Automatic differentiation in pytorch","author":"paszke","year":"2017","journal-title":"Advances in Neural Information Processing Systems Workshop"},{"journal-title":"Reinforcement Learning An Introduction","year":"2018","author":"sutton","key":"ref3"},{"key":"ref6","first-page":"1889","article-title":"Trust region policy optimization","author":"schulman","year":"2015","journal-title":"International Conference on Machine Learning"},{"key":"ref5","doi-asserted-by":"crossref","first-page":"484","DOI":"10.1038\/nature16961","article-title":"Mastering the game of go with deep neural networks and tree search","volume":"529","author":"silver","year":"2016","journal-title":"Nature"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-019-05788-0"},{"article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","year":"2018","author":"haarnoja","key":"ref7"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2018.11.004"},{"article-title":"A theory of regularized markov decision processes","year":"2019","author":"geist","key":"ref9"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2015.2412554"},{"article-title":"Openai gym","year":"2016","author":"brockman","key":"ref20"},{"article-title":"Layer normalization","year":"2016","author":"ba","key":"ref22"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-019-01510-8"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992699"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2017.12.012"},{"article-title":"Towards deep robot learning with optimizer applicable to non-stationary problems","year":"2020","author":"kobayashi","key":"ref26"},{"article-title":"Laprop: a better way to combine momentum with adaptive gradient","year":"2020","author":"ziyin","key":"ref25"}],"event":{"name":"2021 IEEE International Conference on Robotics and Automation (ICRA)","start":{"date-parts":[[2021,5,30]]},"location":"Xi'an, China","end":{"date-parts":[[2021,6,5]]}},"container-title":["2021 IEEE International Conference on Robotics and Automation (ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9560720\/9560666\/09560856.pdf?arnumber=9560856","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T15:47:12Z","timestamp":1652197632000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9560856\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,5,30]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/icra48506.2021.9560856","relation":{},"subject":[],"published":{"date-parts":[[2021,5,30]]}}}