{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T13:17:02Z","timestamp":1771679822554,"version":"3.50.1"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,5,19]],"date-time":"2021-05-19T00:00:00Z","timestamp":1621382400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,5,19]],"date-time":"2021-05-19T00:00:00Z","timestamp":1621382400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,5,19]],"date-time":"2021-05-19T00:00:00Z","timestamp":1621382400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,5,19]]},"DOI":"10.1109\/saci51354.2021.9465602","type":"proceedings-article","created":{"date-parts":[[2021,6,30]],"date-time":"2021-06-30T20:24:34Z","timestamp":1625084674000},"page":"000521-000526","source":"Crossref","is-referenced-by-count":7,"title":["Decaying Clipping Range in Proximal Policy Optimization"],"prefix":"10.1109","author":[{"given":"Monika","family":"Farsang","sequence":"first","affiliation":[]},{"given":"Luca","family":"Szegletes","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"crossref","first-page":"229","DOI":"10.1007\/BF00992696","article-title":"Simple statistical gradient-following algorithms for connectionist reinforcement learning","volume":"8","author":"williams","year":"1992","journal-title":"Machine Learning"},{"key":"ref11","author":"brockman","year":"2016","journal-title":"OpenAI Gym"},{"key":"ref12","author":"coumans","year":"2017","journal-title":"pybullet a python module for physics simulation in robotics games and machine learning"},{"key":"ref13","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref14","article-title":"Generalization in reinforcement learning: Successful examples using sparse coarse coding","author":"sutton","year":"1996","journal-title":"NIPS"},{"key":"ref15","first-page":"1573","article-title":"Rlpy: A value-function-based reinforcement learning framework for education and research","volume":"16","author":"dann","year":"2015","journal-title":"Journal of Machine Learning Research"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1145\/988525.988552"},{"key":"ref17","article-title":"Guided policy search","author":"levine","year":"2013","journal-title":"International Conference on Machine Learning"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2011.VII.010"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/127719.122755"},{"key":"ref4","author":"schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref3","first-page":"1531","article-title":"A natural policy gradient","author":"kakade","year":"2001","journal-title":"Neural Information Processing Systems Natural and Synthetic"},{"key":"ref6","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1162\/neco.2006.18.12.2936"},{"key":"ref8","author":"chen","year":"2018","journal-title":"An adaptive clipping approach for proximal policy optimization"},{"key":"ref7","author":"wang","year":"2017","journal-title":"Sample efficient actor-critic with experience replay"},{"key":"ref2","first-page":"1889","article-title":"Trust region policy optimization","volume":"37","author":"schulman","year":"2015","journal-title":"International Conference on Machine Learning"},{"key":"ref9","first-page":"626","article-title":"Trust region-guided proximal policy optimization","author":"wang","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref1","first-page":"1329","article-title":"Benchmarking deep reinforcement learning for continuous control","author":"duan","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/EURCON.2007.4400335"},{"key":"ref22","author":"raffin","year":"2019","journal-title":"Stable baselines3"},{"key":"ref21","first-page":"2944","article-title":"Learning continuous control policies by stochastic value gradients","volume":"28","author":"heess","year":"2015","journal-title":"Advances in neural information processing systems"}],"event":{"name":"2021 IEEE 15th International Symposium on Applied Computational Intelligence and Informatics (SACI)","location":"Timisoara, Romania","start":{"date-parts":[[2021,5,19]]},"end":{"date-parts":[[2021,5,21]]}},"container-title":["2021 IEEE 15th International Symposium on Applied Computational Intelligence and Informatics (SACI)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9465482\/9465540\/09465602.pdf?arnumber=9465602","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T15:43:03Z","timestamp":1652197383000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9465602\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,5,19]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/saci51354.2021.9465602","relation":{},"subject":[],"published":{"date-parts":[[2021,5,19]]}}}