{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,15]],"date-time":"2025-08-15T00:17:14Z","timestamp":1755217034172,"version":"3.43.0"},"reference-count":18,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"European Union-Next Generation EU Mission 4 Component 1 CUP","award":["E53D23014640001"],"award-info":[{"award-number":["E53D23014640001"]}]},{"name":"PRIN PNRR CUP","award":["C53D23008320001"],"award-info":[{"award-number":["C53D23008320001"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Control Syst. Lett."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/lcsys.2025.3587053","type":"journal-article","created":{"date-parts":[[2025,7,9]],"date-time":"2025-07-09T23:21:37Z","timestamp":1752103297000},"page":"2006-2011","source":"Crossref","is-referenced-by-count":0,"title":["DR-PETS: Learning-Based Control With Planning in Adversarial Environments"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4212-2826","authenticated-orcid":false,"given":"Hozefa","family":"Jesawada","sequence":"first","affiliation":[{"name":"Department of Information and Electrical Engineering and Applied Mathematics, University of Salerno, Salerno, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4349-6320","authenticated-orcid":false,"given":"Antonio","family":"Acernese","sequence":"additional","affiliation":[{"name":"Department of Engineering, University of Sannio, Benevento, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-8150-6276","authenticated-orcid":false,"given":"Davide","family":"Del Vecchio","sequence":"additional","affiliation":[{"name":"resides, Rome, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5001-3027","authenticated-orcid":false,"given":"Giovanni","family":"Russo","sequence":"additional","affiliation":[{"name":"Department of Information and Electrical Engineering and Applied Mathematics, University of Salerno, Salerno, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6937-9678","authenticated-orcid":false,"given":"Carmen Del","family":"Vecchio","sequence":"additional","affiliation":[{"name":"Department of Engineering, University of Sannio, Benevento, Italy"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.arcontrol.2022.09.003"},{"key":"ref2","first-page":"4759","article-title":"Deep reinforcement learning in a handful of trials using probabilistic dynamics models","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst.","author":"Chua"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2024.3474169"},{"key":"ref4","first-page":"1","article-title":"Robustness and regularization in reinforcement learning","volume-title":"Proc. 
Workshop Generalization Planning","author":"Derman"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2015.2495174"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1287\/opre.2022.2326"},{"key":"ref7","article-title":"Distributional robustness and regularization in reinforcement learning","author":"Derman","year":"2020","journal-title":"arXiv:2003.02894"},{"key":"ref8","first-page":"13623","article-title":"Distributionally robust Q-learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Liu"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2022.110850"},{"journal-title":"Trans. Mach. Learn. Res.","article-title":"Optimal transport perturbations for safe reinforcement learning with robustness guarantees","author":"Queeney","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2020.12.521"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/LCSYS.2021.3091628"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2022.110648"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1287\/moor.2018.0936"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TCST.2019.2949757"},{"issue":"200","key":"ref16","first-page":"1","article-title":"Distributionally robust model-based offline reinforcement learning with near-optimal sample complexity","volume":"25","author":"Shi","year":"2024","journal-title":"J. Mach. Learn. Res."},{"key":"ref17","first-page":"1","article-title":"Explaining and harnessing adversarial examples","volume-title":"Proc. 3rd Int. Conf. Learn. Represent.","author":"Goodfellow"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1016\/B978-0-444-53859-8.00003-5"}],"container-title":["IEEE Control Systems Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7782633\/10939047\/11075840.pdf?arnumber=11075840","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,5]],"date-time":"2025-08-05T04:41:24Z","timestamp":1754368884000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11075840\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/lcsys.2025.3587053","relation":{},"ISSN":["2475-1456"],"issn-type":[{"type":"electronic","value":"2475-1456"}],"subject":[],"published":{"date-parts":[[2025]]}}}
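The record above is a Crossref "work" message for DOI 10.1109/lcsys.2025.3587053. As a minimal sketch of how such a record can be retrieved and a few fields read out, the snippet below queries the public Crossref REST API with the `requests` library; the endpoint and field names match the structure shown above, but the helper function and its output formatting are illustrative assumptions, not part of the record.

import requests

DOI = "10.1109/lcsys.2025.3587053"  # DOI taken from the record above

def fetch_crossref_work(doi: str) -> dict:
    """Fetch a Crossref 'work' message for the given DOI (hypothetical helper)."""
    resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
    resp.raise_for_status()
    payload = resp.json()
    # The envelope mirrors the record above: status, message-type, message.
    if payload.get("message-type") != "work":
        raise ValueError("unexpected Crossref message type")
    return payload["message"]

if __name__ == "__main__":
    work = fetch_crossref_work(DOI)
    # 'title' and 'container-title' are lists in the Crossref schema.
    print("Title:     ", work["title"][0])
    print("Journal:   ", work["container-title"][0])
    print("Authors:   ", ", ".join(f'{a.get("given", "")} {a.get("family", "")}'.strip()
                                   for a in work.get("author", [])))
    print("References:", work.get("references-count"))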