{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T06:16:23Z","timestamp":1765520183683,"version":"3.48.0"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iros60139.2025.11246765","type":"proceedings-article","created":{"date-parts":[[2025,11,27]],"date-time":"2025-11-27T18:54:45Z","timestamp":1764269685000},"page":"15231-15238","source":"Crossref","is-referenced-by-count":0,"title":["Robust Reinforcement Learning based on Momentum Adversarial Training"],"prefix":"10.1109","author":[{"given":"Li","family":"He","sequence":"first","affiliation":[{"name":"Fudan University,College of Intelligent Robotics and Advanced Manufacturing,Shanghai,China,200433"}]},{"given":"Hanchen","family":"Liu","sequence":"additional","affiliation":[{"name":"Fudan University,College of Intelligent Robotics and Advanced Manufacturing,Shanghai,China,200433"}]},{"given":"Junru","family":"Sheng","sequence":"additional","affiliation":[{"name":"Fudan University,College of Intelligent Robotics and Advanced Manufacturing,Shanghai,China,200433"}]},{"given":"Lihua","family":"Zhang","sequence":"additional","affiliation":[{"name":"Fudan University,College of Intelligent Robotics and Advanced Manufacturing,Shanghai,China,200433"}]},{"given":"Zhiyan","family":"Dong","sequence":"additional","affiliation":[{"name":"Fudan University,College of Intelligent Robotics and Advanced Manufacturing,Shanghai,China,200433"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/SSCI47803.2020.9308468"},{"key":"ref2","article-title":"Successively pruned q-learning: Using self q-function to reduce the overestimation","volume-title":"Adaptive Agents and Multi-Agent Systems","author":"Xue","year":"2024"},{"issue":"1","key":"ref3","first-page":"21","article-title":"A survey of attack, defense and related security analysis for deep reinforcement learning","volume":"48","author":"Yin","year":"2022","journal-title":"Journal of Automation"},{"key":"ref4","first-page":"21 024","article-title":"Robust deep reinforcement learning against adversarial perturbations on state observations","volume":"33","author":"Zhang","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5887"},{"article-title":"Adversarial attacks on neural network policies","year":"2017","author":"Huang","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.6047"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.23919\/ACC45564.2020.9147846"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8460528"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i5.20481"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3220531"},{"key":"ref12","first-page":"6215","article-title":"Action robust reinforcement learning and applications in continuous control","volume-title":"International Conference on Machine Learning","author":"Tessler"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN55064.2022.9892908"},{"article-title":"An overview of gradient descent optimization algorithms","year":"2016","author":"Ruder","key":"ref14"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.12794\/metadc1505267"},{"article-title":"Towards deep learning models resistant to adversarial attacks","year":"2017","author":"Madry","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2021.3111139"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729586"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3301273"},{"key":"ref20","first-page":"2817","article-title":"Robust adversarial reinforcement learning","volume-title":"International Conference on Machine Learning","author":"Pinto"},{"article-title":"Neuroflight: Next generation flight control firmware","year":"2019","author":"Koch","key":"ref21"},{"key":"ref22","article-title":"Flight controller synthesis via deep reinforcement learning","volume-title":"Ph.D. dissertation","author":"Koch","year":"2019"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9196611"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-32552-1_26"}],"event":{"name":"2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2025,10,19]]},"location":"Hangzhou, China","end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11245651\/11245652\/11246765.pdf?arnumber=11246765","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,12]],"date-time":"2025-12-12T06:13:14Z","timestamp":1765519994000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11246765\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/iros60139.2025.11246765","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}