{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,4]],"date-time":"2025-06-04T03:40:06Z","timestamp":1749008406960,"version":"3.41.0"},"reference-count":75,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2027,5,27]],"date-time":"2027-05-27T00:00:00Z","timestamp":1811376000000},"content-version":"am","delay-in-days":603,"URL":"http:\/\/www.elsevier.com\/open-access\/userlicense\/1.0\/"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100002241","name":"Japan Science and Technology Agency","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002241","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100009023","name":"Precursory Research for Embryonic Science and Technology","doi-asserted-by":"publisher","award":["JPMJPR20C3"],"award-info":[{"award-number":["JPMJPR20C3"]}],"id":[{"id":"10.13039\/501100009023","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Robotics and Autonomous Systems"],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1016\/j.robot.2025.105057","type":"journal-article","created":{"date-parts":[[2025,5,22]],"date-time":"2025-05-22T23:18:42Z","timestamp":1747955922000},"page":"105057","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["LiRA: Light-Robust Adversary for model-based reinforcement learning in real world"],"prefix":"10.1016","volume":"192","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3760-249X","authenticated-orcid":false,"given":"Taisuke","family":"Kobayashi","sequence":"first","affiliation":[]}],"member":"78","reference":[{"year":"2018","series-title":"Reinforcement Learning: an Introduction","author":"Sutton","key":"10.1016\/j.robot.2025.105057_b1"},{"key":"10.1016\/j.robot.2025.105057_b2","series-title":"Advances in Neural Information Processing Systems","first-page":"4754","article-title":"Deep reinforcement learning in a handful of trials using probabilistic dynamics 
models","author":"Chua","year":"2018"},{"issue":"6","key":"10.1016\/j.robot.2025.105057_b3","doi-asserted-by":"crossref","first-page":"1603","DOI":"10.1109\/TRO.2018.2865891","article-title":"Information-theoretic model predictive control: Theory and applications to autonomous driving","volume":"34","author":"Williams","year":"2018","journal-title":"IEEE Trans. Robot."},{"key":"10.1016\/j.robot.2025.105057_b4","series-title":"Conference on Robot Learning","first-page":"1101","article-title":"Deep dynamics models for learning dexterous manipulation","author":"Nagabandi","year":"2020"},{"key":"10.1016\/j.robot.2025.105057_b5","doi-asserted-by":"crossref","DOI":"10.1109\/TCST.2024.3362514","article-title":"Model predictive control for dynamic cloth manipulation: Parameter learning and experimental validation","author":"Luque","year":"2024","journal-title":"IEEE Trans. Control Syst. Technol."},{"key":"10.1016\/j.robot.2025.105057_b6","doi-asserted-by":"crossref","DOI":"10.1109\/LRA.2023.3303721","article-title":"Learning to shape by grinding: Cutting-surface-aware model-based reinforcement learning","author":"Hachimine","year":"2023","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.robot.2025.105057_b7","series-title":"Conference on Robot Learning","first-page":"1","article-title":"Data efficient reinforcement learning for legged robots","author":"Yang","year":"2020"},{"key":"10.1016\/j.robot.2025.105057_b8","doi-asserted-by":"crossref","DOI":"10.1109\/LRA.2023.3303786","article-title":"Reinforcement learning with energy-exchange dynamics for spring-loaded biped robot walking","author":"Kuo","year":"2023","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"4","key":"10.1016\/j.robot.2025.105057_b9","doi-asserted-by":"crossref","first-page":"4224","DOI":"10.1109\/LRA.2019.2930489","article-title":"Low-level control of a quadrotor with deep model-based reinforcement learning","volume":"4","author":"Lambert","year":"2019","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.robot.2025.105057_b10","doi-asserted-by":"crossref","DOI":"10.1109\/LRA.2023.3264758","article-title":"Bayesian multi-task learning mpc for robotic mobile manipulation","author":"Arcari","year":"2023","journal-title":"IEEE Robot. Autom. Lett."},{"issue":"3","key":"10.1016\/j.robot.2025.105057_b11","doi-asserted-by":"crossref","DOI":"10.1177\/17298814211007305","article-title":"Reinforcement learning for robot research: A comprehensive review and open issues","volume":"18","author":"Zhang","year":"2021","journal-title":"Int. J. Adv. Robot. Syst."},{"issue":"2","key":"10.1016\/j.robot.2025.105057_b12","doi-asserted-by":"crossref","first-page":"335","DOI":"10.1162\/0899766053011528","article-title":"Robust reinforcement learning","volume":"17","author":"Morimoto","year":"2005","journal-title":"Neural Comput."},{"issue":"1","key":"10.1016\/j.robot.2025.105057_b13","doi-asserted-by":"crossref","first-page":"3","DOI":"10.1177\/0278364919887447","article-title":"Learning dexterous in-hand manipulation","volume":"39","author":"Andrychowicz","year":"2020","journal-title":"Int. J. Robot. 
Res."},{"key":"10.1016\/j.robot.2025.105057_b14","series-title":"International Conference on Machine Learning","first-page":"2817","article-title":"Robust adversarial reinforcement learning","author":"Pinto","year":"2017"},{"issue":"3","key":"10.1016\/j.robot.2025.105057_b15","doi-asserted-by":"crossref","first-page":"1595","DOI":"10.1109\/LRA.2023.3240930","article-title":"Revisiting the adversarial robustness-accuracy tradeoff in robot learning","volume":"8","author":"Lechner","year":"2023","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.robot.2025.105057_b16","series-title":"IEEE\/RSJ International Conference on Intelligent Robots and Systems","first-page":"5622","article-title":"Domains as objectives: Multi-domain reinforcement learning with convex-coverage set learning for domain uncertainty awareness","author":"Ilboudo","year":"2023"},{"key":"10.1016\/j.robot.2025.105057_b17","unstructured":"Diederik P. Kingma, Max Welling, Auto-encoding variational bayes, in: International Conference on Learning Representations, 2014."},{"key":"10.1016\/j.robot.2025.105057_b18","doi-asserted-by":"crossref","first-page":"61","DOI":"10.1007\/978-3-642-05465-5_3","article-title":"Light robustness","author":"Fischetti","year":"2009","journal-title":"Robust Online Large- Scale Optim.: Model. Tech. Transp. Syst."},{"year":"2023","series-title":"Soft actor-critic algorithm with truly-satisfied inequality constraint","author":"Kobayashi","key":"10.1016\/j.robot.2025.105057_b19"},{"key":"10.1016\/j.robot.2025.105057_b20","series-title":"IEEE\/RSJ International Conference on Intelligent Robots and Systems","first-page":"5026","article-title":"Mujoco: A physics engine for model-based control","author":"Todorov","year":"2012"},{"key":"10.1016\/j.robot.2025.105057_b21","article-title":"Self-paced learning for latent variable models","volume":"23","author":"Kumar","year":"2010","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.robot.2025.105057_b22","first-page":"9216","article-title":"Self-paced deep reinforcement learning","volume":"33","author":"Klink","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"issue":"88","key":"10.1016\/j.robot.2025.105057_b23","doi-asserted-by":"crossref","first-page":"eadi7566","DOI":"10.1126\/scirobotics.adi7566","article-title":"Anymal parkour: Learning agile navigation for quadrupedal robots","volume":"9","author":"Hoeller","year":"2024","journal-title":"Sci. Robot."},{"issue":"89","key":"10.1016\/j.robot.2025.105057_b24","doi-asserted-by":"crossref","first-page":"eadi9579","DOI":"10.1126\/scirobotics.adi9579","article-title":"Real-world humanoid locomotion with reinforcement learning","volume":"9","author":"Radosavovic","year":"2024","journal-title":"Sci. 
Robot."},{"key":"10.1016\/j.robot.2025.105057_b25","series-title":"IEEE\/RSJ International Conference on Intelligent Robots and Systems","first-page":"23","article-title":"Domain randomization for transferring deep neural networks from simulation to the real world","author":"Tobin","year":"2017"},{"key":"10.1016\/j.robot.2025.105057_b26","series-title":"Robotics: Science and Systems","article-title":"Bayessim: adaptive domain randomization via probabilistic inference for robotics simulators","author":"Ramos","year":"2019"},{"key":"10.1016\/j.robot.2025.105057_b27","series-title":"Conference on Robot Learning","first-page":"1532","article-title":"Neural posterior domain randomization","author":"Muratore","year":"2022"},{"key":"10.1016\/j.robot.2025.105057_b28","series-title":"Conference on Robot Learning","first-page":"91","article-title":"Learning to walk in minutes using massively parallel deep reinforcement learning","author":"Rudin","year":"2022"},{"key":"10.1016\/j.robot.2025.105057_b29","unstructured":"Xiaoyu Chen, Jiachen Hu, Chi Jin, Lihong Li, Liwei Wang, Understanding domain randomization for sim-to-real transfer, in: International Conference on Learning Representations, 2022."},{"key":"10.1016\/j.robot.2025.105057_b30","unstructured":"Adam Gleave, Michael Dennis, Cody Wild, Neel Kant, Sergey Levine, Stuart Russell, Adversarial Policies: Attacking Deep Reinforcement Learning, in: International Conference on Learning Representations, 2020."},{"key":"10.1016\/j.robot.2025.105057_b31","series-title":"AAAI Conference on Artificial Intelligence","first-page":"5431","article-title":"Robust adversarial reinforcement learning with dissipation inequation constraint","volume":"Vol. 36","author":"Zhai","year":"2022"},{"key":"10.1016\/j.robot.2025.105057_b32","article-title":"Veegan: Reducing mode collapse in gans using implicit variational learning","volume":"30","author":"Srivastava","year":"2017","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.robot.2025.105057_b33","unstructured":"Kanglin Liu, Wenming Tang, Fei Zhou, Guoping Qiu, Spectral regularization for combating mode collapse in gans, in: IEEE\/CVF International Conference on Computer Vision, 2019, pp. 6382\u20136390."},{"key":"10.1016\/j.robot.2025.105057_b34","doi-asserted-by":"crossref","unstructured":"Peide Huang, Mengdi Xu, Fei Fang, Ding Zhao, Robust reinforcement learning as a stackelberg game via adaptively-regularized adversarial training, in: International Joint Conference on Artificial Intelligence, 2022.","DOI":"10.24963\/ijcai.2022\/430"},{"key":"10.1016\/j.robot.2025.105057_b35","article-title":"Beyond confidence regions: Tight bayesian ambiguity sets for robust mdps","volume":"32","author":"Petrik","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"issue":"12","key":"10.1016\/j.robot.2025.105057_b36","doi-asserted-by":"crossref","first-page":"8018","DOI":"10.1109\/LRA.2023.3324590","article-title":"Trade-off between robustness and rewards adversarial training for deep reinforcement learning under large perturbations","volume":"8","author":"Huang","year":"2023","journal-title":"IEEE Robot. Autom. 
Lett."},{"key":"10.1016\/j.robot.2025.105057_b37","series-title":"Robotics: Science and Systems","article-title":"Preparing for the unknown: Learning a universal policy with online system identification","author":"Yu","year":"2017"},{"key":"10.1016\/j.robot.2025.105057_b38","series-title":"International Conference on Pattern Recognition","first-page":"2321","article-title":"Uncertainty aware system identification with universal policies","author":"Semage","year":"2022"},{"year":"2022","series-title":"Real-time sampling-based model predictive control based on reverse Kullback-Leibler divergence and its adaptive acceleration","author":"Kobayashi","key":"10.1016\/j.robot.2025.105057_b39"},{"key":"10.1016\/j.robot.2025.105057_b40","series-title":"Robotics: Science and Systems","article-title":"DeepMPC: Learning deep latent features for model predictive control.","volume":"Vol. 10","author":"Lenz","year":"2015"},{"issue":"13","key":"10.1016\/j.robot.2025.105057_b41","doi-asserted-by":"crossref","first-page":"807","DOI":"10.1080\/01691864.2023.2221715","article-title":"Sparse representation learning with modified q-VAE towards minimal realization of world model","volume":"37","author":"Kobayashi","year":"2023","journal-title":"Adv. Robot."},{"issue":"1","key":"10.1016\/j.robot.2025.105057_b42","doi-asserted-by":"crossref","first-page":"26","DOI":"10.1007\/s10458-022-09552-y","article-title":"A practical guide to multi-objective reinforcement learning and planning","volume":"36","author":"Hayes","year":"2022","journal-title":"Auton. Agents Multi- Agent Syst."},{"issue":"2","key":"10.1016\/j.robot.2025.105057_b43","doi-asserted-by":"crossref","first-page":"794","DOI":"10.1109\/TAC.2020.2982585","article-title":"A computationally efficient robust model predictive control framework for uncertain nonlinear systems","volume":"66","author":"K\u00f6hler","year":"2020","journal-title":"IEEE Trans. Autom. Control"},{"issue":"8","key":"10.1016\/j.robot.2025.105057_b44","doi-asserted-by":"crossref","first-page":"3638","DOI":"10.1109\/TAC.2020.3024161","article-title":"Safe reinforcement learning using robust MPC","volume":"66","author":"Zanon","year":"2020","journal-title":"IEEE Trans. Autom. Control"},{"key":"10.1016\/j.robot.2025.105057_b45","series-title":"IEEE\/SICE International Symposium on System Integration","first-page":"959","article-title":"Cooperative transport by manipulators with uncertainty-aware model-based reinforcement learning","author":"Aotani","year":"2024"},{"issue":"12","key":"10.1016\/j.robot.2025.105057_b46","doi-asserted-by":"crossref","first-page":"719","DOI":"10.1080\/01691864.2023.2208634","article-title":"Design of restricted normalizing flow towards arbitrary stochastic policy with computational efficiency","volume":"37","author":"Kobayashi","year":"2023","journal-title":"Adv. Robot."},{"issue":"59","key":"10.1016\/j.robot.2025.105057_b47","first-page":"1","article-title":"Domain-adversarial training of neural networks","volume":"17","author":"Ganin","year":"2016","journal-title":"J. Mach. Learn. 
Res."},{"key":"10.1016\/j.robot.2025.105057_b48","unstructured":"Irina Higgins, Loic Matthey, Arka Pal, Christopher P Burgess, Xavier Glorot, Matthew M Botvinick, Shakir Mohamed, Alexander Lerchner, beta-vae: Learning basic visual concepts with a constrained variational framework., in: International Conference on Learning Representations, 2017."},{"issue":"57","key":"10.1016\/j.robot.2025.105057_b49","first-page":"1","article-title":"Normalizing flows for probabilistic modeling and inference","volume":"22","author":"Papamakarios","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"10.1016\/j.robot.2025.105057_b50","doi-asserted-by":"crossref","first-page":"229","DOI":"10.1007\/BF00992696","article-title":"Simple statistical gradient-following algorithms for connectionist reinforcement learning","volume":"8","author":"Williams","year":"1992","journal-title":"Mach. Learn."},{"key":"10.1016\/j.robot.2025.105057_b51","first-page":"15871","article-title":"Advflow: Inconspicuous black-box adversarial attacks using normalizing flows","volume":"33","author":"Mohaghegh Dolatabadi","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.robot.2025.105057_b52","series-title":"International Conference on Machine Learning","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"Haarnoja","year":"2018"},{"key":"10.1016\/j.robot.2025.105057_b53","doi-asserted-by":"crossref","first-page":"148783","DOI":"10.1109\/ACCESS.2021.3125000","article-title":"Meta-optimization of bias-variance trade-off in stochastic model learning","volume":"9","author":"Aotani","year":"2021","journal-title":"IEEE Access"},{"year":"2003","series-title":"The Oxford Dictionary of Statistical Terms","author":"Dodge","key":"10.1016\/j.robot.2025.105057_b54"},{"key":"10.1016\/j.robot.2025.105057_b55","first-page":"29304","article-title":"Deep reinforcement learning at the edge of the statistical precipice","volume":"34","author":"Agarwal","year":"2021","journal-title":"Adv. Neural Inf. Process. Syst."},{"year":"2024","series-title":"Rethinking robustness assessment: Adversarial attacks on learning-based quadrupedal locomotion controllers","author":"Shi","key":"10.1016\/j.robot.2025.105057_b56"},{"issue":"29","key":"10.1016\/j.robot.2025.105057_b57","doi-asserted-by":"crossref","first-page":"861","DOI":"10.21105\/joss.00861","article-title":"UMAP: Uniform manifold approximation and projection","volume":"3","author":"McInnes","year":"2018","journal-title":"J. Open Sour. Softw."},{"key":"10.1016\/j.robot.2025.105057_b58","series-title":"IEEE\/RSJ International Conference on Intelligent Robots and Systems","first-page":"4032","article-title":"L2C2: Locally lipschitz continuous constraint towards stable and smooth reinforcement learning","author":"Kobayashi","year":"2022"},{"key":"10.1016\/j.robot.2025.105057_b59","first-page":"6514","article-title":"Hyperparameter ensembles for robustness and uncertainty quantification","volume":"33","author":"Wenzel","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.robot.2025.105057_b60","doi-asserted-by":"crossref","first-page":"49","DOI":"10.1016\/j.cobeha.2020.10.001","article-title":"Balancing exploration and exploitation with information and randomization","volume":"38","author":"Wilson","year":"2021","journal-title":"Curr. Opin. Behav. 
Sci."},{"key":"10.1016\/j.robot.2025.105057_b61","series-title":"International Conference on Machine Learning","first-page":"9443","article-title":"State entropy maximization with random encoders for efficient exploration","author":"Seo","year":"2021"},{"key":"10.1016\/j.robot.2025.105057_b62","unstructured":"Youngmin Oh, Jinwoo Shin, Eunho Yang, Sung Ju Hwang, Model-augmented prioritized experience replay, in: International Conference on Learning Representations, 2022."},{"key":"10.1016\/j.robot.2025.105057_b63","series-title":"Uncertainty in Artificial Intelligence","first-page":"1561","article-title":"Understanding and mitigating the limitations of prioritized experience replay","author":"Pan","year":"2022"},{"key":"10.1016\/j.robot.2025.105057_b64","series-title":"International Conference on Machine Learning","first-page":"30479","article-title":"Smooth tchebycheff scalarization for multi-objective optimization","author":"Lin","year":"2024"},{"issue":"5","key":"10.1016\/j.robot.2025.105057_b65","doi-asserted-by":"crossref","first-page":"122","DOI":"10.1007\/s10462-024-10726-1","article-title":"Automated machine learning: past, present and future","volume":"57","author":"Baratchi","year":"2024","journal-title":"Artif. Intell. Rev."},{"issue":"182","key":"10.1016\/j.robot.2025.105057_b66","first-page":"1","article-title":"A probabilistic interpretation of self-paced learning with applications to reinforcement learning","volume":"22","author":"Klink","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"10.1016\/j.robot.2025.105057_b67","series-title":"International Conference on Machine Learning","first-page":"6215","article-title":"Action robust reinforcement learning and applications in continuous control","author":"Tessler","year":"2019"},{"key":"10.1016\/j.robot.2025.105057_b68","series-title":"International Conference on Machine Learning","first-page":"24414","article-title":"Robust policy learning over multiple uncertainty sets","author":"Xie","year":"2022"},{"key":"10.1016\/j.robot.2025.105057_b69","series-title":"IEEE Conference on Decision and Control","first-page":"2036","article-title":"Wasserstein tube MPC with exact uncertainty propagation","author":"Aolaritei","year":"2023"},{"key":"10.1016\/j.robot.2025.105057_b70","article-title":"Pytorch: An imperative style, high-performance deep learning library","volume":"32","author":"Paszke","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.robot.2025.105057_b71","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2023.126692","article-title":"Adaterm: Adaptive t-distribution estimated robust moments for noise-robust stochastic gradient optimization","volume":"557","author":"Ilboudo","year":"2023","journal-title":"Neurocomputing"},{"year":"2021","series-title":"Squareplus: A softplus-like algebraic rectifier","author":"Barron","key":"10.1016\/j.robot.2025.105057_b72"},{"key":"10.1016\/j.robot.2025.105057_b73","article-title":"Root mean square layer normalization","volume":"32","author":"Zhang","year":"2019","journal-title":"Adv. Neural Inf. Process. 
Syst."},{"key":"10.1016\/j.robot.2025.105057_b74","series-title":"International Conference on Artificial Intelligence and Statistics","first-page":"4236","article-title":"Invertible generative modeling using linear rational splines","author":"Dolatabadi","year":"2020"},{"issue":"1\u20132","key":"10.1016\/j.robot.2025.105057_b75","doi-asserted-by":"crossref","first-page":"479","DOI":"10.1007\/BF01016429","article-title":"Possible generalization of Boltzmann-Gibbs statistics","volume":"52","author":"Tsallis","year":"1988","journal-title":"J. Stat. Phys."}],"container-title":["Robotics and Autonomous Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0921889025001435?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0921889025001435?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,6,4]],"date-time":"2025-06-04T03:02:42Z","timestamp":1749006162000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0921889025001435"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10]]},"references-count":75,"alternative-id":["S0921889025001435"],"URL":"https:\/\/doi.org\/10.1016\/j.robot.2025.105057","relation":{},"ISSN":["0921-8890"],"issn-type":[{"type":"print","value":"0921-8890"}],"subject":[],"published":{"date-parts":[[2025,10]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"LiRA: Light-Robust Adversary for model-based reinforcement learning in real world","name":"articletitle","label":"Article Title"},{"value":"Robotics and Autonomous Systems","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.robot.2025.105057","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2025 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"105057"}}