{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,31]],"date-time":"2025-12-31T12:13:31Z","timestamp":1767183211399,"version":"3.37.0"},"reference-count":18,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62073324"],"award-info":[{"award-number":["62073324"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1109\/lra.2025.3536291","type":"journal-article","created":{"date-parts":[[2025,1,29]],"date-time":"2025-01-29T19:09:48Z","timestamp":1738177788000},"page":"2646-2653","source":"Crossref","is-referenced-by-count":1,"title":["Delayed Dynamic Model Scheduled Reinforcement Learning With Time-Varying Delays for Robotic Control"],"prefix":"10.1109","volume":"10","author":[{"given":"Zechang","family":"Wang","sequence":"first","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8251-9118","authenticated-orcid":false,"given":"Dengpeng","family":"Xing","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1359-0364","authenticated-orcid":false,"given":"Yiming","family":"Yang","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8265-9866","authenticated-orcid":false,"given":"Peng","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2010.2053736"},{"key":"ref2","first-page":"20208","article-title":"Interpretable reward redistribution in reinforcement learning: A causal approach","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Zhang","year":"2024"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2021.04.015"},{"key":"ref4","first-page":"53973","article-title":"Boosting long-delayed reinforcement learning with auxiliary short-delayed task","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wu","year":"2024"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2010.5650345"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-008-9056-7"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2003.809799"},{"key":"ref8","first-page":"678","article-title":"Belief projection-based reinforcement learning for environments with delayed feedback","volume-title":"Proc. 37th Int. Conf. Neural Inf. Process. Syst.","author":"Kim","year":"2023"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3459637.3482386"},{"key":"ref10","first-page":"1","article-title":"Acting in delayed environments with non-stationary Markov policies","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Derman","year":"2021"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9534358"},{"article-title":"At human speed: Deep reinforcement learning with action delay","year":"2018","author":"Firoiu","key":"ref12"},{"key":"ref13","first-page":"1","article-title":"Reinforcement learning with random delays","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bouteiller","year":"2021"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-015-0222-2"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.32614\/cran.package.ocp"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref17","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja","year":"2018"},{"key":"ref18","first-page":"1","article-title":"Continuous control with deep reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lillicrap","year":"2016"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/10849592\/10857467.pdf?arnumber=10857467","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,7]],"date-time":"2025-02-07T18:46:36Z","timestamp":1738953996000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10857467\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3]]},"references-count":18,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/lra.2025.3536291","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"type":"electronic","value":"2377-3766"},{"type":"electronic","value":"2377-3774"}],"subject":[],"published":{"date-parts":[[2025,3]]}}}