{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,2]],"date-time":"2025-07-02T04:18:44Z","timestamp":1751429924846,"version":"3.41.0"},"reference-count":29,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"7","license":[{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T00:00:00Z","timestamp":1751328000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Science and Technology Major Project of China","award":["2022ZD0116700"],"award-info":[{"award-number":["2022ZD0116700"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62033006","62325305"],"award-info":[{"award-number":["62033006","62325305"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"BNRist Project","award":["BNR2024TD03003"],"award-info":[{"award-number":["BNR2024TD03003"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Automat. Contr."],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1109\/tac.2025.3543128","type":"journal-article","created":{"date-parts":[[2025,2,17]],"date-time":"2025-02-17T18:42:47Z","timestamp":1739817767000},"page":"4920-4927","source":"Crossref","is-referenced-by-count":0,"title":["Asynchronous Parallel Policy Gradient Methods for the Linear Quadratic Regulator"],"prefix":"10.1109","volume":"70","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3142-2903","authenticated-orcid":false,"given":"Feiran","family":"Zhao","sequence":"first","affiliation":[{"name":"Department of Automation and BNRist, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9673-302X","authenticated-orcid":false,"given":"Xingyu","family":"Sha","sequence":"additional","affiliation":[{"name":"Department of Automation and BNRist, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4355-5340","authenticated-orcid":false,"given":"Keyou","family":"You","sequence":"additional","affiliation":[{"name":"Department of Automation and BNRist, Tsinghua University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/IRC.2019.00120"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.17775\/CSEEJPES.2019.00920"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3054625"},{"key":"ref4","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"12","author":"Sutton","year":"1999"},{"key":"ref5","first-page":"1467","article-title":"Global convergence of policy gradient methods for the linear quadratic regulator","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Fazel","year":"2018"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2021.3087455"},{"issue":"21","key":"ref7","first-page":"1","article-title":"Derivative-free methods for policy optimization: Guarantees for linear quadratic systems","volume":"21","author":"Malik","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2021.3128592"},{"key":"ref9","article-title":"Policy optimization provably converges to Nash equilibria in zero-sum linear quadratic games","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Zhang","year":"2019"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2024.3455508"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2023.3234176"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/tac.2025.3569597"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.23919\/ACC50511.2021.9483417"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-control-042920-020021"},{"key":"ref15","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. 33rd Int. Conf. Mach. Learn.","author":"Mnih","year":"2016"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2023.3268475"},{"article-title":"Model-free learning with heterogeneous dynamical systems: A federated LQR approach","year":"2023","author":"Wang","key":"ref17"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2019.2930234"},{"key":"ref19","article-title":"Distributed delayed stochastic optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"24","author":"Agarwal","year":"2011"},{"key":"ref20","article-title":"Asynchronous parallel stochastic gradient for nonconvex optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Lian","year":"2015"},{"key":"ref21","first-page":"17202","article-title":"Sharper convergence guarantees for asynchronous SGD for distributed and federated learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Koloskova","year":"2022"},{"key":"ref22","first-page":"420","article-title":"Asynchronous SGD beats minibatch SGD under arbitrary delays","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Mishchenko","year":"2022"},{"volume-title":"Optimal Control: Linear Quadratic Methods","year":"2007","author":"Anderson","key":"ref23"},{"key":"ref24","first-page":"1407","article-title":"IMPALA: Scalable distributed deep-RL with importance weighted actor-learner architectures","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Espeholt","year":"2018"},{"key":"ref25","first-page":"1","article-title":"Seed RL: Scalable and efficient deep-RL with accelerated central inference","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Espeholt","year":"2019"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1137\/20M1329858"},{"key":"ref27","article-title":"A comprehensive linear speedup analysis for asynchronous stochastic parallel optimization from zeroth-order to first-order","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"27","author":"Lian","year":"2016"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1080\/0020718508961217"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1017\/9781108231596"}],"container-title":["IEEE Transactions on Automatic Control"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/9\/11060003\/10891460.pdf?arnumber=10891460","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T17:44:16Z","timestamp":1751391856000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10891460\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7]]},"references-count":29,"journal-issue":{"issue":"7"},"URL":"https:\/\/doi.org\/10.1109\/tac.2025.3543128","relation":{},"ISSN":["0018-9286","1558-2523","2334-3303"],"issn-type":[{"type":"print","value":"0018-9286"},{"type":"electronic","value":"1558-2523"},{"type":"electronic","value":"2334-3303"}],"subject":[],"published":{"date-parts":[[2025,7]]}}}