{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T15:35:47Z","timestamp":1773329747579,"version":"3.50.1"},"reference-count":44,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62206281"],"award-info":[{"award-number":["62206281"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62293541"],"award-info":[{"award-number":["62293541"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Beijing Natural Science Foundation","award":["4232056"],"award-info":[{"award-number":["4232056"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1109\/tnnls.2024.3378913","type":"journal-article","created":{"date-parts":[[2024,4,15]],"date-time":"2024-04-15T13:35:52Z","timestamp":1713188152000},"page":"5644-5653","source":"Crossref","is-referenced-by-count":1,"title":["Boosting On-Policy Actor\u2013Critic With Shallow Updates in Critic"],"prefix":"10.1109","volume":"36","author":[{"given":"Luntong","family":"Li","sequence":"first","affiliation":[{"name":"State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5384-423X","authenticated-orcid":false,"given":"Yuanheng","family":"Zhu","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TETCI.2018.2823329"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-020-03051-4"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1126\/science.add4679"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/JAS.2021.1004395"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3116063"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3041469"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2021.3121546"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2927869"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2022.3172168"},{"key":"ref11","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/tcyb.2022.3179775"},{"key":"ref13","first-page":"1","article-title":"Shallow updates for deep reinforcement learning","volume-title":"Proc. 31st Conf. Neural Inf. Process. Syst. (NeurIPS)","author":"Levine"},{"key":"ref14","first-page":"1107","article-title":"Least-squares policy iteration","volume":"4","author":"Lagoudakis","year":"2003","journal-title":"J. Mach. Learn. Res."},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-01551-9"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v25i1.7903"},{"issue":"7553","key":"ref17","doi-asserted-by":"crossref","first-page":"436","DOI":"10.1038\/nature14539","article-title":"Deep learning","volume":"521","author":"Yoshua","year":"2015","journal-title":"Nature"},{"key":"ref18","first-page":"1","article-title":"On the generalization of representations in reinforcement learning","volume-title":"Proc. 25th Int. Conf. Artif. Intell. Statist. (AISTATS)","author":"Lan"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2024.104100"},{"key":"ref20","first-page":"1","article-title":"Two-timescale networks for nonlinear value function approximation","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Chung"},{"key":"ref21","volume-title":"Bayesian Inference in Statistical Analysis","author":"Box","year":"2011"},{"key":"ref22","first-page":"2020","article-title":"Phasic policy gradient","volume-title":"Proc. ICML","author":"Cobbe"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN55064.2022.9892303"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553501"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2019.2902342"},{"key":"ref26","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume-title":"Proc. Adv. Neural Inf. Process. Syst","author":"Sutton"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2009.07.008"},{"key":"ref28","first-page":"1609","article-title":"A convergent O(n) algorithm for off-policy temporal-difference learning with linear function approximation","volume-title":"Proc. Adv. Neural Inf. Process. Syst. Conf.","author":"Sutton"},{"key":"ref29","article-title":"Contrastive learning as goal-conditioned reinforcement learning","author":"Eysenbach","year":"2022","journal-title":"arXiv:2206.07568"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.2307\/j.ctt4cgngj.10"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i8.16880"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3119853"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/BF00114723"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-93-86279-38-5"},{"key":"ref35","article-title":"A deeper look at discounting mismatch in actor-critic algorithms","author":"Zhang","year":"2020","journal-title":"arXiv:2010.01069"},{"key":"ref36","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. 32nd Int. Conf. Mach. Learn.","volume":"37","author":"Schulman"},{"key":"ref37","first-page":"2048","article-title":"Leveraging procedural generation to benchmark reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"119","author":"Cobbe"},{"key":"ref38","first-page":"1407","article-title":"IMPALA: Scalable distributed deep-RL with importance weighted actor-learner architectures","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"80","author":"Espeholt"},{"key":"ref39","first-page":"1","article-title":"Reinforcement learning with augmented data","volume-title":"Proc. 34th Conf. Neural Inf. Process. Syst. (NeurIPS)","author":"Laskin"},{"key":"ref40","article-title":"Measuring sample efficiency and generalization in reinforcement learning benchmarks: NeurIPS 2020 procgen benchmark","author":"Mohanty","year":"2021","journal-title":"arXiv:2103.15332"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3055499"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2019.2899365"},{"key":"ref43","first-page":"7825","article-title":"Mirror learning: A unifying framework of policy optimisation","volume-title":"Proc. 39th Int. Conf. Mach. Learn.","volume":"162","author":"Grudzien"},{"key":"ref44","first-page":"1","article-title":"A finite-time analysis of two time-scale actor-critic methods","volume-title":"Proc. 34th Conf. Neural Inf. Process. Syst. (NeurIPS)","author":"Wu"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10908444\/10499902.pdf?arnumber=10499902","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,5]],"date-time":"2025-12-05T18:38:56Z","timestamp":1764959936000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10499902\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3]]},"references-count":44,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2024.3378913","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3]]}}}