{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,23]],"date-time":"2025-12-23T06:55:39Z","timestamp":1766472939557,"version":"3.48.0"},"reference-count":58,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Spanish NSF","award":["MCIN\/AEI\/10.13039\/501100011033"],"award-info":[{"award-number":["MCIN\/AEI\/10.13039\/501100011033"]}]},{"name":"Spanish NSF","award":["PID2019-105032GB-I00"],"award-info":[{"award-number":["PID2019-105032GB-I00"]}]},{"name":"Spanish NSF","award":["TED2021-130347B-I00"],"award-info":[{"award-number":["TED2021-130347B-I00"]}]},{"name":"Spanish NSF","award":["PID2022-136887NB-I00"],"award-info":[{"award-number":["PID2022-136887NB-I00"]}]},{"name":"Community of Madrid via the Ellis Madrid Unit","award":["TEC-2024\/COM-89"],"award-info":[{"award-number":["TEC-2024\/COM-89"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Signal Process."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tsp.2025.3636071","type":"journal-article","created":{"date-parts":[[2025,11,24]],"date-time":"2025-11-24T19:02:43Z","timestamp":1764010963000},"page":"4906-4920","source":"Crossref","is-referenced-by-count":0,"title":["Multilinear Tensor Low-Rank Approximation for Policy-Gradient Methods in Reinforcement Learning"],"prefix":"10.1109","volume":"73","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1042-7502","authenticated-orcid":false,"given":"Sergio","family":"Rozada","sequence":"first","affiliation":[{"name":"Department of Signal Theory and Comms., King Juan Carlos University, Madrid, Spain"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4796-4483","authenticated-orcid":false,"given":"Hoi-To","family":"Wai","sequence":"additional","affiliation":[{"name":"Department of Systems Engineering and Engineering Management, The Chinese University of Hong Kong (CUHK), Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4642-7718","authenticated-orcid":false,"given":"Antonio G.","family":"Marques","sequence":"additional","affiliation":[{"name":"Department of Signal Theory and Comms., King Juan Carlos University, Madrid, Spain"}]}],"member":"263","reference":[{"volume-title":"Reinforcement Learning: An Introduction.","year":"2018","author":"Sutton","key":"ref1"},{"volume-title":"Reinforcement Learning and Optimal Control.","year":"2019","author":"Bertsekas","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref5","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"33","author":"Brown","year":"2020"},{"key":"ref6","volume-title":"Dynamic Programming and Optimal Control","volume":"1","author":"Bertsekas","year":"2000"},{"volume-title":"Neuro-Dynamic Programming.","year":"1996","author":"Bertsekas","key":"ref7"},{"key":"ref8","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume-title":"Proc. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"12","author":"Sutton","year":"1999"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2020.3029317"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TCNS.2021.3078100"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952667"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2743240"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10304"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2018.8430925"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2022.3207269"},{"key":"ref16","article-title":"Harnessing structures for value-based planning and reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations (ICLR)","author":"Yang","year":"2019"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2021.3055957"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2024.3379089"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10094802"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/BF02288367"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4471-2227-2"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1561\/2200000055"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1137\/07070111X"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2017.2690524"},{"key":"ref25","first-page":"20095","article-title":"Flambe: Structural complexity and representation learning of low rank MDPs","volume-title":"Proc. Conf. Neural Inf. Process. Syst. (NeurIPS)","author":"Agarwal","year":"2020"},{"key":"ref26","article-title":"Representation learning for online and offline RL in low-rank MDPs","volume-title":"Proc. Int. Conf. Learn. Representations (ICLR)","author":"Uehara","year":"2021"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10311"},{"key":"ref28","first-page":"1704","article-title":"Contextual decision processes with low Bellman rank are PAC-learnable","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","volume":"70","author":"Jiang","year":"2017"},{"key":"ref29","first-page":"193","article-title":"Reinforcement learning of POMDPs using spectral methods","volume-title":"Proc. Conf. Learn. Theory (CLT)","author":"Azizzadenesheli","year":"2016"},{"key":"ref30","first-page":"7301","article-title":"Tesseract: Tensorised actors for multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Mahajan","year":"2021"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3589973"},{"key":"ref32","first-page":"12092","article-title":"Sample efficient reinforcement learning via low-rank matrix estimation","volume-title":"Proc. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"33","author":"Shah","year":"2020"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.23919\/EUSIPCO54536.2021.9616008"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CAMSAP58249.2023.10403480"},{"key":"ref35","first-page":"6820","article-title":"On the global convergence rates of softmax policy gradient methods","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Mei","year":"2020"},{"issue":"98","key":"ref36","first-page":"1","article-title":"On the theory of policy gradient methods: Optimality, approximation, and distribution shift","volume":"22","author":"Agarwal","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/s10107-022-01816-5"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2025.3540965"},{"key":"ref39","first-page":"2228","article-title":"On the convergence and sample efficiency of variance-reduced policy gradient method","volume-title":"Proc. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"34","author":"Zhang","year":"2021"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1137\/19M1288012"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/s10208-018-09409-5"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2020.2976000"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"article-title":"High-dimensional continuous control using generalized advantage estimation","year":"2015","author":"Schulman","key":"ref44"},{"issue":"9","key":"ref45","first-page":"1471","article-title":"Variance reduction techniques for gradient estimates in reinforcement learning","volume":"5","author":"Greensmith","year":"2004","journal-title":"J. Mach. Learn. Res."},{"key":"ref46","first-page":"1008","article-title":"Actor-critic algorithms","volume-title":"Proc. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"12","author":"Konda","year":"1999"},{"key":"ref47","first-page":"1531","article-title":"A natural policy gradient","volume-title":"Proc. Conf. Neural Inf. Process. Syst. (NeurIPS)","volume":"14","author":"Kakade","year":"2001"},{"key":"ref48","first-page":"267","article-title":"Approximately optimal approximate reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Kakade","year":"2002"},{"key":"ref49","first-page":"1889","article-title":"Trust region policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Schulman","year":"2015"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.12794\/metadc1505267"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1016\/S0169-7439(97)00032-4"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448465"},{"key":"ref53","volume-title":"Differential-Geometrical Methods in Statistics","volume":"28","author":"Amari","year":"2012"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1137\/18M1178244"},{"author":"Rozada","key":"ref55","article-title":"Online code repository: Tensor low-rank approximation for policy-gradient methods in reinforcement learning."},{"article-title":"OpenAI gym","year":"2016","author":"Brockman","key":"ref56"},{"key":"ref57","article-title":"Goddard\u2019s rocket problem"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1017\/S0962492900000015"}],"container-title":["IEEE Transactions on Signal Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/78\/10807692\/11264837.pdf?arnumber=11264837","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,23]],"date-time":"2025-12-23T06:53:54Z","timestamp":1766472834000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11264837\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":58,"URL":"https:\/\/doi.org\/10.1109\/tsp.2025.3636071","relation":{},"ISSN":["1053-587X","1941-0476"],"issn-type":[{"type":"print","value":"1053-587X"},{"type":"electronic","value":"1941-0476"}],"subject":[],"published":{"date-parts":[[2025]]}}}