{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,5]],"date-time":"2026-02-05T06:17:56Z","timestamp":1770272276471,"version":"3.49.0"},"reference-count":36,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,1]],"date-time":"2022-07-01T00:00:00Z","timestamp":1656633600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62073324"],"award-info":[{"award-number":["62073324"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2022,7]]},"DOI":"10.1109\/lra.2022.3186494","type":"journal-article","created":{"date-parts":[[2022,6,27]],"date-time":"2022-06-27T20:29:36Z","timestamp":1656361776000},"page":"7982-7989","source":"Crossref","is-referenced-by-count":7,"title":["Efficient Spatiotemporal Transformer for Robotic Reinforcement Learning"],"prefix":"10.1109","volume":"7","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1359-0364","authenticated-orcid":false,"given":"Yiming","family":"Yang","sequence":"first","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8251-9118","authenticated-orcid":false,"given":"Dengpeng","family":"Xing","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1111-1529","authenticated-orcid":false,"given":"Bo","family":"Xu","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]}],"member":"263","reference":[{"issue":"1","key":"ref1","doi-asserted-by":"crossref","first-page":"7","DOI":"10.1023\/A:1007694015589","article-title":"Relational reinforcement learning","volume":"43","author":"Deroski","year":"2001","journal-title":"Mach. Learn."},{"key":"ref2","article-title":"Relational inductive biases, deep learning, and graph networks","author":"Battaglia","year":"2018"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2019.2958211"},{"issue":"30","key":"ref4","first-page":"1","article-title":"A review of robot learning for manipulation: Challenges, representations, and algorithms","volume":"22","author":"Kroemer","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref5","article-title":"Recurrent experience replay in distributed reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kapturowski","year":"2019"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1177\/0278364919887447"},{"key":"ref7","article-title":"Open-ended learning leads to generally capable agents","author":"Team","year":"2021"},{"key":"ref8","article-title":"Decision transformer: Reinforcement learning via sequence modeling","author":"Chen","year":"2021"},{"key":"ref9","first-page":"7487","article-title":"Stabilizing transformers for reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Parisotto","year":"2020"},{"key":"ref10","first-page":"2489","article-title":"Catformer: Designing stable transformers via sensitivity analysis","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Davis","year":"2021"},{"key":"ref11","article-title":"Playing atari with deep reinforcement learning","author":"Mnih","year":"2013"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA40945.2020.9197468"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2021.103219"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475473"},{"key":"ref15","first-page":"568","article-title":"Two-stream convolutional networks for action recognition in videos","volume-title":"Adv. Neural Inf. Process. Syst.","volume":"27","author":"Simonyan","year":"2014"},{"key":"ref16","article-title":"What matters in on-policy reinforcement learning? a large-scale empirical study","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Andrychowicz","year":"2021"},{"key":"ref17","article-title":"Spatial-temporal transformer networks for traffic flow forecasting","author":"Xu","year":"2020"},{"key":"ref18","article-title":"General robot dynamics learning and gen2real","author":"Xing","year":"2021"},{"key":"ref19","first-page":"651","article-title":"Scalable deep reinforcement learning for vision-based robotic manipulation","volume-title":"Proc. Conf. Robot Learn.","author":"Kalashnikov","year":"2018"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-019-0070-z"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aao4900"},{"key":"ref22","article-title":"Dynamics learning with cascaded variational inference for multi-step manipulation","author":"Fang","year":"2019"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-56991-8_32"},{"key":"ref24","article-title":"Why generalization in RL is difficult: Epistemic POMDPS and implicit partial observability","author":"Ghosh","year":"2021"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC.2018.8569400"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-74690-4_71"},{"key":"ref27","first-page":"1691","article-title":"Generative pretraining from pixels","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Chen","year":"2020"},{"key":"ref28","first-page":"6000","article-title":"Attention is all you need","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Vaswani","year":"2017"},{"key":"ref29","first-page":"1273","article-title":"Offline reinforcement learning as one big sequence modeling problem","volume":"34","author":"Janner","year":"2021","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/s11633-022-1383-7"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3143518"},{"key":"ref32","article-title":"Language models are few-shot learners","author":"Brown","year":"2020"},{"key":"ref33","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017"},{"key":"ref34","article-title":"OpenAI Gym","author":"Brockman","year":"2016","journal-title":"arXiv:1606.01540"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref36","article-title":"Causalworld: A robotic manipulation benchmark for causal structure and transfer learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ahmed","year":"2021"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/9750005\/09807399.pdf?arnumber=9807399","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T04:17:50Z","timestamp":1706761070000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9807399\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7]]},"references-count":36,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/lra.2022.3186494","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,7]]}}}