{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,4]],"date-time":"2026-05-04T12:03:28Z","timestamp":1777896208526,"version":"3.51.4"},"reference-count":27,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"NSF","award":["1846043"],"award-info":[{"award-number":["1846043"]}]},{"name":"NSF","award":["2132972"],"award-info":[{"award-number":["2132972"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1109\/lra.2025.3550849","type":"journal-article","created":{"date-parts":[[2025,3,12]],"date-time":"2025-03-12T17:48:04Z","timestamp":1741801684000},"page":"4898-4905","source":"Crossref","is-referenced-by-count":13,"title":["Autoregressive Action Sequence Learning for Robotic Manipulation"],"prefix":"10.1109","volume":"10","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6390-7131","authenticated-orcid":false,"given":"Xinyu","family":"Zhang","sequence":"first","affiliation":[{"name":"Department of Computer Science, Rutgers University, Piscataway, NJ, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-8119-828X","authenticated-orcid":false,"given":"Yuhan","family":"Liu","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Rutgers University, Piscataway, NJ, USA"}]},{"given":"Haonan","family":"Chang","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Rutgers University, Piscataway, NJ, USA"}]},{"given":"Liam","family":"Schramm","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Rutgers University, Piscataway, NJ, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5587-4560","authenticated-orcid":false,"given":"Abdeslam","family":"Boularias","sequence":"additional","affiliation":[{"name":"Department of Computer Science, Rutgers University, Piscataway, NJ, USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3605943"},{"key":"ref2","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Mann","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref3","first-page":"70926","article-title":"Why think step by step? Reasoning emerges from the locality of experience","volume-title":"Proc. 37th Int. Conf. Neural Inf. Process. Syst.","author":"Prystawski","year":"2024"},{"key":"ref4","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Chen","year":"2021"},{"key":"ref5","first-page":"1273","article-title":"Offline reinforcement learning as one big sequence modeling problem","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Janner","year":"2021"},{"issue":"13","key":"ref6","first-page":"1","article-title":"Learning the variance of the reward-to-go","volume":"17","author":"Tamar","year":"2016","journal-title":"J. Mach. Learn. Res."},{"key":"ref7","article-title":"A generalist agent","author":"Reed","year":"2022","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref8","doi-asserted-by":"crossref","DOI":"10.1007\/978-981-16-8193-6","article-title":"VIMA: General robot manipulation with multimodal prompts","volume-title":"Proc. 40th Int. Conf. Mach. Learn.","author":"Jiang","year":"2022"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01710"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.016"},{"key":"ref11","article-title":"OpenVLA: An open-source vision-language-action model","author":"Kim","year":"2024","journal-title":"Proc. 8th Annu. Conf. Robot Learn."},{"key":"ref12","article-title":"Unleashing large-scale video generative pre-training for visual robot manipulation","volume-title":"Proc. 12th Int. Conf. Learn. Representations","author":"Wu","year":"2023"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.026"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2974707"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2024.3395626"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.055"},{"key":"ref17","first-page":"26091","article-title":"Deep hierarchical planning from pixels","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Hafner","year":"2022"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.5772\/5015"},{"issue":"5","key":"ref19","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3453160","article-title":"Hierarchical reinforcement learning: A comprehensive survey","volume":"54","author":"Pateria","year":"2021","journal-title":"ACM Comput. Surv."},{"key":"ref20","first-page":"2113","article-title":"Hydra: Hybrid robot actions for imitation learning","volume-title":"Proc. Conf. Robot. Learn.","author":"Belkhale","year":"2023"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58536-5_24"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1989.1.2.270"},{"key":"ref23","article-title":"Lyken17\/PyTorch-Opcounter: Count the MACS \/ FLOPS of your PyTorch model","author":"Zhu","year":"2018"},{"key":"ref24","article-title":"Lerobot: State-of-the-art machine learning for real-world robotics in PyTorch","author":"Cadene","year":"2024"},{"key":"ref25","article-title":"3D diffuser actor: Policy diffusion with 3D scene representations","volume-title":"8th Annu. Conf. Robot Learn.","author":"Ke","year":"2024"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01692"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01337"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/10935293\/10923689.pdf?arnumber=10923689","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,8]],"date-time":"2025-04-08T05:26:18Z","timestamp":1744089978000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10923689\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5]]},"references-count":27,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/lra.2025.3550849","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,5]]}}}