{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T13:33:30Z","timestamp":1743082410462,"version":"3.37.3"},"reference-count":27,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100007053","name":"Korea Institute of Energy Technology Evaluation and Planning (KETEP) and the Ministry of Trade, Industry & Energy (MOTIE) of the Republic of Korea","doi-asserted-by":"publisher","award":["RS-2024-00422103"],"award-info":[{"award-number":["RS-2024-00422103"]}],"id":[{"id":"10.13039\/501100007053","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001321","name":"National Research Foundation","doi-asserted-by":"crossref","id":[{"id":"10.13039\/501100001321","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Korean government","award":["RS-2024-00421129"],"award-info":[{"award-number":["RS-2024-00421129"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3455553","type":"journal-article","created":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T18:19:02Z","timestamp":1725646742000},"page":"128551-128558","source":"Crossref","is-referenced-by-count":1,"title":["Catching Robot: Predicting the Trajectory of a Rolling Ball Using Transformer"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0325-6883","authenticated-orcid":false,"given":"Namyeong","family":"Lee","sequence":"first","affiliation":[{"name":"Department of Artificial Intelligence, Hanyang University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0758-1464","authenticated-orcid":false,"given":"Yuna","family":"Oh","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence, Hanyang University, Seoul, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8877-9519","authenticated-orcid":false,"given":"Jun","family":"Moon","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence, Hanyang University, Seoul, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2012.05.022"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9340968"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2010.5651175"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593986"},{"key":"ref5","article-title":"Policy learning in SE(3) action spaces","author":"Wang","year":"2020","journal-title":"arXiv:2010.02798"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2010.5509775"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2011.5980138"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TCST.2014.2380175"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICENCO.2015.7416356"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9560787"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1038\/nature14539"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"issue":"8","key":"ref13","first-page":"9","article-title":"Language models are 
unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref14","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Chen"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202133"},{"key":"ref16","first-page":"334","article-title":"Transferring end-to-end visuomotor control from simulation to real world for a multi-stage task","volume-title":"Proc. Conf. Robot Learn.","author":"James"},{"key":"ref17","first-page":"1273","article-title":"Offline reinforcement learning as one big sequence modeling problem","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Janner"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3324458"},{"key":"ref19","article-title":"Behavior regularized offline reinforcement learning","author":"Wu","year":"2019","journal-title":"arXiv:1911.11361"},{"key":"ref20","first-page":"1179","article-title":"Conservative Q-learning for offline reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Kumar"},{"volume-title":"Improving language understanding by generative pre-training","year":"2018","author":"Radford","key":"ref21"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/566570.566636"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4612-4380-9_35"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4615-7566-5"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10380310\/10669029.pdf?arnumber=10669029","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,19]],"date-time":"2024-09-19T06:17:31Z","timestamp":1726726651000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10669029\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3455553","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2024]]}}}
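
The record above is a Crossref work payload (message-type "work"). As a minimal sketch of how such a record can be retrieved and summarized, the Python snippet below queries the public Crossref REST API at https://api.crossref.org/works/{DOI} (an assumption about the retrieval route; the record itself could also come from a saved file) using the third-party requests package, and reads only fields that appear in the record above.

# Minimal sketch: fetch and summarize the Crossref work record shown above.
# Assumes the public Crossref REST API and the `requests` package; adjust the
# DOI or load the JSON from disk if working offline.
import requests

DOI = "10.1109/access.2024.3455553"

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=10)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object mirrors the record above

title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))
container = work["container-title"][0]
year = work["issued"]["date-parts"][0][0]

print(f'{authors}. "{title}". {container}, vol. {work["volume"]}, '
      f'pp. {work["page"]}, {year}. DOI: {work["DOI"]}')

For the record above, this prints a one-line citation naming the three authors, the article title, IEEE Access volume 12, pages 128551-128558, year 2024, and the DOI 10.1109/access.2024.3455553.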