{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,8]],"date-time":"2024-09-08T13:41:42Z","timestamp":1725802902416},"reference-count":12,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,24]],"date-time":"2024-06-24T00:00:00Z","timestamp":1719187200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,24]],"date-time":"2024-06-24T00:00:00Z","timestamp":1719187200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,24]]},"DOI":"10.1109\/ur61395.2024.10597529","type":"proceedings-article","created":{"date-parts":[[2024,7,26]],"date-time":"2024-07-26T17:23:58Z","timestamp":1722014638000},"page":"289-292","source":"Crossref","is-referenced-by-count":0,"title":["Seq2Act: A Sequence-to-Action Framework for Novel Shapes in Robotic Peg-in-Hole Assembly"],"prefix":"10.1109","author":[{"given":"Geonhyup","family":"Lee","sequence":"first","affiliation":[{"name":"School of Integrated Technology (SIT), Gwangju Institute of Science and Technology (GIST),Gwangju,Republic of Korea,61005"}]},{"given":"Joosoon","family":"Lee","sequence":"additional","affiliation":[{"name":"School of Integrated Technology (SIT), Gwangju Institute of Science and Technology (GIST),Gwangju,Republic of Korea,61005"}]},{"given":"Kyoobin","family":"Lee","sequence":"additional","affiliation":[{"name":"School of Integrated Technology (SIT), Gwangju Institute of Science and Technology (GIST),Gwangju,Republic of Korea,61005"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.1995.525545"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/COASE.2016.7743375"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2018.2791591"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS45743.2020.9341714"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2023.3237734"},{"key":"ref6","first-page":"1696","article-title":"Fast robust peg-in-hole insertion with continuous visual servoing","volume-title":"Conference on Robot Learning","author":"Haugaard","year":"2021"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.3390\/act12040144"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3076971"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811798"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9812429"},{"key":"ref11","article-title":"Isaac gym: High performance gpu-based physics simulation for robot learning","author":"Makoviychuk","year":"2021","journal-title":"arXiv preprint"},{"key":"ref12","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv preprint"}],"event":{"name":"2024 21st International Conference on Ubiquitous Robots (UR)","start":{"date-parts":[[2024,6,24]]},"location":"New York, NY, USA","end":{"date-parts":[[2024,6,27]]}},"container-title":["2024 21st International Conference on Ubiquitous Robots (UR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10597436\/10597437\/10597529.pdf?arnumber=10597529","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,27]],"date-time":"2024-07-27T05:29:19Z","timestamp":1722058159000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10597529\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,24]]},"references-count":12,"URL":"https:\/\/doi.org\/10.1109\/ur61395.2024.10597529","relation":{},"subject":[],"published":{"date-parts":[[2024,6,24]]}}}