{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T15:30:00Z","timestamp":1774539000669,"version":"3.50.1"},"reference-count":43,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T00:00:00Z","timestamp":1706745600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T00:00:00Z","timestamp":1706745600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T00:00:00Z","timestamp":1706745600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100004543","name":"China Scholarship Council","doi-asserted-by":"publisher","award":["CSC202006540003"],"award-info":[{"award-number":["CSC202006540003"]}],"id":[{"id":"10.13039\/501100004543","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001602","name":"Science Foundation Ireland","doi-asserted-by":"publisher","award":["17\/FRL\/4832"],"award-info":[{"award-number":["17\/FRL\/4832"]}],"id":[{"id":"10.13039\/501100001602","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001602","name":"Science Foundation Ireland","doi-asserted-by":"publisher","award":["SFI\/12\/RC\/2289_P2"],"award-info":[{"award-number":["SFI\/12\/RC\/2289_P2"]}],"id":[{"id":"10.13039\/501100001602","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. 
Lett."],"published-print":{"date-parts":[[2024,2]]},"DOI":"10.1109\/lra.2023.3342559","type":"journal-article","created":{"date-parts":[[2023,12,13]],"date-time":"2023-12-13T19:54:00Z","timestamp":1702497240000},"page":"1294-1301","source":"Crossref","is-referenced-by-count":10,"title":["Identifying Expert Behavior in Offline Training Datasets Improves Behavioral Cloning of Robotic Manipulation Policies"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-1445-755X","authenticated-orcid":false,"given":"Qiang","family":"Wang","sequence":"first","affiliation":[{"name":"School of Electrical and Electronic Engineering, University College Dublin, Dublin, Ireland"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2140-6988","authenticated-orcid":false,"given":"Robert","family":"McCarthy","sequence":"additional","affiliation":[{"name":"CeADAR-Ireland&#x0027;s Centre for Applied AI, University College Dublin, Dublin, Ireland"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2466-7179","authenticated-orcid":false,"given":"David Cordova","family":"Bulens","sequence":"additional","affiliation":[{"name":"School of Electrical and Electronic Engineering, University College Dublin, Dublin, Ireland"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-5954-0971","authenticated-orcid":false,"given":"Francisco Roldan","family":"Sanchez","sequence":"additional","affiliation":[{"name":"School of Electronic Engineering, Dublin City University, D9 Dublin, Ireland"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1336-6477","authenticated-orcid":false,"given":"Kevin","family":"McGuinness","sequence":"additional","affiliation":[{"name":"School of Electronic Engineering, Dublin City University, D9 Dublin, Ireland"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4033-9135","authenticated-orcid":false,"given":"Noel E.","family":"O'Connor","sequence":"additional","affiliation":[{"name":"School of Electronic Engineering, Dublin City University, D9 Dublin, Ireland"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2630-5449","authenticated-orcid":false,"given":"Stephen J.","family":"Redmond","sequence":"additional","affiliation":[{"name":"School of Electrical and Electronic Engineering, University College Dublin, Dublin, Ireland"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/HUMANOIDS.2015.7363524"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1111\/exsy.13205"},{"key":"ref4","first-page":"1","article-title":"Benchmarking offline reinforcement learning on real-robot hardware","volume-title":"Proc. 11th Int. Conf. Learn. Representations","author":"Grtler","year":"2022"},{"key":"ref5","first-page":"1719","article-title":"PLAS: Latent action space for offline reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Zhou","year":"2021"},{"key":"ref6","first-page":"20132","article-title":"A minimalist approach to offline reinforcement learning","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Fujimoto","year":"2021"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1093\/oso\/9780198538677.003.0006"},{"key":"ref8","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. 
Syst.","author":"Chen","year":"2021"},{"key":"ref9","article-title":"Manipulators and manipulation in high dimensional spaces","author":"Kumar","year":"2016"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.5772\/53940"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.rcim.2012.02.007"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1163\/156855306778522550"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/AMC.2010.5464018"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3347141"},{"key":"ref15","first-page":"2","article-title":"Hybrid position\/force control of manipulators. ASME","volume":"103","author":"Reibert","year":"1981","journal-title":"J. Dyn. Syst., Meas., Control"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2001.932857"},{"key":"ref17","article-title":"Grasp and motion planning for dexterous manipulation for the real robot challenge","author":"Yoneda","year":"2021"},{"key":"ref18","article-title":"Playing Atari with deep reinforcement learning","author":"Mnih","year":"2013"},{"key":"ref19","first-page":"1","article-title":"Solving the real robot challenge using deep reinforcement learning","volume-title":"Proc. 29th Irish Conf. Artif. Intell. Cogn. Sci.","author":"McCarthy","year":"2021"},{"key":"ref20","first-page":"1","article-title":"What matters in learning from offline human demonstrations for robot manipulation","volume-title":"Proc. 5th Annu. Conf. Robot Learn.","author":"Mandlekar","year":"2021"},{"key":"ref21","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto","year":"2019"},{"key":"ref22","first-page":"1","article-title":"Neural probabilistic motor primitives for humanoid control","volume-title":"Proc. 7th Int. Conf. Learn. Representations","author":"Merel","year":"2019"},{"key":"ref23","first-page":"4572","article-title":"Generative adversarial imitation learning","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Ho","year":"2016"},{"key":"ref24","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ng","year":"2000"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1201\/9781003278177-6"},{"key":"ref26","first-page":"965","article-title":"Mitigating covariate shift in imitation learning via offline data with partial coverage","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Chang","year":"2021"},{"key":"ref27","first-page":"330","article-title":"Better-than-demonstrator imitation learning via automatically-ranked demonstrations","volume-title":"Proc. Conf. Robot Learn.","author":"Brown","year":"2020"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1613\/jair.301"},{"key":"ref29","first-page":"1","article-title":"Offline reinforcement learning with implicit Q-learning","author":"Kostrikov","year":"2022","journal-title":"Proc. 10th Int. Conf. Learn. Representations"},{"key":"ref30","first-page":"1179","article-title":"Conservative Q-learning for offline reinforcement learning","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Kumar","year":"2020"},{"key":"ref31","first-page":"7436","article-title":"Uncertainty-based offline reinforcement learning with diversified Q-ensemble","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. 
Syst.","author":"An","year":"2021"},{"key":"ref32","first-page":"7768","article-title":"Critic regularized regression","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Wang","year":"2020"},{"key":"ref33","article-title":"AWAC: Accelerating online reinforcement learning with offline datasets","author":"Nair","year":"2020"},{"key":"ref34","first-page":"1","article-title":"Fitted Q-iteration by advantage weighted regression","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Neumann","year":"2008"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3250269"},{"key":"ref36","first-page":"2839","article-title":"Domain adaptation with conditional transferable components","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Gong","year":"2016"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1016\/S0378-3758(00)00115-4"},{"key":"ref38","first-page":"907","article-title":"S4RL: Surprisingly simple self-supervision for offline reinforcement learning in robotics","volume-title":"Proc. Conf. Robot Learn.","author":"Sinha","year":"2022"},{"key":"ref39","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto","year":"2018"},{"issue":"1","key":"ref40","first-page":"14205","article-title":"D3RLPY: An offline deep reinforcement learning library","volume":"23","author":"Seno","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref41","article-title":"D4RL: Datasets for deep data-driven reinforcement learning","author":"Fu","year":"2020"},{"key":"ref42","first-page":"1","article-title":"RVS: What is essential for offline RL via supervised learning?","volume-title":"Proc. 10th Int. Conf. on Learn. Representations","author":"Emmons","year":"2022"},{"key":"ref43","first-page":"3851","article-title":"Improving behavioural cloning with positive unlabeled learning","volume-title":"Proc. Conf. Robot Learn.","author":"Wang","year":"2023"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/10360389\/10356825.pdf?arnumber=10356825","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,23]],"date-time":"2024-12-23T19:30:58Z","timestamp":1734982258000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10356825\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,2]]},"references-count":43,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/lra.2023.3342559","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,2]]}}}