{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T04:06:07Z","timestamp":1750824367095,"version":"3.41.0"},"reference-count":27,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"8","license":[{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1109\/lra.2025.3578238","type":"journal-article","created":{"date-parts":[[2025,6,9]],"date-time":"2025-06-09T17:36:26Z","timestamp":1749490586000},"page":"7715-7722","source":"Crossref","is-referenced-by-count":0,"title":["A Reinforcement Learning Approach to Non-Prehensile Manipulation Through Sliding"],"prefix":"10.1109","volume":"10","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-1375-1901","authenticated-orcid":false,"given":"Hamidreza","family":"Raei","sequence":"first","affiliation":[{"name":"Human-Robot Interfaces and Interaction Lab, Istituto Italiano di Tecnologia, Genoa, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8819-2734","authenticated-orcid":false,"given":"Elena","family":"De Momi","sequence":"additional","affiliation":[{"name":"Department of Electronics, Information and Bioengineering, Politecnico di Milano, Milan, Italy"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1261-737X","authenticated-orcid":false,"given":"Arash","family":"Ajoudani","sequence":"additional","affiliation":[{"name":"Human-Robot Interfaces and Interaction Lab, Istituto Italiano di Tecnologia, Genoa, Italy"}]}],"member":"263","reference":[{"article-title":"HACMan: Learning hybrid actor-critic maps for 6D non-prehensile manipulation","year":"2023","author":"Zhou","key":"ref1"},{"key":"ref2","first-page":"336","article-title":"A data-efficient approach to precise and controlled pushing","volume-title":"Proc. Conf. Robot Learn.","author":"Bauza","year":"2018"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/tcst.2023.3277224"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/lra.2023.3324520"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2024.XX.012"},{"article-title":"Benchmarking Sim2Real gap: High-fidelity digital twinning of Agile manufacturing","year":"2024","author":"Katyara","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1103\/physreve.77.036123"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.3390\/app13020680"},{"article-title":"Isaac Gym: High performance GPU-Based physics simulation for robot learning","year":"2021","author":"Makoviychuk","key":"ref9"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/iros.2012.6386109"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/iros.2017.8202133"},{"article-title":"Understanding domain randomization for SIMtoReal transfer","year":"2021","author":"Chen","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1177\/02783649922067762"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487582"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2018.2887356"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.23919\/ecc.2019.8796077"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/tro.2009.2017085"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/iros55552.2023.10341629"},{"article-title":"Reinforcement learning to improve delta robot throws for sorting scrap metal","year":"2024","author":"Louette","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/icra.2015.7139807"},{"article-title":"Deep reinforcement learning for bipedal locomotion: A brief survey","year":"2024","author":"Bao","key":"ref22"},{"article-title":"Off-policy deep reinforcement learning algorithms for handling various robotic manipulator tasks","year":"2022","author":"Rzayev","key":"ref23"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.32657\/10356\/90191"},{"key":"ref25","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto","year":"2018"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1190"},{"article-title":"Domain randomization via entropy maximization","year":"2023","author":"Tiboni","key":"ref27"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7083369\/11045364\/11029127.pdf?arnumber=11029127","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,24]],"date-time":"2025-06-24T06:22:27Z","timestamp":1750746147000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11029127\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8]]},"references-count":27,"journal-issue":{"issue":"8"},"URL":"https:\/\/doi.org\/10.1109\/lra.2025.3578238","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"type":"electronic","value":"2377-3766"},{"type":"electronic","value":"2377-3774"}],"subject":[],"published":{"date-parts":[[2025,8]]}}}