{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T20:55:55Z","timestamp":1775163355564,"version":"3.50.1"},"reference-count":34,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100003452","name":"Innovation and Technology Commission of the HKSAR Government under the InnoHK Initiative","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003452","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Automat. Sci. Eng."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tase.2025.3527003","type":"journal-article","created":{"date-parts":[[2025,1,8]],"date-time":"2025-01-08T15:44:59Z","timestamp":1736351099000},"page":"10741-10752","source":"Crossref","is-referenced-by-count":4,"title":["Multi-Critic Reinforcement Learning for Garment Handling: Addressing Unpredictability in Temporal-Phase Continuous Contact Tasks"],"prefix":"10.1109","volume":"22","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-0778-4610","authenticated-orcid":false,"given":"Yukuan","family":"Zhang","sequence":"first","affiliation":[{"name":"Robotics Department, School of Engineering, Tohoku University, Sendai, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9644-7584","authenticated-orcid":false,"given":"Dayuan","family":"Chen","sequence":"additional","affiliation":[{"name":"Robotics Department, School of Engineering, Tohoku University, Sendai, Japan"}]},{"given":"Weizan","family":"He","sequence":"additional","affiliation":[{"name":"Robotics Department, School of Engineering, Tohoku University, Sendai, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2766-7599","authenticated-orcid":false,"given":"Alberto El\u00ed","family":"Petrilli Barcel\u00f3","sequence":"additional","affiliation":[{"name":"Robotics Department, School of Engineering, Tohoku University, Sendai, Japan"}]},{"given":"Jose Victorio","family":"Salazar Luces","sequence":"additional","affiliation":[{"name":"Robotics Department, School of Engineering, Tohoku University, Sendai, Japan"}]},{"given":"Yasuhisa","family":"Hirata","sequence":"additional","affiliation":[{"name":"Robotics Department, School of Engineering, Tohoku University, Sendai, Japan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2016.2602376"},{"key":"ref2","article-title":"Learning robust bed making using deep imitation learning with DART","author":"Laskey","year":"2017","journal-title":"arXiv:1711.02525"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1080\/01691864.2019.1636715"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8594021"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/MRA.2022.3147415"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3272127.3275048"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.3389\/frobt.2017.00013"},{"key":"ref8","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6248110"},{"key":"ref10","first-page":"432","article-title":"SoftGym: Benchmarking deep reinforcement learning for deformable object manipulation","volume-title":"Proc. Conf. Robot Learn.","author":"Lin"},{"key":"ref11","article-title":"Reinforcement learning and control as probabilistic inference: Tutorial and review","author":"Levine","year":"2018","journal-title":"arXiv:1805.00909"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2016.2633383"},{"key":"ref13","article-title":"FlingBot: The unreasonable effectiveness of dynamic manipulation for cloth unfolding","author":"Ha","year":"2021","journal-title":"arXiv:2105.03655"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IROS47612.2022.9981402"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2008.10.024"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2004.03.001"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/lra.2022.3187843"},{"key":"ref18","first-page":"734","article-title":"Sim-to-real reinforcement learning for deformable object manipulation","volume-title":"Proc. 2nd Conf. Robot Learn.","author":"Matas"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/IROS55552.2023.10342002"},{"key":"ref20","article-title":"Addressing function approximation error in actor-critic methods","author":"Fujimoto","year":"2018","journal-title":"arXiv:1802.09477"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3025194"},{"key":"ref22","article-title":"Multi-critic actor learning: Teaching RL policies to act with style","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Mysore"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3150866"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA57147.2024.10610630"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2004.1389727"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ELEKTRO49696.2020.9130233"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561391"},{"issue":"4","key":"ref28","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/2601097.2601132","article-title":"Adaptive tearing and cracking of thin sheets","volume":"33","author":"Pfaff","year":"2014","journal-title":"ACM Trans. Graph."},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1145\/3197517.3201308"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/2070781.2024173"},{"key":"ref31","article-title":"Learning particle dynamics for manipulating rigid bodies, deformable objects, and fluids","volume-title":"Proc. ICLR","author":"Li"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3270034"},{"key":"ref33","volume-title":"NVIDIA Isaac Sim","year":"2023"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CASE59546.2024.10711346"}],"container-title":["IEEE Transactions on Automation Science and Engineering"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/8856\/10839176\/10833866.pdf?arnumber=10833866","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:53:52Z","timestamp":1775159632000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10833866\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/tase.2025.3527003","relation":{},"ISSN":["1545-5955","1558-3783"],"issn-type":[{"value":"1545-5955","type":"print"},{"value":"1558-3783","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}