{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,8]],"date-time":"2026-03-08T03:40:50Z","timestamp":1772941250337,"version":"3.50.1"},"reference-count":40,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2021,1,1]],"date-time":"2021-01-01T00:00:00Z","timestamp":1609459200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100004663","name":"Ministry of Science and Technology (MOST), Taiwan","doi-asserted-by":"publisher","award":["MOST 108-2221-E-032-045"],"award-info":[{"award-number":["MOST 108-2221-E-032-045"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004663","name":"Ministry of Science and Technology (MOST), Taiwan","doi-asserted-by":"publisher","award":["MOST 109-2221-E-032-038"],"award-info":[{"award-number":["MOST 109-2221-E-032-038"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004663","name":"Ministry of Science and Technology (MOST), Taiwan","doi-asserted-by":"publisher","award":["MOST 109-2918-I-032-002"],"award-info":[{"award-number":["MOST 109-2918-I-032-002"]}],"id":[{"id":"10.13039\/501100004663","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2021]]},"DOI":"10.1109\/access.2021.3056903","type":"journal-article","created":{"date-parts":[[2021,2,3]],"date-time":"2021-02-03T22:00:06Z","timestamp":1612389606000},"page":"26871-26885","source":"Crossref","is-referenced-by-count":59,"title":["Motion Planning for Dual-Arm Robot Based on Soft Actor-Critic"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1095-728X","authenticated-orcid":false,"given":"Ching-Chang","family":"Wong","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9314-3112","authenticated-orcid":false,"given":"Shao-Yu","family":"Chien","sequence":"additional","affiliation":[]},{"given":"Hsuan-Ming","family":"Feng","sequence":"additional","affiliation":[]},{"given":"Hisasuki","family":"Aoyama","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCC.2007.913919"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/325334.325242"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.3390\/s20205911"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.3390\/app10020575"},{"key":"ref31","article-title":"Distributed soft actor-critic with multivariate reward representation and knowledge distillation","author":"akimov","year":"2019","journal-title":"arXiv 1911 13056"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8967946"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1115\/DETC2006-99307"},{"key":"ref36","article-title":"Empirical evaluation of rectified activations in convolutional network","author":"xu","year":"2015","journal-title":"arXiv 1505 00853"},{"key":"ref35","first-page":"3","article-title":"Rectifier nonlinearities improve neural network acoustic models","author":"maas","year":"2013","journal-title":"Proc 30th Int Conf Mach Learn"},{"key":"ref34","article-title":"Learning a decentralized multi-arm motion planner","author":"ha","year":"2020","journal-title":"arXiv 2011 02608"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/21.279000"},{"key":"ref40","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/70.143360"},{"key":"ref12","doi-asserted-by":"crossref","first-page":"160","DOI":"10.1109\/70.345949","article-title":"A dynamic programming approach to near minimum-time trajectory planning for two robots","volume":"11","author":"lee","year":"1995","journal-title":"IEEE Trans Robot Autom"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ROSE.2013.6698413"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/AIM.2014.6878217"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2012.6225245"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/BioRob.2012.6290917"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/EMBC.2012.6346870"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCC.2011.2106494"},{"key":"ref19","article-title":"Playing Atari with deep reinforcement learning","author":"mnih","year":"2013","journal-title":"arXiv 1312 5602"},{"key":"ref28","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"haarnoja","year":"2018","journal-title":"arXiv 1801 01290"},{"key":"ref4","first-page":"496","article-title":"Robust optimal inverse kinematics with self-collision avoidance for a humanoid robot","author":"chua","year":"2013","journal-title":"Proc IEEE Ro-man"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2020.07.033"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/SII.2013.6776637"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2897145"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.2974445"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2020.3036693"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/s12541-017-0099-z"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1108\/01439911111106390"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386294"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.1987.289330"},{"key":"ref1","first-page":"1031","article-title":"Collision avoidance of two manipulators using RT-middleware","author":"zhou","year":"2011","journal-title":"Proc IEEE\/SICE Int Symp Syst Integr"},{"key":"ref20","article-title":"3D simulation for robot arm control with deep Q-learning","author":"james","year":"2016","journal-title":"arXiv 1609 03759"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989385"},{"key":"ref21","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"arXiv 1509 02971"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202134"},{"key":"ref23","article-title":"Leveraging demonstrations for deep reinforcement learning on robotics problems with sparse rewards","author":"vecerik","year":"2017","journal-title":"arXiv 1707 08817"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2019.2952810"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2016.2618771"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/9312710\/09345768.pdf?arnumber=9345768","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,26]],"date-time":"2022-01-26T08:34:31Z","timestamp":1643186071000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9345768\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/access.2021.3056903","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021]]}}}