{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T20:43:30Z","timestamp":1764103410005,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":11,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,5,12]],"date-time":"2023-05-12T00:00:00Z","timestamp":1683849600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/100007219","name":"Natural Science Foundation of Shanghai","doi-asserted-by":"publisher","award":["21ZR1401100"],"award-info":[{"award-number":["21ZR1401100"]}],"id":[{"id":"10.13039\/100007219","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["2232022G-09"],"award-info":[{"award-number":["2232022G-09"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,5,12]]},"DOI":"10.1145\/3598151.3598170","type":"proceedings-article","created":{"date-parts":[[2023,7,19]],"date-time":"2023-07-19T00:58:44Z","timestamp":1689728324000},"page":"108-112","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["Robotic motion planning with obstacle avoidance based on hierarchical deep reinforcement learning"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-6482-2816","authenticated-orcid":false,"given":"Guoquan","family":"Zhao","sequence":"first","affiliation":[{"name":"College of Information Science and Technology, Donghua University, China and Engineering Research Center of Digitized Textile &amp; Fashion Technology, Ministry of Education, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1931-378X","authenticated-orcid":false,"given":"Fengkang","family":"Ying","sequence":"additional","affiliation":[{"name":"College of Information Science and Technology, Donghua University, China and Engineering Research Center of Digitized Textile &amp; Fashion Technology, Ministry of Education, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-3523-3048","authenticated-orcid":false,"given":"Zuowei","family":"Pang","sequence":"additional","affiliation":[{"name":"Huzhou Institute of Zhejiang University, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8209-4922","authenticated-orcid":false,"given":"Huashan","family":"Liu","sequence":"additional","affiliation":[{"name":"College of Information Science and Technology, Donghua University, China and Engineering Research Center of Digitized Textile &amp; Fashion Technology, China"}]}],"member":"320","published-online":{"date-parts":[[2023,7,18]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2020.3024655"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8968488"},{"volume-title":"Robotic Obstacle-avoidable Trajectory Planning Based on Deep Reinforcement Learning. Master\u2019s thesis","author":"Jiang Rongxin","key":"e_1_3_2_1_3_1","unstructured":"Rongxin Jiang . 2023. Robotic Obstacle-avoidable Trajectory Planning Based on Deep Reinforcement Learning. Master\u2019s thesis . Donghua University . Rongxin Jiang. 2023. Robotic Obstacle-avoidable Trajectory Planning Based on Deep Reinforcement Learning. Master\u2019s thesis. Donghua University."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2021.3125447"},{"key":"e_1_3_2_1_5_1","unstructured":"Timothy\u00a0P Lillicrap Jonathan\u00a0J Hunt Alexander Pritzel Nicolas Heess Tom Erez Yuval Tassa David Silver and Daan Wierstra. 2015. Continuous control with deep reinforcement learning. arxiv:1509.02971  Timothy\u00a0P Lillicrap Jonathan\u00a0J Hunt Alexander Pritzel Nicolas Heess Tom Erez Yuval Tassa David Silver and Daan Wierstra. 2015. Continuous control with deep reinforcement learning. arxiv:1509.02971"},{"key":"e_1_3_2_1_6_1","doi-asserted-by":"publisher","DOI":"10.1145\/3453160"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2019.2909081"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2020.2997799"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICIBA50161.2020.9277001"},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2022.3143611"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1109\/TMECH.2022.3160605"}],"event":{"name":"RobCE 2023: 2023 3rd International Conference on Robotics and Control Engineering","acronym":"RobCE 2023","location":"Nanjing China"},"container-title":["Proceedings of the 2023 3rd International Conference on Robotics and Control Engineering"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3598151.3598170","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3598151.3598170","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T16:38:00Z","timestamp":1750178280000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3598151.3598170"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,5,12]]},"references-count":11,"alternative-id":["10.1145\/3598151.3598170","10.1145\/3598151"],"URL":"https:\/\/doi.org\/10.1145\/3598151.3598170","relation":{},"subject":[],"published":{"date-parts":[[2023,5,12]]},"assertion":[{"value":"2023-07-18","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}