{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,4]],"date-time":"2026-04-04T18:16:47Z","timestamp":1775326607139,"version":"3.50.1"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key R&D Program of China","doi-asserted-by":"publisher","award":["2020YFC1511803"],"award-info":[{"award-number":["2020YFC1511803"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icra46639.2022.9812263","type":"proceedings-article","created":{"date-parts":[[2022,7,12]],"date-time":"2022-07-12T19:36:40Z","timestamp":1657654600000},"page":"1661-1667","source":"Crossref","is-referenced-by-count":29,"title":["Relative Distributed Formation and Obstacle Avoidance with Multi-agent Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Yuzi","family":"Yan","sequence":"first","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering,Beijing,China"}]},{"given":"Xiaoxiang","family":"Li","sequence":"additional","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering,Beijing,China"}]},{"given":"Xinyou","family":"Qiu","sequence":"additional","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering,Beijing,China"}]},{"given":"Jiantao","family":"Qiu","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory,Shanghai,China"}]},{"given":"Jian","family":"Wang","sequence":"additional","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering,Beijing,China"}]},{"given":"Yu","family":"Wang","sequence":"additional","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering,Beijing,China"}]},{"given":"Yuan","family":"Shen","sequence":"additional","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering,Beijing,China"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1177\/0142331213503864"},{"key":"ref38","author":"kingma","year":"2014","journal-title":"Adam A method for stochastic optimization"},{"key":"ref33","author":"rusu","year":"2015","journal-title":"Policy distillation"},{"key":"ref32","author":"green","year":"2019","journal-title":"Distillation strategies for proximal policy optimization"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682555"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TVT.2018.2890773"},{"key":"ref37","first-page":"1801","article-title":"A new framework for multi-agent reinforcement learning-centralized training and exploration with decentralized execution via policy distillation","author":"chen","year":"2020","journal-title":"Proc 19th Intern Conf Auto Agents and MultiAgent Syst"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33013796"},{"key":"ref35","author":"schulman","year":"2015","journal-title":"High-dimensional continuous control using generalized 
advantage estimation"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553380"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CEC.2015.7257266"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2020.3004893"},{"key":"ref12","first-page":"2902","article-title":"Re-formation of Mobile Robots using Genetic Algorithm and Reinforcement Learning","volume":"3","author":"f kobayashi","year":"2003","journal-title":"SICE 2003 Annual Conference (IEEE Cat No 03TH8734) SICE-03"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2018.8489066"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TFUZZ.2017.2787561"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ROMAN.2017.8172432"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IROS40897.2019.8967561"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2006.282421"},{"key":"ref18","article-title":"Model-free reinforcement learning approach for leader-follower formation using nonholonomic mobile robots","author":"miah","year":"2020","journal-title":"The 33rd Intl Flairs Conf"},{"key":"ref19","author":"qiu","year":"2021","journal-title":"A drl based distributed formation control scheme with stream based collision avoidance"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2021.3064216"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2003.1241967"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2010.2059720"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2013.02.055"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2017.2782229"},{"key":"ref29","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"lowe","year":"2017","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1017\/S0263574707004092"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/j.jfranklin.2016.12.021"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCChina.2019.8855905"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2016.12.031"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2008.922880"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.automatica.2014.10.022"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2018.2800790"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2008.2010360"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2014.6907279"},{"key":"ref24","author":"schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.4271\/2006-01-3638"},{"key":"ref26","author":"narvekar","year":"2020","journal-title":"Curriculum Learning for Reinforcement Learning Domains A Framework and Survey"},{"key":"ref25","author":"yu","year":"2021","journal-title":"The surprising effectiveness of mappo in cooperative multi -agent games"}],"event":{"name":"2022 IEEE International Conference on Robotics and Automation (ICRA)","location":"Philadelphia, PA, USA","start":{"date-parts":[[2022,5,23]]},"end":{"date-parts":[[2022,5,27]]}},"container-title":["2022 International Conference on Robotics and Automation 
(ICRA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9811522\/9811357\/09812263.pdf?arnumber=9812263","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,3]],"date-time":"2022-11-03T23:05:56Z","timestamp":1667516756000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9812263\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/icra46639.2022.9812263","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}