{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T08:32:42Z","timestamp":1775809962867,"version":"3.50.1"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,10,24]],"date-time":"2020-10-24T00:00:00Z","timestamp":1603497600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000915","name":"Richard King Mellon Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000915","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,10,24]]},"DOI":"10.1109\/iros45743.2020.9340876","type":"proceedings-article","created":{"date-parts":[[2021,3,15]],"date-time":"2021-03-15T14:49:56Z","timestamp":1615819796000},"page":"11748-11754","source":"Crossref","is-referenced-by-count":103,"title":["MAPPER: Multi-Agent Path Planning with Evolutionary Reinforcement Learning in Mixed Dynamic Environments"],"prefix":"10.1109","author":[{"given":"Zuxin","family":"Liu","sequence":"first","affiliation":[]},{"given":"Baiming","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Hongyi","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Guru","family":"Koushik","sequence":"additional","affiliation":[]},{"given":"Martial","family":"Hebert","sequence":"additional","affiliation":[]},{"given":"Ding","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989037"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793868"},{"key":"ref12","first-page":"15220","article-title":"Search on the replay buffer: Bridging planning and reinforcement learning","author":"eysenbach","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2011.2120810"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1177\/0278364915576234"},{"key":"ref15","article-title":"Baidu apollo em motion planner","author":"fan","year":"2018"},{"key":"ref16","doi-asserted-by":"crossref","first-page":"3","DOI":"10.1007\/978-3-642-19457-3_1","article-title":"Reciprocal n-body collision avoidance","author":"van den berg","year":"2011","journal-title":"Robotics Research"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2010.5654369"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989182"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202134"},{"key":"ref28","first-page":"117","article-title":"Cooperative pathfinding","author":"silver","year":"2005","journal-title":"AIIDE"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.2008.4543489"},{"key":"ref27","author":"simon","year":"2013","journal-title":"Evolutionary Optimization Algorithms"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2004.838026"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2018.8593871"},{"key":"ref29","article-title":"Intrinsically motivated goal exploration processes with automatic curriculum learning","author":"forestier","year":"2017"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202312"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2903261"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2016.7759200"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2012.6225009"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2018.8461113"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2014.11.006"},{"key":"ref20","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"2016","journal-title":"International Conference on Machine Learning"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2006.889486"},{"key":"ref21","first-page":"183","article-title":"Multi-agent reinforcement learning: An overview","author":"buşoniu","year":"2010","journal-title":"Innovations in Multi-Agent Systems and Applications - 1"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2013.6629610"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8793813"},{"key":"ref26","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"lowe","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref25","article-title":"Playing atari with deep reinforcement learning","author":"mnih","year":"2013"}],"event":{"name":"2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","location":"Las Vegas, NV, USA","start":{"date-parts":[[2020,10,24]]},"end":{"date-parts":[[2021,1,24]]}},"container-title":["2020 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9340668\/9340635\/09340876.pdf?arnumber=9340876","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T21:55:20Z","timestamp":1656453320000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9340876\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10,24]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/iros45743.2020.9340876","relation":{},"subject":[],"published":{"date-parts":[[2020,10,24]]}}}