{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,15]],"date-time":"2026-04-15T17:55:34Z","timestamp":1776275734191,"version":"3.50.1"},"reference-count":48,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,10,14]],"date-time":"2024-10-14T00:00:00Z","timestamp":1728864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,10,14]],"date-time":"2024-10-14T00:00:00Z","timestamp":1728864000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,10,14]]},"DOI":"10.1109\/iros58592.2024.10801914","type":"proceedings-article","created":{"date-parts":[[2024,12,25]],"date-time":"2024-12-25T19:17:39Z","timestamp":1735154259000},"page":"8047-8054","source":"Crossref","is-referenced-by-count":6,"title":["Ensembling Prioritized Hybrid Policies for Multi-agent Pathfinding"],"prefix":"10.1109","author":[{"given":"Huijie","family":"Tang","sequence":"first","affiliation":[{"name":"KAIST,Department of Industrial and Systems Engineering,South Korea"}]},{"given":"Federico","family":"Berto","sequence":"additional","affiliation":[{"name":"KAIST,Department of Industrial and Systems Engineering,South Korea"}]},{"given":"Jinkyoo","family":"Park","sequence":"additional","affiliation":[{"name":"KAIST,Department of Industrial and Systems Engineering,South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1609\/socs.v10i1.18510"},{"issue":"1","key":"ref2","first-page":"9","article-title":"Coordinating hundreds of cooperative, autonomous vehicles in warehouses","volume":"29","author":"Wurman","year":"2008","journal-title":"AI magazine"},{"key":"ref3","first-page":"608","article-title":"Planning, scheduling and monitoring for airport surface operations","volume-title":"AAAI Workshop: Planning for Hybrid Systems","author":"Morris"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1609\/aiide.v13i1.12919"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v27i1.8541"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2017.2715406"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2014.11.006"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1609\/socs.v5i1.18315"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i14.17466"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i13.17344"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2019.2903261"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3062803"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3139145"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9560748"},{"key":"ref15","author":"Mnih","year":"2013","journal-title":"Playing atari with deep reinforcement learning"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IROS55552.2023.10342305"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3292004"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1609\/icaps.v29i1.3471"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1609\/icaps.v30i1.6661"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-023-10670-6"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2021.3050960"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2020.04.028"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2022.3206745"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA46639.2022.9811643"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO55434.2022.10011833"},{"key":"ref26","author":"Gao","year":"2023","journal-title":"Rde: A hybrid policy framework for multi-agent path finding problem"},{"key":"ref27","article-title":"HiMAP: Learning heuristics-informed policies for large-scale multi-agent pathfinding","volume-title":"AAMAS","author":"Tang"},{"key":"ref28","first-page":"7487","article-title":"Stabilizing transformers for reinforcement learning","volume-title":"International conference on machine learning","author":"Parisotto"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref30","article-title":"Dueling network architectures for deep reinforcement learning","volume-title":"International conference on machine learning","author":"Wang"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref32","article-title":"Double q-learning","volume":"23","author":"Hasselt","year":"2010","journal-title":"Advances in neural information processing systems"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4615-4022-9"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1609\/aiide.v1i1.18726"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2022.103752"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33017643"},{"key":"ref37","author":"Schaul","year":"2015","journal-title":"Prioritized experience replay"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553380"},{"key":"ref39","author":"Schulman","year":"2017","journal-title":"Proximal policy optimization algorithms"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2013.6631119"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1023\/A:1022672621406"},{"key":"ref42","volume-title":"RL4CO: an Extensive Reinforcement Learning for Combinatorial Optimization Benchmark","author":"Berto","year":"2024"},{"key":"ref43","author":"Hottung","year":"2019","journal-title":"Neural large neighborhood search for the capacitated vehicle routing problem"},{"key":"ref44","first-page":"35","article-title":"The euro meets neurips 2022 vehicle routing competition","author":"Kool","year":"2022","journal-title":"NeurIPS 2022 Competition Track"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i18.30009"},{"key":"ref46","article-title":"Deepaco: Neural-enhanced ant systems for combinatorial optimization","volume":"36","author":"Ye","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref47","author":"Liu","year":"2024","journal-title":"An example of evolutionary computation+ large language model beating human: Design of efficient guided local search"},{"key":"ref48","volume-title":"Large language models as hyper-heuristics for combinatorial optimization","author":"Ye","year":"2024"}],"event":{"name":"2024 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","location":"Abu Dhabi, United Arab Emirates","start":{"date-parts":[[2024,10,14]]},"end":{"date-parts":[[2024,10,18]]}},"container-title":["2024 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10801246\/10801290\/10801914.pdf?arnumber=10801914","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,26]],"date-time":"2024-12-26T06:58:46Z","timestamp":1735196326000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10801914\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,14]]},"references-count":48,"URL":"https:\/\/doi.org\/10.1109\/iros58592.2024.10801914","relation":{},"subject":[],"published":{"date-parts":[[2024,10,14]]}}}