{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T16:34:47Z","timestamp":1773246887947,"version":"3.50.1"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,7,8]],"date-time":"2025-07-08T00:00:00Z","timestamp":1751932800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,7,8]],"date-time":"2025-07-08T00:00:00Z","timestamp":1751932800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,7,8]]},"DOI":"10.23919\/acc63710.2025.11107498","type":"proceedings-article","created":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T18:17:51Z","timestamp":1755800271000},"page":"4115-4122","source":"Crossref","is-referenced-by-count":1,"title":["Hierarchical MARL with Stackelberg Games"],"prefix":"10.23919","author":[{"given":"Carmel","family":"Fiscko","sequence":"first","affiliation":[{"name":"Cornell University,Dept. of Systems Engineering,Ithaca,NY"}]},{"given":"Haoyu","family":"Yin","sequence":"additional","affiliation":[{"name":"Washington University,Dept. of Electrical and Systems Engineering,St. Louis,MO"}]},{"given":"Bruno","family":"Sinopoli","sequence":"additional","affiliation":[{"name":"Washington University,Dept. of Electrical and Systems Engineering,St. 
Louis,MO"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Multi-agent reinforcement learning in sequential social dilemmas","author":"Leibo","year":"2017"},{"key":"ref2","article-title":"D3c: Reducing the price of anarchy in multi-agent learning","author":"Gemp","year":"2020"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CEC.2013.6557607"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1613\/jair.4317"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3269"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TSG.2012.2223766"},{"key":"ref7","article-title":"Feudal reinforcement learning","volume":"5","author":"Dayan","year":"1992","journal-title":"Advances in neural information processing systems"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/s10458-006-7035-4"},{"key":"ref9","doi-asserted-by":"crossref","DOI":"10.1137\/1.9781611971132","volume-title":"Dynamic noncooperative game theory.","author":"Ba\u015far","year":"1998"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v26i1.8234"},{"key":"ref11","article-title":"Can reinforcement learning find stackelberg-nash equilibria in general-sum markov games with myopic followers?","author":"Zhong","year":"2021"},{"key":"ref12","first-page":"11658","article-title":"Zero-sum stochastic stackelberg games","volume":"35","author":"Goktas","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref13","first-page":"25799","article-title":"Sample-efficient learning of stackelberg equilibria in general-sum games","volume":"34","author":"Bai","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2022.3220512"},{"key":"ref15","first-page":"42304","article-title":"Online learning in stackelberg games with an omniscient follower","volume-title":"International Conference on Machine 
Learning","author":"Zhao"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/tac.2023.3330797"},{"key":"ref17","first-page":"11213","article-title":"Oracles & followers: Stackelberg equilibria in deep multi-agent reinforcement learning","volume-title":"International Conference on Machine Learning","author":"Gerstgrasser"},{"key":"ref18","article-title":"Coordinating the crowd: Inducing desirable equilibria in non-cooperative systems","author":"Mguni","year":"2019"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4612-4054-9"},{"key":"ref20","first-page":"232","article-title":"Learning nash equilibrium for general-sum markov games from batch data","author":"Perolat","year":"2017","journal-title":"Artificial intelligence and statistics."},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-60990-0_12"},{"key":"ref22","first-page":"4860","article-title":"Learning adversarial markov decision processes with bandit feedback and unknown transition","volume-title":"International Conference on Machine Learning","author":"Jin"},{"key":"ref23","volume-title":"Reinforcement learning: An introduction.","author":"Sutton","year":"2018"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-93031-2_27"},{"key":"ref25","article-title":"Gurobi Optimizer Reference Manual","year":"2023"},{"key":"ref26","first-page":"3679","article-title":"Adaptive model design for Markov decision process","volume-title":"Proceedings of the 39th International Conference on Machine Learning","author":"Chen"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/2897518.2897579"}],"event":{"name":"2025 American Control Conference (ACC)","location":"Denver, CO, USA","start":{"date-parts":[[2025,7,8]]},"end":{"date-parts":[[2025,7,10]]}},"container-title":["2025 American Control Conference 
(ACC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11107441\/11107442\/11107498.pdf?arnumber=11107498","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T05:24:22Z","timestamp":1755840262000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11107498\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,8]]},"references-count":27,"URL":"https:\/\/doi.org\/10.23919\/acc63710.2025.11107498","relation":{},"subject":[],"published":{"date-parts":[[2025,7,8]]}}}