{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:22:21Z","timestamp":1740100941978,"version":"3.37.3"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004147","name":"Tsinghua University","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004147","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004147","name":"Tsinghua University","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100004147","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/cog51982.2022.9893722","type":"proceedings-article","created":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T19:33:31Z","timestamp":1663702411000},"page":"9-16","source":"Crossref","is-referenced-by-count":1,"title":["VMAPD: Generate Diverse Solutions for Multi-Agent Games with Recurrent Trajectory Discriminators"],"prefix":"10.1109","author":[{"given":"Shiyu","family":"Huang","sequence":"first","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Chao","family":"Yu","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Bin","family":"Wang","sequence":"additional","affiliation":[{"name":"Huawei Noah\u2019s Ark Lab,Beijing,China"}]},{"given":"Dong","family":"Li","sequence":"additional","affiliation":[{"name":"Huawei Noah\u2019s Ark Lab,Beijing,China"}]},{"given":"Yu","family":"Wang","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Ting","family":"Chen","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Jun","family":"Zhu","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.3390\/e22020131"},{"key":"ref32","article-title":"Probabilistic recursive reasoning for multi-agent reinforcement learning","author":"wen","year":"2019","journal-title":"International Conference on Learning Representations"},{"key":"ref31","article-title":"Rode: Learning roles to decompose multi-agent tasks","author":"wang","year":"2020","journal-title":"arXiv preprint arXiv 2010 00170"},{"key":"ref30","doi-asserted-by":"crossref","first-page":"350","DOI":"10.1038\/s41586-019-1724-z","article-title":"Grandmaster level in starcraft ii using multi-agent reinforcement learning","volume":"575","author":"vinyals","year":"2019","journal-title":"Nature"},{"key":"ref37","first-page":"1433","article-title":"Maximum entropy inverse reinforcement learning","volume":"8","author":"ziebart","year":"2008","journal-title":"AAAI"},{"key":"ref36","article-title":"The surprising effectiveness of mappo in cooperative, multi-agent games","author":"yu","year":"2021","journal-title":"arXiv preprint arXiv 2103 05767"},{"key":"ref35","first-page":"5571","article-title":"Mean field multi-agent reinforcement learning","author":"yang","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref34","article-title":"Latent skill planning for exploration and transfer","author":"xie","year":"2021","journal-title":"International Conference on Learning Representations"},{"key":"ref10","article-title":"Learning an embedding space for transferable robot skills","author":"hausman","year":"2018","journal-title":"International Conference on Learning Representations"},{"key":"ref11","article-title":"Skill discovery of coordination in multi-agent reinforcement learning","author":"he","year":"2020","journal-title":"arXiv preprint arXiv 2006 04989"},{"journal-title":"beta-vae Learning basic visual concepts with a constrained variational framework","year":"2016","author":"higgins","key":"ref12"},{"key":"ref13","article-title":"Svqn: Sequential variational soft q-learning networks","author":"huang","year":"2019","journal-title":"International Conference on Learning Representations"},{"key":"ref14","first-page":"2117","article-title":"Deep variational reinforcement learning for pomdps","author":"igl","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/BF02980577"},{"key":"ref16","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014","journal-title":"arXiv preprint arXiv 1412 6980"},{"key":"ref17","article-title":"Stochastic latent actor-critic: Deep reinforcement learning with a latent variable model","author":"lee","year":"2019","journal-title":"arXiv preprint arXiv 1907 09509"},{"key":"ref18","article-title":"Learning to coordinate manipulation skills via skill behavior diversification","author":"lee","year":"2019","journal-title":"International Conference on Learning Representations"},{"key":"ref19","article-title":"Reinforcement learning and control as probabilistic inference: Tutorial and review","author":"levine","year":"2018","journal-title":"arXiv preprint arXiv 1805 00909"},{"key":"ref28","article-title":"The starcraft multi-agent challenge","author":"samvelyan","year":"2019","journal-title":"arXiv preprint arXiv 1902 09080"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1179"},{"key":"ref27","first-page":"4295","article-title":"Qmix: Monotonic value function factorisation for deep multi-agent reinforcement learning","author":"rashid","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref3","article-title":"Dota 2 with large scale deep reinforcement learning","author":"berner","year":"2019","journal-title":"arXiv preprint arXiv 1912 06680"},{"key":"ref6","first-page":"241","article-title":"Variational methods for reinforcement learning","author":"furmston","year":"2010","journal-title":"Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics"},{"key":"ref29","article-title":"Dynamicvae: Decoupling reconstruction error and disentangled representation learning","author":"shao","year":"2020","journal-title":"arXiv preprint arXiv 2009 06795"},{"key":"ref5","article-title":"Diversity is all you need: Learning skills without a reward function","author":"eysenbach","year":"2018","journal-title":"arXiv preprint arXiv 1802 06360"},{"key":"ref8","article-title":"Soft actor-critic algorithms and applications","author":"haarnoja","year":"2018","journal-title":"arXiv preprint arXiv 1812 09111"},{"key":"ref7","first-page":"1851","article-title":"Latent space policies for hierarchical reinforcement learning","author":"haarnoja","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref2","volume":"461","author":"\u00e5str\u00f6m","year":"2006","journal-title":"Advanced PID Control"},{"key":"ref9","article-title":"Mastering atari with discrete world models","author":"hafner","year":"2020","journal-title":"arXiv preprint arXiv 2010 00170"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00889"},{"key":"ref20","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"lowe","year":"2017","journal-title":"arXiv preprint arXiv 1706 02275"},{"key":"ref22","article-title":"Variational information maximisation for intrinsically motivated reinforcement learning","author":"mohamed","year":"2015","journal-title":"arXiv preprint arXiv 1509 08731"},{"key":"ref21","article-title":"Maven: Multi-agent variational exploration","author":"mahajan","year":"2019","journal-title":"arXiv preprint arXiv 1910 05656"},{"key":"ref24","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-319-28929-8","author":"oliehoek","year":"2016","journal-title":"A Concise Introduction to Decentralized POMDPs"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CEC.2009.4983077"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.70"},{"key":"ref25","article-title":"Discovering diverse solutions in deep reinforcement learning","author":"osa","year":"2021","journal-title":"arXiv preprint arXiv 2103 05767"}],"event":{"name":"2022 IEEE Conference on Games (CoG)","start":{"date-parts":[[2022,8,21]]},"location":"Beijing, China","end":{"date-parts":[[2022,8,24]]}},"container-title":["2022 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9893561\/9893544\/09893722.pdf?arnumber=9893722","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,10]],"date-time":"2022-10-10T20:25:33Z","timestamp":1665433533000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9893722\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/cog51982.2022.9893722","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}