{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:13:09Z","timestamp":1763190789402,"version":"3.45.0"},"reference-count":47,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100002367","name":"Chinese Academy of Sciences","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002367","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100010450","name":"Nova","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100010450","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100010450","name":"Nova","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100010450","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11227369","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Diversity-Driven Offline-to-Online Multi-Player Policy Learning for Football Matches"],"prefix":"10.1109","author":[{"given":"Shijie","family":"Wang","sequence":"first","affiliation":[{"name":"School of Artificial Intelligence, UCAS Institute of Automation, CAS,Beijing,China"}]},{"given":"Zhiqiang","family":"Pu","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, UCAS Institute of Automation, CAS,Beijing,China"}]},{"given":"Tianyi","family":"Hu","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, UCAS Institute of Automation, CAS,Beijing,China"}]},{"given":"Hao","family":"Ma","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, UCAS Institute of Automation, CAS,Beijing,China"}]},{"given":"Huimu","family":"Wang","sequence":"additional","affiliation":[{"name":"JD.COM,Beijing,China"}]},{"given":"Jianqiang","family":"Yi","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, UCAS Institute of Automation, CAS,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"21937","article-title":"Lazy agents: a new perspective on solving sparse reward problem in multi-agent reinforcement learning","volume-title":"International Conference on Machine Learning","author":"Liu"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2023.3239815"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-32060-6_34"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5878"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/s11214-015-0169-4"},{"issue":"1","key":"ref7","first-page":"7234","article-title":"Monotonic value function factorisation for deep multi-agent reinforcement learning","volume":"21","author":"Rashid","year":"2020","journal-title":"The Journal of Machine Learning Research"},{"key":"ref8","first-page":"3991","article-title":"Celebrating diversity in shared multi-agent reinforcement learning","volume":"34","author":"Li","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Rode: Learning roles to decompose multi-agent tasks","year":"2020","author":"Wang","key":"ref9"},{"article-title":"Roma: Multi-agent reinforcement learning with emergent roles","year":"2020","author":"Wang","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.anbehav.2005.03.004"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1038\/380121a0"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/0040-1625(88)90013-3"},{"key":"ref14","first-page":"9041","article-title":"Policy diagnosis via measuring role diversity in cooperative multi-agent rl","volume-title":"International Conference on Machine Learning","author":"Hu"},{"key":"ref15","article-title":"Maven: Multi-agent variational exploration","volume":"32","author":"Mahajan","year":"2019","journal-title":"Advances in neural information processing systems"},{"article-title":"Influence-based multi-agent exploration","year":"2019","author":"Wang","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1142\/S0218194018500043"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v24i1.7679"},{"article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","year":"2020","author":"Levine","key":"ref19"},{"article-title":"Boosting offline reinforcement learning via data rebalancing","year":"2022","author":"Yue","key":"ref20"},{"key":"ref21","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"International conference on machine learning","author":"Fujimoto"},{"article-title":"Behavior regularized offline reinforcement learning","year":"2019","author":"Wu","key":"ref22"},{"key":"ref23","article-title":"Stabilizing off-policy q-learning via bootstrapping error reduction","volume":"32","author":"Kumar","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref24","first-page":"20132","article-title":"A minimalist approach to offline reinforcement learning","volume":"34","author":"Fujimoto","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref25","first-page":"5774","article-title":"Offline reinforcement learning with fisher divergence critic regularization","volume-title":"International Conference on Machine Learning","author":"Kostrikov"},{"key":"ref26","first-page":"1179","article-title":"Conservative q-learning for offline reinforcement learning","volume":"33","author":"Kumar","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.14428\/esann\/2022.ES2022-110"},{"article-title":"Awac: Accelerating online reinforcement learning with offline datasets","year":"2020","author":"Nair","key":"ref28"},{"key":"ref29","first-page":"1702","article-title":"Offline-to-online reinforcement learning via balanced replay and pessimistic q-ensemble","volume-title":"Conference on Robot Learning","author":"Lee"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2023.3302804"},{"key":"ref31","doi-asserted-by":"crossref","first-page":"331","DOI":"10.1016\/S0927-0507(05)80172-0","article-title":"Markov decision processes","volume":"2","author":"Puterman","year":"1990","journal-title":"Handbooks in Operations Research and Management Science"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-0716-0368-0_530"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/BF01065789"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.5555\/2986459.2986721"},{"article-title":"Understanding diffusion models: A unified perspective","year":"2022","author":"Luo","key":"ref36"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1016\/j.jmva.2006.03.007"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1002\/widm.30"},{"key":"ref39","first-page":"16509","article-title":"Multi-agent reinforcement learning is a sequence modeling problem","volume":"35","author":"Wen","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1017\/S0962492900002919"},{"article-title":"Soft actor-critic algorithms and applications","year":"2018","author":"Haarnoja","key":"ref41"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.12794\/metadc1505267"},{"article-title":"High-dimensional continuous control using generalized advantage estimation","year":"2015","author":"Schulman","key":"ref43"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1037\/a0029550"},{"key":"ref45","first-page":"24611","article-title":"The surprising effectiveness of ppo in cooperative multi-agent games","volume":"35","author":"Yu","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref46","first-page":"1046","article-title":"Trust region policy optimisation in multi-agent reinforcement learning","volume-title":"ICLR 2022-10th International Conference on Learning Representations. The International Conference on Learning Representations (ICLR)","author":"Kuba"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1186\/s40064-016-3108-2"}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11227369.pdf?arnumber=11227369","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:09:46Z","timestamp":1763190586000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11227369\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":47,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11227369","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}