{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:00:04Z","timestamp":1766066404506,"version":"3.37.3"},"reference-count":38,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"7","license":[{"start":{"date-parts":[[2015,7,1]],"date-time":"2015-07-01T00:00:00Z","timestamp":1435708800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/OAPA.html"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61175042","61035003","61321491","61202212"],"award-info":[{"award-number":["61175042","61035003","61321491","61202212"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"973 Program of Jiangsu, China","award":["BK2011005"],"award-info":[{"award-number":["BK2011005"]}]},{"DOI":"10.13039\/501100004602","name":"Program for New Century Excellent Talents in University","doi-asserted-by":"publisher","award":["NCET-10-0476"],"award-info":[{"award-number":["NCET-10-0476"]}],"id":[{"id":"10.13039\/501100004602","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Program for Research and Innovation of Graduate Students in General Colleges and Universities, Jiangsu","award":["CXLX13_049"],"award-info":[{"award-number":["CXLX13_049"]}]},{"name":"Collaborative Innovation Center of Novel Software Technology and Industrialization"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cybern."],"published-print":{"date-parts":[[2015,7]]},"DOI":"10.1109\/tcyb.2014.2349152","type":"journal-article","created":{"date-parts":[[2014,8,29]],"date-time":"2014-08-29T18:32:48Z","timestamp":1409337168000},"page":"1289-1302","source":"Crossref","is-referenced-by-count":41,"title":["Accelerating Multiagent Reinforcement Learning by Equilibrium Transfer"],"prefix":"10.1109","volume":"45","author":[{"given":"Yujing","family":"Hu","sequence":"first","affiliation":[]},{"given":"Yang","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Bo","family":"An","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref38","first-page":"1141","article-title":"Fast adaptive learning in repeated stochastic games by game abstraction","author":"elidrisi","year":"2014","journal-title":"Proc Int Conf Auton Agents and Multi Agent Syst"},{"doi-asserted-by":"publisher","key":"ref33","DOI":"10.1145\/1015330.1015410"},{"doi-asserted-by":"publisher","key":"ref32","DOI":"10.1109\/TSMCB.2008.920998"},{"year":"2014","journal-title":"Sic Bo","key":"ref31"},{"key":"ref30","first-page":"690","article-title":"Heuristic selection of actions in multiagent reinforcement learning","author":"bianchi","year":"2007","journal-title":"Proc 20th Int Joint Conf Artif Intell"},{"doi-asserted-by":"publisher","key":"ref37","DOI":"10.1145\/2229012.2229079"},{"key":"ref36","first-page":"757","article-title":"Integrating organizational control into multi-agent learning","author":"zhang","year":"2009","journal-title":"Proc Int Conf Autonomous Agents and Multiagent Systems"},{"key":"ref35","first-page":"715","article-title":"Learning multi-agent state space representations","author":"de hauwere","year":"2010","journal-title":"Proc Int Conf Autonomous Agents and Multiagent Systems"},{"key":"ref34","first-page":"773","article-title":"Learning of coordination: Exploiting sparse interactions in multiagent systems","author":"melo","year":"2009","journal-title":"Proc Int Conf Autonomous Agents and Multiagent Systems"},{"doi-asserted-by":"publisher","key":"ref10","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref11","first-page":"322","article-title":"Friend-or-foe Q-learning in general-sum games","author":"littman","year":"2001","journal-title":"Proc 18th Int Conf Mach Learn"},{"key":"ref12","first-page":"1039","article-title":"Nash Q-learning for general-sum stochastic games","volume":"4","author":"hu","year":"2003","journal-title":"J Mach Learn Res"},{"key":"ref13","first-page":"242","article-title":"Correlated Q-learning","author":"greenwald","year":"2003","journal-title":"Proc 20th Int Conf Mach Learn"},{"doi-asserted-by":"publisher","key":"ref14","DOI":"10.1016\/0925-2312(94)00027-P"},{"key":"ref15","doi-asserted-by":"crossref","first-page":"311","DOI":"10.3233\/AIC-2010-0476","article-title":"Generalized learning automata for multi-agent reinforcement learning","volume":"23","author":"de hauwere","year":"2010","journal-title":"AI Commun"},{"key":"ref16","first-page":"764","article-title":"Coordinated multi-agent reinforcement learning in networked distributed POMDPs","author":"zhang","year":"2011","journal-title":"Proc 25th AAAI Conf Artif Intell"},{"key":"ref17","first-page":"1101","article-title":"Coordinating multi-agent reinforcement learning with limited communication","author":"zhang","year":"2013","journal-title":"Proc Int Conf Autonomous Agents and Multiagent Systems"},{"key":"ref18","first-page":"707","article-title":"Quick polytope approximation of all correlated equilibria in stochastic games","author":"macdermed","year":"2011","journal-title":"Proc 25th AAAI Conf Artif Intell"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1109\/TSMCC.2007.913919"},{"key":"ref28","first-page":"2635","article-title":"Multi-agent reinforcement learning in common interest and fixed sum stochastic games: An experimental study","volume":"9","author":"bab","year":"2008","journal-title":"J Mach Learn Res"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1109\/TSMCC.2003.817354"},{"doi-asserted-by":"publisher","key":"ref27","DOI":"10.1162\/089976699300016070"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1016\/S1389-1286(00)00090-6"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1109\/TSMCC.2010.2099654"},{"doi-asserted-by":"publisher","key":"ref29","DOI":"10.1145\/545056.545106"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.1109\/JSAC.2012.120118"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.1016\/j.future.2010.10.009"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1007\/s11227-009-0318-1"},{"doi-asserted-by":"publisher","key":"ref2","DOI":"10.1007\/s10462-011-9244-8"},{"doi-asserted-by":"publisher","key":"ref9","DOI":"10.1109\/TSMCC.2012.2218596"},{"year":"1998","author":"sutton","journal-title":"Reinforcement Learning An Introduction","key":"ref1"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1137\/070699652"},{"doi-asserted-by":"publisher","key":"ref22","DOI":"10.1016\/j.geb.2006.03.015"},{"key":"ref21","first-page":"1571","article-title":"Reinforcement learning to play an optimal nash equilibrium in team Markov games","volume":"15","author":"wang","year":"2002","journal-title":"Proc Adv Neural Inf Process Syst"},{"year":"1994","author":"osborne","journal-title":"A Course in Game Theory","key":"ref24"},{"key":"ref23","first-page":"1633","article-title":"Transfer learning for reinforcement learning domains: A survey","volume":"10","author":"taylor","year":"2009","journal-title":"J Mach Learn Res"},{"doi-asserted-by":"publisher","key":"ref26","DOI":"10.1109\/IAT.2003.1241094"},{"key":"ref25","first-page":"1186","article-title":"Solving stochastic games","author":"dermed","year":"2009","journal-title":"Proc Adv Neural Inf Process Syst"}],"container-title":["IEEE Transactions on Cybernetics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221036\/7122378\/06888505.pdf?arnumber=6888505","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,1,12]],"date-time":"2022-01-12T16:28:57Z","timestamp":1642004937000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/6888505\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2015,7]]},"references-count":38,"journal-issue":{"issue":"7"},"URL":"https:\/\/doi.org\/10.1109\/tcyb.2014.2349152","relation":{},"ISSN":["2168-2267","2168-2275"],"issn-type":[{"type":"print","value":"2168-2267"},{"type":"electronic","value":"2168-2275"}],"subject":[],"published":{"date-parts":[[2015,7]]}}}