{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T09:04:56Z","timestamp":1765357496213,"version":"3.37.3"},"reference-count":43,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2024,8,1]],"date-time":"2024-08-01T00:00:00Z","timestamp":1722470400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,8,1]],"date-time":"2024-08-01T00:00:00Z","timestamp":1722470400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,8,1]],"date-time":"2024-08-01T00:00:00Z","timestamp":1722470400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62073323"],"award-info":[{"award-number":["62073323"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005090","name":"Beijing Nova Program","doi-asserted-by":"publisher","award":["20220484077","20230484435"],"award-info":[{"award-number":["20220484077","20230484435"]}],"id":[{"id":"10.13039\/501100005090","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Strategic Priority Research Program of Chinese Academy of Sciences","award":["XDA27030204"],"award-info":[{"award-number":["XDA27030204"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cogn. Dev. Syst."],"published-print":{"date-parts":[[2024,8]]},"DOI":"10.1109\/tcds.2023.3345735","type":"journal-article","created":{"date-parts":[[2023,12,22]],"date-time":"2023-12-22T19:58:20Z","timestamp":1703275100000},"page":"1302-1314","source":"Crossref","is-referenced-by-count":4,"title":["QFuture: Learning Future Expectation Cognition in Multiagent Reinforcement Learning"],"prefix":"10.1109","volume":"16","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4391-9960","authenticated-orcid":false,"given":"Boyin","family":"Liu","sequence":"first","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4841-4048","authenticated-orcid":false,"given":"Zhiqiang","family":"Pu","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-5417-6013","authenticated-orcid":false,"given":"Yi","family":"Pan","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3268-9482","authenticated-orcid":false,"given":"Jianqiang","family":"Yi","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1938-2603","authenticated-orcid":false,"given":"Min","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8429-9449","authenticated-orcid":false,"given":"Shijie","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2020.3030571"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2018.2885813"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2023.3239815"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10827"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2022.3197298"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2021.3092715"},{"article-title":"Guided deep reinforcement learning for swarm systems","year":"2017","author":"H\u00fcttenrauch","key":"ref7"},{"key":"ref8","article-title":"Hierarchical multiagent reinforcement learning for maritime traffic management","author":"Singh","year":"2020","journal-title":"Adaptive Agents and Multi-Agent Systems"},{"key":"ref9","first-page":"1","article-title":"Maven: Multi-agent variational exploration","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Mahajan","year":"2019"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"ref11","article-title":"On-Line Q-Learning Using Connectionist Systems","author":"Rummery","year":"1994","journal-title":"Eng. Depart., Cambridge Univ., Tech. Rep."},{"key":"ref12","first-page":"1","article-title":"Actor-critic algorithms","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"12","author":"Konda","year":"1999"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/BF00115009"},{"issue":"4","key":"ref14","article-title":"Introduction to reinforcement learning","volume":"2","author":"Sutton","year":"1998"},{"key":"ref15","first-page":"12","article-title":"Robot learning from demonstration","volume-title":"Proc. ICML","volume":"97","author":"Atkeson","year":"1997"},{"article-title":"Dream to control: Learning behaviors by latent imagination","year":"2019","author":"Hafner","key":"ref16"},{"key":"ref17","first-page":"2555","article-title":"Learning latent dynamics for planning from pixels","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Hafner","year":"2019"},{"key":"ref18","first-page":"1","article-title":"Sample-efficient reinforcement learning with stochastic ensemble value expansion","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Buckman","year":"2018"},{"article-title":"Learning dynamics model in reinforcement learning by incorporating the long term future","year":"2019","author":"Ke","key":"ref19"},{"key":"ref20","first-page":"575","article-title":"Learning latent representations to influence multi-agent interaction","volume-title":"Proc. Conf. Robot Learn.","author":"Xie","year":"2021"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00240"},{"article-title":"Prediction-aware and reinforcement learning based altruistic cooperative driving","year":"2022","author":"Valiente","key":"ref22"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/s00426-008-0197-8"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.jebo.2021.04.029"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.physa.2019.03.031"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1016\/j.amc.2019.01.057"},{"article-title":"The starcraft multi-agent challenge","year":"2019","author":"Samvelyan","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5878"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-28929-8"},{"key":"ref30","first-page":"4295","article-title":"QMIX: Monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rashid","year":"2018"},{"article-title":"QPLEX: Duplex dueling multi-agent q-learning","year":"2020","author":"Wang","key":"ref31"},{"article-title":"Qatten: A general framework for cooperative multiagent reinforcement learning","year":"2020","author":"Yang","key":"ref32"},{"key":"ref33","first-page":"10199","article-title":"Weighted QMIX: Expanding monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Rashid","year":"2020"},{"article-title":"Learning to communicate with deep multi-agent reinforcement learning","year":"2016","author":"Foerster","key":"ref34"},{"key":"ref35","first-page":"5887","article-title":"Qtran: Learning to factorize with transformation for cooperative multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Son","year":"2019"},{"article-title":"Value-decomposition networks for cooperative multi-agent learning","year":"2017","author":"Sunehag","key":"ref36"},{"key":"ref37","first-page":"5887","article-title":"Learning to factorize with transformation for cooperative multi-agent reinforcement learning","volume-title":"Proc. 31st Int. Conf. Mach. Learn.","author":"Hostallero","year":"2019"},{"key":"ref38","volume-title":"Elements of Information Theory","volume":"3","author":"C.","year":"1991"},{"article-title":"ROMA: Multi-agent reinforcement learning with emergent roles","year":"2020","author":"Wang","key":"ref39"},{"article-title":"Celebrating diversity in shared multi-agent reinforcement learning","year":"2021","author":"Li","key":"ref40"},{"article-title":"The StarCraft multi-agent challenge","year":"2019","author":"Samvelyan","key":"ref41"},{"key":"ref42","first-page":"1","article-title":"Wide open spaces: A statistical technique for measuring space creation in professional soccer","volume":"2018","author":"Fernandez","year":"2018","journal-title":"Sloan Sports Anal. Conf."},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TG.2022.3207068"}],"container-title":["IEEE Transactions on Cognitive and Developmental Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7274989\/10633860\/10372209.pdf?arnumber=10372209","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,13]],"date-time":"2024-08-13T05:11:45Z","timestamp":1723525905000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10372209\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8]]},"references-count":43,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tcds.2023.3345735","relation":{},"ISSN":["2379-8920","2379-8939"],"issn-type":[{"type":"print","value":"2379-8920"},{"type":"electronic","value":"2379-8939"}],"subject":[],"published":{"date-parts":[[2024,8]]}}}