{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T01:09:54Z","timestamp":1740100194596,"version":"3.37.3"},"reference-count":28,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,7,18]],"date-time":"2021-07-18T00:00:00Z","timestamp":1626566400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,7,18]],"date-time":"2021-07-18T00:00:00Z","timestamp":1626566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,7,18]],"date-time":"2021-07-18T00:00:00Z","timestamp":1626566400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2018AAA0102404"],"award-info":[{"award-number":["2018AAA0102404"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002367","name":"Chinese Academy of Sciences","doi-asserted-by":"publisher","award":["CXYJJ19-ZD-02,CXYJJ20-QN-05"],"award-info":[{"award-number":["CXYJJ19-ZD-02,CXYJJ20-QN-05"]}],"id":[{"id":"10.13039\/501100002367","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,7,18]]},"DOI":"10.1109\/ijcnn52387.2021.9533484","type":"proceedings-article","created":{"date-parts":[[2021,9,20]],"date-time":"2021-09-20T21:27:41Z","timestamp":1632173261000},"page":"1-7","source":"Crossref","is-referenced-by-count":1,"title":["Multi-Agent Cognition Difference Reinforcement Learning for Multi-Agent Cooperation"],"prefix":"10.1109","author":[{"given":"Huimu","family":"Wang","sequence":"first","affiliation":[]},{"given":"Tenghai","family":"Qiu","sequence":"additional","affiliation":[]},{"given":"Zhen","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Zhiqiang","family":"Pu","sequence":"additional","affiliation":[]},{"given":"Jianqiang","family":"Yi","sequence":"additional","affiliation":[]},{"given":"Wanmai","family":"Yuan","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Counterfactual multi-agent policy gradients","author":"foerster","year":"0","journal-title":"Thirty-Second AAAI Conference on Artificial Intelligence"},{"key":"ref11","first-page":"5567","article-title":"Mean field multi-agent reinforcement learning","author":"yang","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref12","first-page":"2244","article-title":"Learning multiagent communication with backpropagation","author":"sukhbaatar","year":"2016","journal-title":"Advances in neural information processing systems"},{"year":"2017","author":"kong","journal-title":"Revisiting the master-slave architecture in multi-agent deep reinforcement learning","key":"ref13"},{"key":"ref14","article-title":"Multiagent bidirectionally-coordinated nets: Emergence of human-level coordination in learning to play starcraft combat games","author":"peng","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref15","first-page":"7254","article-title":"Learning attentional communication for multi-agent cooperation","author":"jiang","year":"2018","journal-title":"Advances in 
neural information processing systems"},{"key":"ref16","first-page":"2961","article-title":"Actor-attention-critic for multi-agent reinforcement learning","author":"iqbal","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref17","first-page":"1538","article-title":"Tarmac: Targeted multi-agent communication","author":"das","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref18","article-title":"Learning transferable cooperative behavior in multi-agent teams","author":"agarwal","year":"2019","journal-title":"ArXiv Preprint"},{"year":"1997","author":"kullback","journal-title":"Information Theory and Statistics","key":"ref19"},{"key":"ref28","article-title":"Graph attention networks","author":"veličković","year":"0","journal-title":"International Conference on Learning Representations"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1109\/TNNLS.2020.3029475"},{"key":"ref27","article-title":"Categorical reparameterization with gumbel-softmax","author":"jang","year":"2016","journal-title":"ArXiv Preprint"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref6","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","author":"mnih","year":"0","journal-title":"International Conference on Machine Learning"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.1038\/nature14236"},{"year":"2016","author":"gu","journal-title":"Deep reinforcement learning for robotic manipulation with asynchronous off-policy updates","key":"ref8"},{"key":"ref7","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015","journal-title":"ArXiv Preprint"},{"key":"ref2","first-page":"980","article-title":"A cooperative multi-agent reinforcement learning framework for resource balancing in complex logistics network","author":"li","year":"0","journal-title":"Proc of International Conference on Autonomous Agents and Multiagent Systems"},{"key":"ref9","first-page":"6379","article-title":"Multi-agent actor-critic for mixed cooperative-competitive environments","author":"lowe","year":"2017","journal-title":"Advances in neural information processing systems"},{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.24963\/ijcai.2018\/79"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1016\/B978-1-55860-335-6.50027-1"},{"key":"ref22","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"ArXiv Preprint"},{"year":"2018","author":"sutton","journal-title":"Reinforcement Learning An Introduction","key":"ref21"},{"doi-asserted-by":"publisher","key":"ref24","DOI":"10.1109\/REDUNDANCY48165.2019.9003345"},{"year":"2019","author":"liu","journal-title":"Multi-agent game abstraction via graph attention neural network","key":"ref23"},{"year":"2014","author":"kingma","journal-title":"Auto-encoding variational bayes","key":"ref26"},{"doi-asserted-by":"publisher","key":"ref25","DOI":"10.1080\/01621459.2017.1285773"}],"event":{"name":"2021 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2021,7,18]]},"location":"Shenzhen, China","end":{"date-parts":[[2021,7,22]]}},"container-title":["2021 International Joint Conference on Neural Networks 
(IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9533266\/9533267\/09533484.pdf?arnumber=9533484","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T15:45:52Z","timestamp":1652197552000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9533484\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,7,18]]},"references-count":28,"URL":"https:\/\/doi.org\/10.1109\/ijcnn52387.2021.9533484","relation":{},"subject":[],"published":{"date-parts":[[2021,7,18]]}}}