{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T15:47:50Z","timestamp":1776181670464,"version":"3.50.1"},"reference-count":34,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,10,1]],"date-time":"2022-10-01T00:00:00Z","timestamp":1664582400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Major Science and Technology Innovation 2030","award":["2020AAA0104803"],"award-info":[{"award-number":["2020AAA0104803"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],
"published-print":{"date-parts":[[2022,10]]},"DOI":"10.1109\/lra.2022.3196782","type":"journal-article","created":{"date-parts":[[2022,8,5]],"date-time":"2022-08-05T19:27:05Z","timestamp":1659727625000},"page":"11362-11369","source":"Crossref","is-referenced-by-count":8,"title":["CRMRL: Collaborative Relationship Meta Reinforcement Learning for Effectively Adapting to Type Changes in Multi-Robotic System"],"prefix":"10.1109","volume":"7","author":[{"given":"Hongda","family":"Jia","sequence":"first","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"given":"Yong","family":"Zhao","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1385-0074","authenticated-orcid":false,"given":"Yuanzhao","family":"Zhai","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1236-8318","authenticated-orcid":false,"given":"Bo","family":"Ding","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"given":"Huaimin","family":"Wang","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]},{"given":"Qingtong","family":"Wu","sequence":"additional","affiliation":[{"name":"College of Computer, National University of Defense Technology, Changsha, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1","article-title":"Learning to adapt in dynamic, real-world environments through meta-reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Nagabandi","year":"2019"},
{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2021.3102636"},{"key":"ref3","first-page":"5331","article-title":"Efficient off-policy meta-reinforcement learning via probabilistic context variables","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rakelly","year":"2019"},{"key":"ref4","first-page":"1","article-title":"Rode: Learning roles to decompose multi-agent tasks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wang","year":"2021"},{"key":"ref5","first-page":"1","article-title":"UPDeT: Universal multi-agent reinforcement learning via policy decoupling with transformers","volume-title":"Proc. 9th Int. Conf. Learn. Representations","author":"Hu","year":"2021"},{"key":"ref6","first-page":"2085","article-title":"Value-decomposition networks for cooperative multi-agent learning","volume-title":"Proc. Int. Joint Conf. Auton. Agents Multi-agent Syst.","author":"Sunehag","year":"2018"},{"key":"ref7","first-page":"4295","article-title":"Monotonic value function factorisation for deep multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Rashid","year":"2020"},{"key":"ref8","first-page":"5887","article-title":"QTRAN: Learning to factorize with transformation for cooperative multi-agent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Son","year":"2019"},{"key":"ref9","first-page":"1349","article-title":"Investigating human priors for playing video games","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Dubey","year":"2018"},{"key":"ref10","first-page":"195","article-title":"One-shot learning with a hierarchical nonparametric bayesian model","volume-title":"Proc. ICML Workshop Unsupervised Transfer Learn.","author":"Salakhutdinov","year":"2012"},
{"key":"ref11","first-page":"1","article-title":"Meaning and compositionality as statistical induction of categories and constraints","author":"Schmidt","year":"2009"},{"key":"ref12","first-page":"1842","article-title":"Meta-learning with memory-augmented neural networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Santoro","year":"2016"},{"key":"ref13","first-page":"3981","article-title":"Learning to learn by gradient descent by gradient descent","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Andrychowicz","year":"2016"},{"key":"ref14","first-page":"5463","article-title":"Learning to explore via meta-policy gradient","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Xu","year":"2018"},{"key":"ref15","article-title":"Learning to learn: Meta-critic networks for sample efficient learning","author":"Sung","year":"2017"},{"key":"ref16","first-page":"5405","article-title":"Evolved policy gradients","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Houthooft","year":"2018"},{"key":"ref17","first-page":"557","article-title":"A greedy approach to adapting the trace parameter for temporal difference learning","volume-title":"Proc. 15th Int. Con. Auton. Agents Multiagent Syst.","author":"White","year":"2016"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-48881-3_56"},{"key":"ref19","first-page":"3630","article-title":"Matching networks for one shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Vinyals","year":"2016"},{"key":"ref20","first-page":"1126","article-title":"Model-agnostic meta-learning for fast adaptation of deep networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Finn","year":"2017"},
{"key":"ref21","article-title":"On first-order meta-learning algorithms","author":"Nichol","year":"2018"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1038\/s41593-018-0147-8"},{"key":"ref23","first-page":"1","article-title":"RL2: Fast reinforcement learning via slow reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Duan","year":"2017"},{"key":"ref24","first-page":"1","article-title":"A simple neural attentive meta-learner","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Mishra","year":"2018"},{"key":"ref25","first-page":"1","article-title":"Improving generalization in meta reinforcement learning using learned objectives","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kirsch","year":"2020"},{"key":"ref26","first-page":"1","article-title":"Meta-Q-learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Fakoor","year":"2020"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0172395"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11794"},{"key":"ref29","first-page":"1","article-title":"QATTEN: A general framework for cooperative multiagent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yang","year":"2020"},{"key":"ref30","first-page":"1","article-title":"QPLEX: Duplex dueling multi-agent Q-learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wang","year":"2021"},{"key":"ref31","first-page":"10706","article-title":"Q-value path decomposition for deep multiagent reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Yang","year":"2020"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/W14-4012"},{"key":"ref33","first-page":"2186","article-title":"The starcraft multi-agent challenge","volume-title":"Proc. 18th Int. Conf. Auton. Agents MultiAgent Syst.","author":"Samvelyan","year":"2019"},
{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.5772\/5618"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/9831196\/09851500.pdf?arnumber=9851500","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T11:19:54Z","timestamp":1706786394000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9851500\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10]]},"references-count":34,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/lra.2022.3196782","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,10]]}}}