{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T07:50:12Z","timestamp":1767340212384,"version":"3.37.3"},"reference-count":20,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T00:00:00Z","timestamp":1652659200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T00:00:00Z","timestamp":1652659200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,16]]},"DOI":"10.1109\/icc45855.2022.9838583","type":"proceedings-article","created":{"date-parts":[[2022,8,11]],"date-time":"2022-08-11T19:37:11Z","timestamp":1660246631000},"page":"413-418","source":"Crossref","is-referenced-by-count":5,"title":["Cross-Domain Communications Between Agents Via Adversarial-Based Domain Adaptation in Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Lichao","family":"Meng","sequence":"first","affiliation":[{"name":"University of Electronic Science and Technology of China,Chengdu,China"}]},{"given":"Jingjing","family":"Li","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China,Chengdu,China"}]},{"given":"Ke","family":"Lu","sequence":"additional","affiliation":[{"name":"University of Electronic Science and Technology of China,Chengdu,China"}]}],"member":"263","reference":[{"key":"ref10","first-page":"1989","article-title":"Cycada: Cycle-consistent adversarial domain adaptation","author":"hoffman","year":"2018","journal-title":"International Conference on Machine Learning"},{"key":"ref11","article-title":"Transfer learning in deep reinforcement learning: A survey","author":"zhu","year":"2020","journal-title":"CoRR"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-019-01527-z"},{"key":"ref13","first-page":"224","article-title":"Generalization and equilibrium in generative adversarial nets (gans)","author":"arora","year":"2017","journal-title":"International Conference on Machine Learning"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3350902"},{"key":"ref15","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v35i12.17251","article-title":"Domain adaptation in reinforcement learning via latent unified state representation","author":"xing","year":"2021"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01219-9_49"},{"key":"ref17","article-title":"Proximal policy optimization algorithms","author":"schulman","year":"2017","journal-title":"CoRR"},{"key":"ref18","first-page":"1480","article-title":"Darla: Improving zero-shot transfer in reinforcement learning","author":"higgins","year":"2017","journal-title":"International Conference on Machine Learning"},{"key":"ref19","article-title":"CURL: contrastive unsupervised representations for reinforcement learning","author":"srinivas","year":"2020","journal-title":"CoRR"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1177\/0278364917710318"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CCWC.2019.8666545"},{"key":"ref6","first-page":"2063","article-title":"Transfer learning for related reinforcement learning tasks via image-to-image translation","author":"gamrian","year":"2019","journal-title":"International Conference on Machine Learning"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2020.113420"},{"key":"ref8","article-title":"Maximum density divergence for domain adaptation","author":"li","year":"2020","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.316"},{"article-title":"Playing atari with deep reinforcement learning","year":"2013","author":"mnih","key":"ref2"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3912"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01274"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.244"}],"event":{"name":"ICC 2022 - IEEE International Conference on Communications","start":{"date-parts":[[2022,5,16]]},"location":"Seoul, Korea, Republic of","end":{"date-parts":[[2022,5,20]]}},"container-title":["ICC 2022 - IEEE International Conference on Communications"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9837954\/9838246\/09838583.pdf?arnumber=9838583","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,2,14]],"date-time":"2023-02-14T11:07:12Z","timestamp":1676372832000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9838583\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,16]]},"references-count":20,"URL":"https:\/\/doi.org\/10.1109\/icc45855.2022.9838583","relation":{},"subject":[],"published":{"date-parts":[[2022,5,16]]}}}