{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,7]],"date-time":"2024-09-07T15:46:49Z","timestamp":1725724009101},"reference-count":15,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,8,21]],"date-time":"2022-08-21T00:00:00Z","timestamp":1661040000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,8,21]]},"DOI":"10.1109\/cog51982.2022.9893655","type":"proceedings-article","created":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T19:33:31Z","timestamp":1663702411000},"page":"516-519","source":"Crossref","is-referenced-by-count":2,"title":["Regularized Soft Actor-Critic for Behavior Transfer Learning"],"prefix":"10.1109","author":[{"given":"Mingxi","family":"Tan","sequence":"first","affiliation":[{"name":"Ubisoft La Forge Ubisoft,Chengdu,China"}]},{"given":"Andong","family":"Tian","sequence":"additional","affiliation":[{"name":"Ubisoft La Forge Ubisoft,Chengdu,China"}]},{"given":"Ludovic","family":"Denoyer","sequence":"additional","affiliation":[{"name":"Ubisoft La Forge Ubisoft,Chengdu,China"}]}],"member":"263","reference":[{"key":"ref10","article-title":"DThe gan landscape: Losses, architectures, regularization, and normalization","author":"kurach","year":"2018","journal-title":"arXiv preprint arXiv 1807 04720"},{"key":"ref11","article-title":"Constrained Markov decision processes","author":"altman","year":"1999","journal-title":"CRC Press"},{"key":"ref12","article-title":"Projection-based constrained policy optimization","author":"yang","year":"2020","journal-title":"arXiv preprint arXiv 2010 00170"},{"key":"ref13","article-title":"First order constrained optimization in policy space","author":"zhang","year":"2020","journal-title":"arXiv preprint arXiv 2002 05155"},{"key":"ref14","article-title":"Safe reinforcement learning via curriculum induction","author":"turchetta","year":"2020","journal-title":"arXiv preprint arXiv 2006 12661"},{"key":"ref15","article-title":"Reinforcement learning with deep energy-based policies","author":"haarnoja","year":"2017","journal-title":"Computer Research Repository (CoRR)"},{"key":"ref4","article-title":"Learning robust rewards with adversarial inverse reinforcement learning","author":"fu","year":"2017","journal-title":"arXiv preprint arXiv 1710 11248"},{"key":"ref3","article-title":"SQIL: Imitation Learning via Reinforcement Learning with Sparse Rewards","author":"reddy","year":"2020","journal-title":"International Conference on Learning Representations"},{"key":"ref6","first-page":"627","article-title":"A reduction of imitation learning and structured prediction to no-regret online learning","author":"ross","year":"2011","journal-title":"In Proceedings of the Fourteenth International Conference on Artificial Intelligence and Statistics"},{"key":"ref5","first-page":"4565","article-title":"Generative adversarial imitation learning","author":"ho","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref8","first-page":"tab","article-title":"Dynamic weights in multi-objective deep reinforcement learning","author":"abels","year":"2019","journal-title":"Proc of the International 
Conference on Machine Learning (ICML)"},{"key":"ref7","article-title":"Multi-objective deep reinforcement learning","author":"mossalam","year":"2016","journal-title":"Computer Research Repository (CoRR)"},{"key":"ref2","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","author":"ng","year":"2000","journal-title":"Proc of the International Conference on Machine Learning (ICML)"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1991.3.1.88"},{"key":"ref9","first-page":"14610","article-title":"A Generalized Algorithm for Multi-Objective Reinforcement Learning and Policy Adaptation","author":"yang","year":"2019","journal-title":"Neural Information Processing Systems"}],"event":{"name":"2022 IEEE Conference on Games (CoG)","start":{"date-parts":[[2022,8,21]]},"location":"Beijing, China","end":{"date-parts":[[2022,8,24]]}},"container-title":["2022 IEEE Conference on Games (CoG)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9893561\/9893544\/09893655.pdf?arnumber=9893655","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,14]],"date-time":"2022-10-14T20:53:15Z","timestamp":1665780795000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9893655\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,21]]},"references-count":15,"URL":"https:\/\/doi.org\/10.1109\/cog51982.2022.9893655","relation":{},"subject":[],"published":{"date-parts":[[2022,8,21]]}}}
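For context, the record above is the JSON body returned by the public Crossref REST API for this DOI, with the bibliographic payload nested under the "message" key. Below is a minimal Python sketch of how such a record might be fetched and a few of its fields read; the api.crossref.org works route is the standard Crossref endpoint, while the use of the requests library and the particular fields printed are illustrative choices, not part of the record itself.

import requests

DOI = "10.1109/cog51982.2022.9893655"

# Fetch the work record; the Crossref works endpoint returns JSON shaped like the record above.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# Read fields that appear in this record.
title = work["title"][0]                 # "Regularized Soft Actor-Critic for Behavior Transfer Learning"
authors = [f'{a["given"]} {a["family"]}' for a in work["author"]]
event = work["event"]["name"]            # "2022 IEEE Conference on Games (CoG)"
pages = work["page"]                     # "516-519"
references = work.get("reference", [])   # 15 entries, matching "references-count"

print(title)
print(", ".join(authors))
print(f"{event}, pp. {pages}, {len(references)} references")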