{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:25:18Z","timestamp":1766067918978,"version":"3.37.3"},"reference-count":33,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3361030","type":"journal-article","created":{"date-parts":[[2024,2,1]],"date-time":"2024-02-01T18:48:40Z","timestamp":1706813320000},"page":"19942-19951","source":"Crossref","is-referenced-by-count":4,"title":["Combined Constraint on Behavior Cloning and Discriminator in Offline Reinforcement Learning"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-7049-5882","authenticated-orcid":false,"given":"Shunya","family":"Kidera","sequence":"first","affiliation":[{"name":"Electrical Engineering Department, Kanazawa University, Kanazawa, Japan"}]},{"given":"Kosuke","family":"Shintani","sequence":"additional","affiliation":[{"name":"Electrical Engineering Department, Kanazawa University, Kanazawa, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1913-9179","authenticated-orcid":false,"given":"Toi","family":"Tsuneda","sequence":"additional","affiliation":[{"name":"Electrical Engineering Department, Kanazawa University, Kanazawa, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7883-4054","authenticated-orcid":false,"given":"Satoshi","family":"Yamane","sequence":"additional","affiliation":[{"name":"Electrical Engineering Department, Kanazawa University, Kanazawa, Japan"}]}],"member":"263","reference":[{"volume-title":"AlphaStar: Mastering the Real-Time Strategy Game StarCraft II","key":"ref1"},{"key":"ref2","article-title":"QT-Opt: Scalable deep reinforcement learning for vision-based robotic manipulation","author":"Kalashnikov","year":"2018","journal-title":"arXiv:1806.10293"},{"doi-asserted-by":"publisher","key":"ref3","DOI":"10.1145\/1772690.1772758"},{"key":"ref4","volume-title":"Introduction To Reinforcement Learning","volume":"135","author":"Sutton","year":"1998"},{"key":"ref5","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"Levine","year":"2020","journal-title":"arXiv:2005.01643"},{"key":"ref6","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto"},{"key":"ref7","article-title":"Way off-policy batch deep reinforcement learning of implicit human preferences in dialog","author":"Jaques","year":"2019","journal-title":"arXiv:1907.00456"},{"key":"ref8","article-title":"Behavior regularized offline reinforcement learning","author":"Wu","year":"2019","journal-title":"arXiv:1911.11361"},{"key":"ref9","article-title":"A minimalist approach to offline reinforcement learning","author":"Fujimoto","year":"2021","journal-title":"arXiv:2106.06860"},{"key":"ref10","first-page":"15","article-title":"Generative adversarial nets","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"27","author":"Goodfellow"},{"doi-asserted-by":"publisher","key":"ref11","DOI":"10.1109\/tnnls.2023.3250269"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref13","article-title":"D4RL: Datasets for deep data-driven reinforcement learning","author":"Fu","year":"2020","journal-title":"arXiv:2004.07219"},{"doi-asserted-by":"publisher","key":"ref14","DOI":"10.1109\/tnn.1998.712192"},{"doi-asserted-by":"publisher","key":"ref15","DOI":"10.1007\/BF00115009"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.7554\/elife.20944.012"},{"issue":"3","key":"ref17","doi-asserted-by":"crossref","first-page":"279","DOI":"10.1007\/BF00992698","article-title":"Q-learning","volume":"8","author":"Watkins","year":"1992","journal-title":"Mach. Learn."},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.7551\/mitpress\/7503.001.0001"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1109\/ACC.2012.6315022"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1038\/nature14236"},{"doi-asserted-by":"publisher","key":"ref21","DOI":"10.1145\/279943.279964"},{"key":"ref22","article-title":"A connection between generative adversarial networks, inverse reinforcement learning, and energy-based models","author":"Finn","year":"2016","journal-title":"arXiv:1611.03852"},{"doi-asserted-by":"publisher","key":"ref23","DOI":"10.1016\/j.robot.2008.10.024"},{"key":"ref24","first-page":"4565","article-title":"Generative adversarial imitation learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Ho"},{"key":"ref25","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto"},{"volume-title":"Proc. 30th Int. Conf. Artif. Intell. Statist.","author":"Ross","article-title":"Efficient reductions for imitation learning","key":"ref26"},{"key":"ref27","first-page":"627","article-title":"A reduction of imitation learning and structured prediction to no-regret online learning","volume-title":"Proc. 14th Int. Conf. Artif. Intell. Statist.","author":"Ross"},{"key":"ref28","article-title":"Conservative Q-learning for offline reinforcement learning","author":"Kumar","year":"2020","journal-title":"arXiv:2006.04779"},{"key":"ref29","article-title":"Stabilizing off-policy Q-learning via bootstrapping error reduction","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Kumar"},{"volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Haarnoja","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","key":"ref30"},{"key":"ref31","article-title":"OpenAI gym","author":"Brockman","year":"2016","journal-title":"arXiv:1606.01540"},{"doi-asserted-by":"publisher","key":"ref32","DOI":"10.1016\/j.simpa.2020.100022"},{"key":"ref33","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/10380310\/10418100.pdf?arnumber=10418100","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T08:28:18Z","timestamp":1707812898000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10418100\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3361030","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2024]]}}}