{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T16:01:35Z","timestamp":1772553695592,"version":"3.50.1"},"reference-count":79,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"6","license":[{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,1]],"date-time":"2025-06-01T00:00:00Z","timestamp":1748736000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,6]]},"DOI":"10.1109\/tnnls.2024.3497667","type":"journal-article","created":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T18:55:55Z","timestamp":1732820155000},"page":"11399-11413","source":"Crossref","is-referenced-by-count":5,"title":["ACL-QL: Adaptive Conservative Level in <i>Q<\/i>-Learning for Offline Reinforcement Learning"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2095-2140","authenticated-orcid":false,"given":"Kun","family":"Wu","sequence":"first","affiliation":[{"name":"Department of Electrical Engineering and Computer Science, Syracuse University, Syracuse, NY, USA"}]},{"given":"Yinuo","family":"Zhao","sequence":"additional","affiliation":[{"name":"Beijing Institute of Technology, Beijing, China"}]},{"given":"Zhiyuan","family":"Xu","sequence":"additional","affiliation":[{"name":"Beijing Innovation Center of Humanoid Robotics, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6818-1125","authenticated-orcid":false,"given":"Zhengping","family":"Che","sequence":"additional","affiliation":[{"name":"Beijing Innovation Center of Humanoid Robotics, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3238-960X","authenticated-orcid":false,"given":"Chengxiang","family":"Yin","sequence":"additional","affiliation":[{"name":"Department of Electrical Engineering and Computer Science, Syracuse University, Syracuse, NY, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0252-329X","authenticated-orcid":false,"given":"Chi","family":"Harold Liu","sequence":"additional","affiliation":[{"name":"Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-8612-5022","authenticated-orcid":false,"given":"Feifei","family":"Feng","sequence":"additional","affiliation":[{"name":"Midea Group, Shanghai, China"}]},{"given":"Jian","family":"Tang","sequence":"additional","affiliation":[{"name":"Beijing Innovation Center of Humanoid Robotics, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2790981"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2891792"},{"key":"ref4","first-page":"651","article-title":"Scalable deep reinforcement learning for vision-based robotic manipulation","volume-title":"Proc. Conf. 
Robot Learn.","author":"Kalashnikov"},{"key":"ref5","article-title":"Solving Rubik\u2019s cube with a robot hand","author":"Akkaya","year":"2019","journal-title":"arXiv:1910.07113"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3124466"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3109284"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3116063"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20259"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-27645-3_2"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3250269"},{"key":"ref12","first-page":"11784","article-title":"Stabilizing off-policy Q-learning via bootstrapping error reduction","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kumar"},{"key":"ref13","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"Levine","year":"2020","journal-title":"arXiv:2005.01643"},{"key":"ref14","article-title":"Way off-policy batch deep reinforcement learning of implicit human preferences in dialog","author":"Jaques","year":"2019","journal-title":"arXiv:1907.00456"},{"key":"ref15","article-title":"Behavior regularized offline reinforcement learning","author":"Wu","year":"2019","journal-title":"arXiv:1911.11361"},{"key":"ref16","article-title":"Keep doing what worked: Behavior modelling priors for offline reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Siegel"},{"key":"ref17","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto"},{"key":"ref18","first-page":"1179","article-title":"Conservative Q-learning for offline reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kumar"},{"key":"ref19","first-page":"11501","article-title":"Conservative data sharing for multi-task offline reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yu"},{"key":"ref20","first-page":"28954","article-title":"COMBO: Conservative offline model-based policy optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yu"},{"key":"ref21","first-page":"19235","article-title":"Conservative offline distributional reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ma"},{"key":"ref22","first-page":"18353","article-title":"BAIL: Best-action imitation learning for batch deep reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen"},{"key":"ref23","first-page":"7768","article-title":"Critic regularized regression","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Wang"},{"key":"ref24","first-page":"6266","article-title":"Curriculum offline imitating learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref25","article-title":"Issues in using function approximation for reinforcement learning","volume-title":"Proceedings of the 1993 Connectionist Models Summer School","author":"Thrun"},{"key":"ref26","first-page":"2021","article-title":"Diagnosing bottlenecks in deep Q-learning algorithms","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Fu"},{"key":"ref27","article-title":"Towards characterizing divergence in deep Q-learning","author":"Achiam","year":"2019","journal-title":"arXiv:1903.08894"},{"key":"ref28","first-page":"9949","article-title":"Non-delusional Q-learning and value-iteration","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Lu"},{"key":"ref29","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref30","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2018.2821369"},{"key":"ref32","article-title":"Prioritized experience replay","author":"Schaul","year":"2015","journal-title":"arXiv:1511.05952"},{"key":"ref33","article-title":"Playing Atari with deep reinforcement learning","author":"Mnih","year":"2013","journal-title":"arXiv:1312.5602"},{"key":"ref34","article-title":"Distributed prioritized experience replay","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Horgan"},{"key":"ref35","first-page":"14219","article-title":"An equivalence between loss functions and non-uniform sampling in experience replay","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Fujimoto"},{"key":"ref36","article-title":"Model-augmented prioritized experience replay","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Oh"},{"key":"ref37","article-title":"Demystifying reinforcement learning in time-varying systems","author":"Hamadanian","year":"2022","journal-title":"arXiv:2201.05560"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.13673"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2020.11.050"},{"key":"ref40","first-page":"151","article-title":"Understanding the impact of entropy on policy optimization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ahmed"},{"key":"ref41","article-title":"Offline reinforcement learning with implicit Q-learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Kostrikov"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3217189"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3309906"},{"key":"ref44","first-page":"1719","article-title":"PLAS: Latent action space for offline reinforcement learning","volume-title":"Proc. Conf. Robot Learn.","author":"Zhou"},{"key":"ref45","first-page":"5774","article-title":"Offline reinforcement learning with Fisher divergence critic regularization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kostrikov"},{"key":"ref46","first-page":"20132","article-title":"A minimalist approach to offline reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Fujimoto"},{"issue":"73","key":"ref47","first-page":"1","article-title":"An emphatic approach to the problem of off-policy temporal-difference learning","volume":"17","author":"Sutton","year":"2016","journal-title":"J. Mach. Learn. 
Res."},{"key":"ref48","article-title":"AlgaeDICE: Policy gradient from arbitrary experience","author":"Nachum","year":"2019","journal-title":"arXiv:1912.02074"},{"key":"ref49","article-title":"AWAC: Accelerating online reinforcement learning with offline datasets","author":"Nair","year":"2020","journal-title":"arXiv:2006.09359"},{"key":"ref50","article-title":"Advantage-weighted regression: Simple and scalable off-policy reinforcement learning","author":"Bin Peng","year":"2019","journal-title":"arXiv:1910.00177"},{"key":"ref51","article-title":"Safe policy improvement with an estimated baseline policy","volume-title":"Proc. 19th Int. Conf. Auto. Agents MultiAgent Syst.","author":"Sim\u00e3o"},{"key":"ref52","first-page":"104","article-title":"An optimistic perspective on offline reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Agarwal"},{"key":"ref53","first-page":"907","article-title":"S4RL: Surprisingly simple self-supervision for offline reinforcement learning in robotics","volume-title":"Proc. Conf. Robot Learn.","author":"Sinha"},{"key":"ref54","first-page":"4933","article-title":"Offline RL without off-policy evaluation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Brandfonbrener"},{"key":"ref55","first-page":"1273","article-title":"Offline reinforcement learning as one big sequence modeling problem","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Janner"},{"key":"ref56","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3293508"},{"key":"ref59","first-page":"12519","article-title":"When to trust your model: Model-based policy optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Janner"},{"key":"ref60","first-page":"21810","article-title":"Morel: Model-based offline reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kidambi"},{"key":"ref61","first-page":"14129","article-title":"MOPO: Model-based offline policy optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yu"},{"key":"ref62","article-title":"Deployment-efficient reinforcement learning via model-based offline optimization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Matsushima"},{"key":"ref63","first-page":"1905","article-title":"Agnostic system identification for model-based reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ross"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386025"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/TCIAIG.2012.2186810"},{"key":"ref66","first-page":"1563","article-title":"Near-optimal regret bounds for reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Auer"},{"key":"ref67","first-page":"4033","article-title":"Deep exploration via bootstrapped DQN","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Osband"},{"key":"ref68","article-title":"Semi-supervised classification with graph convolutional networks","volume-title":"Proc. Int. Conf. Learn. 
Represent.","author":"Kipf"},{"key":"ref69","article-title":"Deep learning using rectified linear units (ReLU)","author":"Agarap","year":"2018","journal-title":"arXiv:1803.08375"},{"key":"ref70","article-title":"D4RL: Datasets for deep data-driven reinforcement learning","author":"Fu","year":"2020","journal-title":"arXiv:2004.07219"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref72","article-title":"OpenAI gym","author":"Brockman","year":"2016","journal-title":"arXiv:1606.01540"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2018.XIV.049"},{"key":"ref74","first-page":"305","article-title":"ALVINN: An autonomous land vehicle in a neural network","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Pomerleau"},{"key":"ref75","article-title":"Reproducibility of benchmarked deep reinforcement learning tasks for continuous control","author":"Islam","year":"2017","journal-title":"arXiv:1708.04133"},{"key":"ref76","article-title":"A workflow for offline model-free robotic reinforcement learning","author":"Kumar","year":"2021","journal-title":"arXiv:2109.10813"},{"key":"ref77","article-title":"Relay policy learning: Solving long horizon tasks via imitation and reinforcement learning","volume-title":"Proc. Conf. Robot Learn. (CoRL)","author":"Gupta"},{"key":"ref78","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/TAC.2016.2616644"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/5962385\/11022714\/10771594.pdf?arnumber=10771594","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,4]],"date-time":"2025-06-04T17:57:26Z","timestamp":1749059846000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10771594\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6]]},"references-count":79,"journal-issue":{"issue":"6"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2024.3497667","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,6]]}}}