{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T12:40:36Z","timestamp":1766061636906,"version":"3.48.0"},"reference-count":46,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iros60139.2025.11246521","type":"proceedings-article","created":{"date-parts":[[2025,11,27]],"date-time":"2025-11-27T18:54:45Z","timestamp":1764269685000},"page":"13468-13475","source":"Crossref","is-referenced-by-count":0,"title":["Diffusion Policies with Value-Conditional Optimization for Offline Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Yunchang","family":"Ma","sequence":"first","affiliation":[{"name":"National University of Defense Technology,College of Intelligence Science and Technology,Changsha,China"}]},{"given":"Tenglong","family":"Liu","sequence":"additional","affiliation":[{"name":"National University of Defense Technology,College of Intelligence Science and Technology,Changsha,China"}]},{"given":"Yixing","family":"Lan","sequence":"additional","affiliation":[{"name":"National University of Defense Technology,College of Intelligence Science and Technology,Changsha,China"}]},{"given":"Xin","family":"Yin","sequence":"additional","affiliation":[{"name":"National University of Defense Technology,College of Intelligence Science and Technology,Changsha,China"}]},{"given":"Changxin","family":"Zhang","sequence":"additional","affiliation":[{"name":"National University of Defense Technology,College of Intelligence Science and Technology,Changsha,China"}]},{"given":"Xinglong","family":"Zhang","sequence":"additional","affiliation":[{"name":"National University of Defense Technology,College of Intelligence Science and Technology,Changsha,China"}]},{"given":"Xin","family":"Xu","sequence":"additional","affiliation":[{"name":"National University of Defense Technology,College of Intelligence Science and Technology,Changsha,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2025.XXI.066"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2025.XXI.064"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1126\/science.add4679"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-023-06419-4"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1126\/scirobotics.aau5872"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1177\/0278364919887447"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2018.xiv.049"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-020-03051-4"},{"key":"ref11","article-title":"Jiangjun: Mastering xiangqi by tackling non-transitivity in two-player zero-sum games","author":"Li","year":"2023","journal-title":"Transactions on Machine Learning Research"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2021.3054625"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-control-030323-022510"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3477600"},{"article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","year":"2020","author":"Levine","key":"ref15"},{"key":"ref16","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"International conference on machine learning","author":"Fujimoto"},{"key":"ref17","first-page":"20132","article-title":"A minimalist approach to offline reinforcement learning","volume":"34","author":"Fujimoto","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref18","article-title":"Revisiting the minimalist approach to offline reinforcement learning","author":"Tarasov","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref19","first-page":"5774","article-title":"Offline reinforcement learning with fisher divergence critic regularization","volume-title":"International Conference on Machine Learning","author":"Kostrikov"},{"article-title":"Behavior regularized offline reinforcement learning","year":"2019","author":"Wu","key":"ref20"},{"article-title":"Way off-policy batch deep reinforcement learning of implicit human preferences in dialog","year":"2019","author":"Jaques","key":"ref21"},{"key":"ref22","article-title":"Stabilizing off-policy q-learning via bootstrapping error reduction","volume":"32","author":"Kumar","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref23","article-title":"Learning structured output representation using deep conditional generative models","volume":"28","author":"Sohn","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref24","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume":"33","author":"Ho","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref25","first-page":"31406","article-title":"Adaptive advantage-guided policy regularization for offline reinforcement learning","volume-title":"International Conference on Machine Learning","volume":"235","author":"Liu"},{"key":"ref26","first-page":"31278","article-title":"Supported policy optimization for offline reinforcement learning","volume":"35","author":"Wu","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref27","first-page":"1719","article-title":"Plas: Latent action space for offline reinforcement learning","volume-title":"Conference on Robot Learning","author":"Zhou"},{"key":"ref28","first-page":"22955","article-title":"Behavior transformers: Cloning k modes with one stone","volume":"35","author":"Shafiullah","year":"2022","journal-title":"Advances in neural information processing systems"},{"article-title":"Diffusion policies as an expressive policy class for offline reinforcement learning","volume-title":"The Eleventh International Conference on Learning Representations","author":"Wang","key":"ref29"},{"article-title":"Idql: Implicit q-learning as an actor-critic method with diffusion policies","year":"2023","author":"Hansen-Estruch","key":"ref30"},{"article-title":"Offline reinforcement learning via high-fidelity generative behavior modeling","year":"2022","author":"Chen","key":"ref31"},{"key":"ref32","article-title":"Generative modeling by estimating gradients of the data distribution","volume":"32","author":"Song","year":"2019","journal-title":"Advances in neural information processing systems"},{"article-title":"Diffusion actor-critic: Formulating constrained policy iteration as diffusion noise regression for offline reinforcement learning","year":"2024","author":"Fang","key":"ref33"},{"key":"ref34","article-title":"D4RL: datasets for deep data-driven reinforcement learning","author":"Fu","year":"2020","journal-title":"CoRR"},{"key":"ref35","first-page":"67195","article-title":"Efficient diffusion policies for offline reinforcement learning","volume":"36","author":"Kang","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1585"},{"key":"ref37","first-page":"5775","article-title":"Dpm-solver: A fast ode solver for diffusion probabilistic model sampling in around 10 steps","volume":"35","author":"Lu","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Offline reinforcement learning with implicit Q-learning","volume-title":"ICLR","author":"Kostrikov","key":"ref38"},{"key":"ref39","first-page":"29304","article-title":"Deep reinforcement learning at the edge of the statistical precipice","volume":"34","author":"Agarwal","year":"2021","journal-title":"Advances in neural information processing systems"},{"article-title":"Score-based generative modeling through stochastic differential equations","year":"2020","author":"Song","key":"ref40"},{"article-title":"Skill expansion and composition in parameter space","volume-title":"International Conference on Learning Representations","author":"Liu","key":"ref41"},{"article-title":"Lora: Low-rank adaptation of large language models","year":"2021","author":"Hu","key":"ref42"},{"key":"ref43","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"International conference on machine learning","author":"Fujimoto"},{"key":"ref44","article-title":"AWAC: Accelerating online reinforcement learning with offline datasets","author":"Nair","year":"2020","journal-title":"CoRR"},{"article-title":"Latent-variable advantage-weighted policy optimization for offline rl","year":"2022","author":"Chen","key":"ref45"},{"article-title":"Adam: A method for stochastic optimization","year":"2014","author":"Kingma","key":"ref46"}],"event":{"name":"2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)","start":{"date-parts":[[2025,10,19]]},"location":"Hangzhou, China","end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11245651\/11245652\/11246521.pdf?arnumber=11246521","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T12:36:36Z","timestamp":1766061396000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11246521\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/iros60139.2025.11246521","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}