{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T10:22:09Z","timestamp":1773310929966,"version":"3.50.1"},"reference-count":43,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"JSPS KAKENHI","award":["JP90542217"],"award-info":[{"award-number":["JP90542217"]}]},{"DOI":"10.13039\/501100001863","name":"New Energy and Industrial Technology Development Organization","doi-asserted-by":"publisher","award":["JPNP16007"],"award-info":[{"award-number":["JPNP16007"]}],"id":[{"id":"10.13039\/501100001863","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004410","name":"T\u00fcrkiye Bilimsel ve Teknolojik Ara\u015ft\u0131rma Kurumu","doi-asserted-by":"publisher","award":["118E923"],"award-info":[{"award-number":["118E923"]}],"id":[{"id":"10.13039\/501100004410","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100000780","name":"European Commission","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100000780","id-type":"DOI","asserted-by":"publisher"}]},{"name":"INVERSE Project","award":["101136067"],"award-info":[{"award-number":["101136067"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Robot. Autom. Lett."],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1109\/lra.2024.3363530","type":"journal-article","created":{"date-parts":[[2024,2,7]],"date-time":"2024-02-07T18:57:58Z","timestamp":1707332278000},"page":"3116-3123","source":"Crossref","is-referenced-by-count":11,"title":["Diffusion Policies for Out-of-Distribution Generalization in Offline Reinforcement Learning"],"prefix":"10.1109","volume":"9","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1901-8473","authenticated-orcid":false,"given":"Suzan Ece","family":"Ada","sequence":"first","affiliation":[{"name":"Department of Computer Engineering, Bogazici University, Istanbul, T\u00fcrkiye"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3051-6038","authenticated-orcid":false,"given":"Erhan","family":"Oztop","sequence":"additional","affiliation":[{"name":"SISReC, OTRI, Osaka University, Osaka, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9597-2731","authenticated-orcid":false,"given":"Emre","family":"Ugur","sequence":"additional","affiliation":[{"name":"Department of Computer Engineering, Bogazici University, Istanbul, T\u00fcrkiye"}]}],"member":"263","reference":[{"key":"ref1","article-title":"D4RL: Datasets for deep data-driven reinforcement learning","author":"Fu","year":"2020"},{"key":"ref2","article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","author":"Levine","year":"2020"},{"key":"ref3","first-page":"1455","article-title":"Dealing with the unknown: Pessimistic offline reinforcement learning","volume-title":"Proc. 5th Annu. Conf. Robot Learn.","author":"Li","year":"2021"},{"key":"ref4","first-page":"2023","article-title":"Diffusion policy: Visuomotor policy learning via action diffusion","volume-title":"Proc. Robot.: Sci. Syst.","author":"Chi"},{"key":"ref5","first-page":"158","article-title":"Implicit behavioral cloning","volume-title":"Proc. 5th Annu. Conf. Robot Learn.","author":"Florence","year":"2021"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2023.XIX.016"},{"key":"ref7","article-title":"Diffusion policies as an expressive policy class for offline reinforcement learning","volume-title":"Proc. 11th Int. Conf. Learn. Representations","author":"Wang","year":"2023"},{"key":"ref8","first-page":"9902","article-title":"Planning with diffusion for flexible behavior synthesis","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Janner","year":"2022"},{"key":"ref9","first-page":"2256","article-title":"Deep unsupervised learning using nonequilibrium thermodynamics","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Sohl-Dickstein","year":"2015"},{"key":"ref10","article-title":"Generative modeling by estimating gradients of the data distribution","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Song","year":"2019"},{"key":"ref11","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ho","year":"2020"},{"key":"ref12","first-page":"11918","article-title":"Improving reconstruction autoencoder out-of-distribution detection with Mahalanobis distance","author":"Denouden","year":"2018"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2020.107320"},{"key":"ref14","article-title":"Stabilizing off-policy Q-learning via bootstrapping error reduction","volume":"32","author":"Kumar","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref15","article-title":"Behavior regularized offline reinforcement learning","author":"Wu","year":"2019"},{"key":"ref16","article-title":"Accelerating online reinforcement learning with offline datasets","author":"Nair","year":"2020"},{"key":"ref17","article-title":"Offline reinforcement learning with implicit Q-learning","author":"Kostrikov","year":"2022"},{"key":"ref18","first-page":"1179","article-title":"Conservative Q-learning for offline reinforcement learning","volume":"33","author":"Kumar","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref19","first-page":"11194","article-title":"GradientDICE: Rethinking generalized offline estimation of stationary values","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Zhang","year":"2020"},{"key":"ref20","first-page":"652","article-title":"Doubly robust off-policy evaluation for reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jiang","year":"2015"},{"key":"ref21","first-page":"14129","article-title":"MOPO: Model-based offline policy optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yu","year":"2020"},{"key":"ref22","first-page":"28954","article-title":"COMBO: Conservative offline model-based policy optimization","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yu","year":"2021"},{"key":"ref23","first-page":"1273","article-title":"Reinforcement learning as one big sequence modeling problem","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Janner","year":"2021"},{"key":"ref24","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen","year":"2021"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2023.3322046"},{"key":"ref26","article-title":"RainDiffusion: When unsupervised learning meets diffusion models for real-world image deraining","author":"Wei","year":"2023"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pcbi.1011890"},{"key":"ref28","article-title":"All are worth words: A ViT backbone for score-based diffusion models","volume-title":"Proc. NeurIPS Workshop","author":"Bao","year":"2022"},{"key":"ref29","article-title":"Schr\u00f6dinger's Bat: Diffusion models sometimes generate polysemous words in superposition","author":"White","year":"2022"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3626235"},{"key":"ref31","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"ref33","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto","year":"2018"},{"key":"ref34","first-page":"2829","article-title":"Continuous control with deep reinforcement learning","volume-title":"Proc. 4th Int. Conf. Learn. Representations","author":"Lillicrap","year":"2015"},{"key":"ref35","article-title":"Diffusion policies as an expressive policy class for offline reinforcement learning","volume-title":"Proc. 11th Int. Conf. Learn. Representations","author":"Wang","year":"2023"},{"key":"ref36","first-page":"20132","article-title":"A minimalist approach to offline reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Fujimoto","year":"2021"},{"key":"ref37","article-title":"Offline retraining for online RL: Decoupled policy learning to mitigate exploration bias","author":"Mark","year":"2023"},{"key":"ref38","article-title":"OpenAI Gym","author":"Brockman","year":"2016"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1017\/S0263574722000625"},{"key":"ref41","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen","year":"2021"},{"key":"ref42","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja","year":"2018"},{"key":"ref43","first-page":"4933","article-title":"Offline RL without off-policy evaluation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Brandfonbrener","year":"2021"}],"container-title":["IEEE Robotics and Automation Letters"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7083369\/10440130\/10423845.pdf?arnumber=10423845","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,14]],"date-time":"2024-03-14T03:22:26Z","timestamp":1710386546000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10423845\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4]]},"references-count":43,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/lra.2024.3363530","relation":{},"ISSN":["2377-3766","2377-3774"],"issn-type":[{"value":"2377-3766","type":"electronic"},{"value":"2377-3774","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4]]}}}