{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T18:11:11Z","timestamp":1764785471090,"version":"3.28.0"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,6,30]],"date-time":"2024-06-30T00:00:00Z","timestamp":1719705600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,6,30]]},"DOI":"10.1109\/ijcnn60899.2024.10651451","type":"proceedings-article","created":{"date-parts":[[2024,9,9]],"date-time":"2024-09-09T17:35:05Z","timestamp":1725903305000},"page":"1-7","source":"Crossref","is-referenced-by-count":1,"title":["High-quality Synthetic Data is Efficient for Model-based Offline Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Qichao","family":"Zhang","sequence":"first","affiliation":[{"name":"Chinese Academy of Sciences,The State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation,Beijing,China,100190"}]},{"given":"Xing","family":"Fang","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,The State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation,Beijing,China,100190"}]},{"given":"Kaixuan","family":"Xu","sequence":"additional","affiliation":[{"name":"University of Chinese Academy of Sciences,School of Artificial Intelligence,Beijing,China,100049"}]},{"given":"Weixin","family":"Zhao","sequence":"additional","affiliation":[{"name":"ZHEJIANG SUPCON TECHNOLOGY CO., LTD,Hangzhou,China,310059"}]},{"given":"Haoran","family":"Li","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,The State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation,Beijing,China,100190"}]},{"given":"Dongbin","family":"Zhao","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences,The State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation,Beijing,China,100190"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2022.3215788"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/MCI.2019.2901089"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3202185"},{"article-title":"Semi-markov offline reinforcement learning for healthcare","year":"2022","author":"Fatemi","key":"ref4"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2927869"},{"article-title":"Offline reinforcement learning: Tutorial, review, and perspectives on open problems","year":"2020","author":"Levine","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2023.3250269"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ITSC55140.2022.9922100"},{"article-title":"Continuous control with deep reinforcement learning","volume-title":"International Conference on Learning Representations","author":"Lillicrap","key":"ref9"},{"key":"ref10","first-page":"1582","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"International Conference on Machine Learning","author":"Fujimoto"},{"key":"ref11","first-page":"2052","article-title":"Off-policy deep reinforcement learning without exploration","volume-title":"International Conference on Machine Learning","author":"Fujimoto"},{"key":"ref12","first-page":"11 784","article-title":"Stabilizing off-policy q-learning via bootstrapping error reduction","author":"Kumar","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Behavior regularized offline reinforcement learning","year":"2019","author":"Wu","key":"ref13"},{"key":"ref14","article-title":"A minimalist approach to offline reinforcement learning","author":"Fujimoto","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref15","first-page":"1179","article-title":"Conservative q-learning for offline reinforcement learning","author":"Kumar","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref16","article-title":"Combo: Conservative offline model-based policy optimization","author":"Yu","year":"2021","journal-title":"Neural Information Processing Systems"},{"key":"ref17","first-page":"7436","article-title":"Uncertainty-based offline reinforcement learning with diversified q-ensemble","volume":"34","author":"An","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Offline reinforcement learning with implicit q-learning","volume-title":"International Conference on Learning Representations","author":"Kostrikov","key":"ref18"},{"article-title":"Offline reinforcement learning with value-based episodic memory","year":"2021","author":"Ma","key":"ref19"},{"key":"ref20","first-page":"9902","article-title":"Planning with diffusion for flexible behavior synthesis","volume-title":"International Conference on Learning Representations","author":"Janner"},{"article-title":"Diffusion policies as an expressive policy class for offline reinforcement learning","volume-title":"The Eleventh International Conference on Learning Representations","author":"Wang","key":"ref21"},{"article-title":"Boosting continuous control with consistency policy","volume-title":"International Conference on Autonomous Agents and Multiagent Systems","author":"Chen","key":"ref22"},{"key":"ref23","article-title":"Double check your state before trusting it: Confidence-aware bidirectional offline model-based imagination","author":"Lyu","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref24","first-page":"14 129","article-title":"MOPO: Model-based offline policy optimization","author":"Yu","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref25","first-page":"470","article-title":"A dataset perspective on offline reinforcement learning","volume-title":"Conference on Lifelong Learning Agents","author":"Schweighofer"},{"article-title":"Don\u2019t change the algorithm, change the data: Exploratory data for offline reinforcement learning","year":"2022","author":"Yarats","key":"ref26"},{"key":"ref27","first-page":"21 810","article-title":"MOReL: Model-based offline reinforcement learning","author":"Kidambi","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.3233\/faia230618"},{"article-title":"Offline reinforcement learning via high-fidelity generative behavior modeling","volume-title":"The Eleventh International Conference on Learning Representations","author":"Chen","key":"ref29"},{"article-title":"Mildly conservative q-learning for offline reinforcement learning","year":"2022","author":"Lyu","key":"ref30"},{"key":"ref31","article-title":"A policy-guided imitation approach for offline reinforcement learning","author":"Xu","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref32","first-page":"15 084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume":"34","author":"Chen","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref33","first-page":"1678","article-title":"What matters in learning from offline human demonstrations for robot manipulation","volume-title":"Conference on Robot Learning","author":"Mandlekar"},{"key":"ref34","first-page":"104","article-title":"An optimistic perspective on offline reinforcement learning","volume-title":"International Conference on Machine Learning","author":"Agarwal"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/516"},{"article-title":"Value memory graph: A graph-structured world model for offline reinforcement learning","volume-title":"The Eleventh International Conference on Learning Representations","author":"Zhu","key":"ref36"},{"key":"ref37","article-title":"Offline model-based adaptable policy learning","author":"Chen","year":"2021","journal-title":"Neural Information Processing Systems"},{"key":"ref38","article-title":"Offline reinforcement learning with reverse model-based imagination","author":"Wang","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"article-title":"Auto-encoding variational bayes","year":"2013","author":"Kingma","key":"ref39"},{"article-title":"D4RL: Datasets for deep data-driven reinforcement learning","year":"2020","author":"Fu","key":"ref40"},{"key":"ref41","first-page":"305","article-title":"Alvinn : An autonomous land vehicle in a neural network","author":"Pomerleau","year":"1989","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"11","key":"ref42","article-title":"Visualizing data using t-sne","volume":"9","author":"Van der Maaten","year":"2008","journal-title":"Journal of Machine Learning Research"}],"event":{"name":"2024 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2024,6,30]]},"location":"Yokohama, Japan","end":{"date-parts":[[2024,7,5]]}},"container-title":["2024 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10649807\/10649898\/10651451.pdf?arnumber=10651451","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T06:34:20Z","timestamp":1725950060000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10651451\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,30]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/ijcnn60899.2024.10651451","relation":{},"subject":[],"published":{"date-parts":[[2024,6,30]]}}}