{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T18:55:32Z","timestamp":1771959332447,"version":"3.50.1"},"reference-count":21,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100006190","name":"Research and Development","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006190","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10447775","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"7265-7269","source":"Crossref","is-referenced-by-count":2,"title":["P2DT: Mitigating Forgetting in Task-Incremental Learning with Progressive Prompt Decision Transformer"],"prefix":"10.1109","author":[{"given":"Zhiyuan","family":"Wang","sequence":"first","affiliation":[{"name":"Tsinghua University,Tsinghua Shenzhen International Graduate School,China"}]},{"given":"Xiaoyang","family":"Qu","sequence":"additional","affiliation":[{"name":"Ping An Technology (Shenzhen) Co., Ltd.,Shenzhen,China"}]},{"given":"Jing","family":"Xiao","sequence":"additional","affiliation":[{"name":"Ping An Technology (Shenzhen) Co., Ltd.,Shenzhen,China"}]},{"given":"Bokui","family":"Chen","sequence":"additional","affiliation":[{"name":"Tsinghua University,Tsinghua Shenzhen International Graduate School,China"}]},{"given":"Jianzong","family":"Wang","sequence":"additional","affiliation":[{"name":"Ping An Technology (Shenzhen) Co., Ltd.,Shenzhen,China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"15084","article-title":"Decision transformer: Reinforcement learning via sequence modeling","volume":"34","author":"Chen","year":"2021","journal-title":"Advances in neural information processing systems"},{"key":"ref2","first-page":"27921","article-title":"Multi-game decision transformers","volume":"35","author":"Lee","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref3","first-page":"27042","article-title":"Online decision transformer","volume-title":"international conference on machine learning","author":"Zheng"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.tics.2020.09.004"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2019.01.012"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2017.2773081"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1073\/pnas.1611835114"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2023\/443"},{"key":"ref9","article-title":"Gradient episodic memory for continual learning","volume":"30","author":"Lopez-Paz","year":"2017","journal-title":"Advances in neural information processing 
systems"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.587"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/DAC56929.2023.10247821"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acllong.353"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.tics.2016.05.004"},{"key":"ref15","article-title":"Progressive prompts: Continual learning for language models","volume-title":"The Eleventh International Conference on Learning Representations","author":"Razdaibiedina"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref17","article-title":"D4rl: Datasets for deep data-driven reinforcement learning","author":"Fu","year":"2020"},{"key":"ref18","first-page":"1179","article-title":"Conservative q-learning for offline reinforcement learning","volume":"33","author":"Kumar","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref19","article-title":"Stabilizing off-policy q-learning via bootstrapping error reduction","volume":"32","author":"Kumar","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref20","article-title":"Behavior regularized offline reinforcement learning","author":"Wu","year":"2019"},{"key":"ref21","article-title":"Advantage-weighted regression: Simple and scalable off-policy reinforcement learning","author":"Peng","year":"2019"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10447775.pdf?arnumber=10447775","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T06:08:51Z","timestamp":1722578931000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10447775\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":21,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10447775","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}