{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,27]],"date-time":"2026-02-27T04:58:05Z","timestamp":1772168285469,"version":"3.50.1"},"reference-count":25,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2020,8,7]],"date-time":"2020-08-07T00:00:00Z","timestamp":1596758400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2020,8,7]],"date-time":"2020-08-07T00:00:00Z","timestamp":1596758400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Basic Research Program of China","doi-asserted-by":"publisher","award":["2018YFC0832300"],"award-info":[{"award-number":["2018YFC0832300"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2021,1]]},"DOI":"10.1007\/s10489-020-01786-1","type":"journal-article","created":{"date-parts":[[2020,8,7]],"date-time":"2020-08-07T12:03:11Z","timestamp":1596801791000},"page":"185-201","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":19,"title":["SLER: Self-generated long-term experience replay for continual reinforcement learning"],"prefix":"10.1007","volume":"51","author":[{"given":"Chunmao","family":"Li","sequence":"first","affiliation":[]},{"given":"Yang","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5155-2137","authenticated-orcid":false,"given":"Yinliang","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Peng","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Xupeng","family":"Geng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,8,7]]},"reference":[{"key":"1786_CR1","volume-title":"Reinforcement learning: An introduction","author":"RS Sutton","year":"2018","unstructured":"Sutton RS, Barto AG (2018) Reinforcement learning: An introduction. MIT Press, Cambridge"},{"issue":"7587","key":"1786_CR2","doi-asserted-by":"publisher","first-page":"484","DOI":"10.1038\/nature16961","volume":"529","author":"D Silver","year":"2016","unstructured":"Silver D, Huang A, Maddison CJ, Guez A, Sifre L, Van Den Driessche G, Schrittwieser J, Antonoglou I, Panneershelvam V, Lanctot M, et al (2016) Mastering the game of go with deep neural networks and tree search. Nature 529(7587):484","journal-title":"Nature"},{"issue":"7676","key":"1786_CR3","doi-asserted-by":"publisher","first-page":"354","DOI":"10.1038\/nature24270","volume":"550","author":"D Silver","year":"2017","unstructured":"Silver D, Schrittwieser J, Simonyan K, Antonoglou I, Huang A, Guez A, Hubert T, Baker L, Lai M, Bolton A, et al (2017) Mastering the game of go without human knowledge. Nature 550(7676):354","journal-title":"Nature"},{"issue":"7540","key":"1786_CR4","doi-asserted-by":"publisher","first-page":"529","DOI":"10.1038\/nature14236","volume":"518","author":"V Mnih","year":"2015","unstructured":"Mnih V, Kavukcuoglu K, Silver D, Rusu AA, Veness J, Bellemare MG, Graves A, Riedmiller M, Fidjeland AK, Ostrovski G, et al (2015) Human-level control through deep reinforcement learning. Nature 518(7540):529","journal-title":"Nature"},{"issue":"7782","key":"1786_CR5","doi-asserted-by":"publisher","first-page":"350","DOI":"10.1038\/s41586-019-1724-z","volume":"575","author":"O Vinyals","year":"2019","unstructured":"Vinyals O, Babuschkin I, Czarnecki WM, Mathieu M, Dudzik A, Chung J, Choi DH, Powell R, Ewalds T, Georgiev P, et al (2019) Grandmaster level in starcraft ii using multi-agent reinforcement learning. Nature 575(7782):350","journal-title":"Nature"},{"issue":"1","key":"1786_CR6","doi-asserted-by":"publisher","first-page":"77","DOI":"10.1023\/A:1007331723572","volume":"28","author":"MB Ring","year":"1997","unstructured":"Ring MB (1997) Child: A first step towards continual learning. Mach Learn 28(1):77","journal-title":"Mach Learn"},{"key":"1786_CR7","doi-asserted-by":"crossref","unstructured":"Thrun S (1995) A lifelong learning perspective for mobile robot control. In: Intelligent Robots and Systems. Elsevier, New York, pp 201\u2013214","DOI":"10.1016\/B978-044482250-5\/50015-3"},{"key":"1786_CR8","unstructured":"Rusu AA, Rabinowitz NC, Desjardins G, Soyer H, Kirkpatrick J, Kavukcuoglu K, Pascanu R, Hadsell R (2016) Progressive neural networks. arXiv:1606.04671"},{"key":"1786_CR9","doi-asserted-by":"crossref","unstructured":"Yin H, Pan SJ (2017) Knowledge transfer for deep reinforcement learning with hierarchical experience replay. In Thirty-First AAAI conference on artificial intelligence","DOI":"10.1609\/aaai.v31i1.10733"},{"key":"1786_CR10","doi-asserted-by":"crossref","unstructured":"Tessler C, Givony S, Zahavy T, Mankowitz DJ, Mannor S (2017) A deep hierarchical approach to lifelong learning in minecraft. In Thirty-First AAAI conference on artificial intelligence","DOI":"10.1609\/aaai.v31i1.10744"},{"issue":"3","key":"1786_CR11","doi-asserted-by":"publisher","first-page":"419","DOI":"10.1037\/0033-295X.102.3.419","volume":"102","author":"JL McClelland","year":"1995","unstructured":"McClelland JL, McNaughton BL, O\u2019reilly RC (1995) Why there are complementary learning systems in the hippocampus and neocortex: insights from the successes and failures of connectionist models of learning and memory. Psychological review 102(3):419","journal-title":"Psychological review"},{"key":"1786_CR12","doi-asserted-by":"crossref","unstructured":"Parisi GI, Kemker R, Part JL, Kanan C, Wermter S (2019) Continual lifelong learning with neural networks: A review. Neural Networks","DOI":"10.1016\/j.neunet.2019.01.012"},{"key":"1786_CR13","doi-asserted-by":"crossref","unstructured":"Kirkpatrick J, Pascanu R, Rabinowitz N, Veness J, Desjardins G, Rusu AA, Milan K, Quan J, Ramalho T, Grabska-Barwinska A, et al (2017) Overcoming catastrophic forgetting in neural networks. Proceedings of the national academy of sciences p 201611835","DOI":"10.1073\/pnas.1611835114"},{"key":"1786_CR14","doi-asserted-by":"crossref","unstructured":"Aljundi R, Babiloni F, Elhoseiny M, Rohrbach M, Tuytelaars T (2018) Memory aware synapses: Learning what (not) to forget. In Proceedings of the European Conference on Computer Vision (ECCV), 139\u2013154","DOI":"10.1007\/978-3-030-01219-9_9"},{"key":"1786_CR15","unstructured":"Schwarz J, Luketina J, Czarnecki WM, Grabska-Barwinska A, Teh YW, Pascanu R, Hadsell R (2018) Progress & compress: A scalable framework for continual learning. arXiv:1805.06370"},{"key":"1786_CR16","doi-asserted-by":"crossref","unstructured":"Isele D, Cosgun A (2018) Selective experience replay for lifelong learning. In Thirty-Second AAAI conference on artificial intelligence","DOI":"10.1609\/aaai.v32i1.11595"},{"key":"1786_CR17","doi-asserted-by":"crossref","unstructured":"Lesort T, Caselles-Dupr\u00e9 H, Garcia-Ortiz M, Stoian A, Filliat D (2019) Generative models from the perspective of continual learning. In 2019 International Joint Conference on Neural Networks (IJCNN) IEEE 1\u20138","DOI":"10.1109\/IJCNN.2019.8851986"},{"key":"1786_CR18","unstructured":"Wu C, Herranz L, Liu X, van de Weijer J, Raducanu B, et al (2018) Memory replay gans: Learning to generate new categories without forgetting. In Advances in Neural Information Processing Systems 5962\u20135972"},{"key":"1786_CR19","unstructured":"Shin H, Lee JK, Kim J, Kim J (2017) Continual learning with deep generative replay. In Advances in Neural Information Processing Systems, 2990\u20132999"},{"key":"1786_CR20","unstructured":"Tanaka F, Yamamura M (1997) An approach to lifelong reinforcement learning through multiple environments. In 6th European Workshop on Learning Robots, 93\u201399"},{"key":"1786_CR21","unstructured":"Rusu AA, Colmenarejo SG, Gulcehre C, Desjardins G, Kirkpatrick J, Pascanu R, Mnih V, Kavukcuoglu K, Hadsell R (2015) Policy distillation. arXiv:1511.06295"},{"key":"1786_CR22","unstructured":"Maas AL, Hannun AY, Ng AY (2013) Rectifier nonlinearities improve neural network acoustic models. In Proc. icml, 30:3"},{"key":"1786_CR23","unstructured":"Schaul T, Quan J, Antonoglou I, Silver D (2015) Prioritized experience replay. arXiv:1511.05952"},{"issue":"1","key":"1786_CR24","first-page":"1929","volume":"15","author":"N Srivastava","year":"2014","unstructured":"Srivastava N, Hinton G, Krizhevsky A, Sutskever I, Salakhutdinov R (2014) Dropout: a simple way to prevent neural networks from overfitting. The Journal of Machine Learning Research 15(1):1929","journal-title":"The Journal of Machine Learning Research"},{"key":"1786_CR25","unstructured":"Vinyals O, Ewalds T, Bartunov S, Georgiev P, Vezhnevets AS, Yeo M, Makhzani A, K\u00fcttler H, Agapiou J, Schrittwieser J, et al (2017) Starcraft ii: A new challenge for reinforcement learning. arXiv:1708.04782"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-020-01786-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-020-01786-1\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-020-01786-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,6]],"date-time":"2022-11-06T01:23:19Z","timestamp":1667697799000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-020-01786-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,8,7]]},"references-count":25,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2021,1]]}},"alternative-id":["1786"],"URL":"https:\/\/doi.org\/10.1007\/s10489-020-01786-1","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,8,7]]},"assertion":[{"value":"7 August 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Compliance with Ethical Standards"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interests"}}]}}