{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,10]],"date-time":"2026-01-10T00:22:40Z","timestamp":1768004560758,"version":"3.49.0"},"reference-count":54,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"European Union within the framework of the Artificial Intelligence National Laboratory","award":["RRF-2.3.1-21-2022-00004"],"award-info":[{"award-number":["RRF-2.3.1-21-2022-00004"]}]},{"name":"European Commission through the H2020 project EPIC","award":["739592"],"award-info":[{"award-number":["739592"]}]},{"name":"Government of France and the Government of Hungary in the framework of \u201cCampus France Bourse du gouvernement fran\u00e7ais-Bourse Excellence Hongrie"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3427012","type":"journal-article","created":{"date-parts":[[2024,7,11]],"date-time":"2024-07-11T18:08:02Z","timestamp":1720721282000},"page":"100102-100119","source":"Crossref","is-referenced-by-count":3,"title":["HiER: Highlight Experience Replay for Boosting Off-Policy Reinforcement Learning Agents"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6450-5193","authenticated-orcid":false,"given":"D\u00e1niel","family":"Horv\u00e1th","sequence":"first","affiliation":[{"name":"Center for Robotics, Mines Paris, PSL University, Paris, France"}]},{"given":"Jes\u00fas","family":"Bujalance Mart\u00edn","sequence":"additional","affiliation":[{"name":"Center for Robotics, Mines Paris, PSL University, Paris, 
France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3531-3803","authenticated-orcid":false,"given":"Ferenc","family":"G\u00e1bor Erdos","sequence":"additional","affiliation":[{"name":"Centre of Excellence in Production Informatics and Control, Institute for Computer Science and Control, Hungarian Research Network, Budapest, Hungary"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0169-4791","authenticated-orcid":false,"given":"Zolt\u00e1n","family":"Istenes","sequence":"additional","affiliation":[{"name":"CoLocation Center for Academic and Industrial Cooperation, E\u00f6tv\u00f6s Lor\u00e1nd University, Budapest, Hungary"}]},{"given":"Fabien","family":"Moutarde","sequence":"additional","affiliation":[{"name":"Center for Robotics, Mines Paris, PSL University, Paris, France"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/tkde.2009.191"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1186\/s40537-016-0043-6"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/access.2021.3126658"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2022.3147337"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TRO.2022.3207619"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2023.10.121"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.23919\/ECC.2019.8796140"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2019.8803726"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/tnn.1998.712192"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3038605"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3027923"},{"key":"ref12","article-title":"Mastering chess and shogi by self-play with a general reinforcement learning 
algorithm","author":"Silver","year":"2017","journal-title":"arXiv:1712.01815"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"issue":"1","key":"ref15","first-page":"387","article-title":"Deterministic policy gradient algorithms","volume-title":"Proc. 31st Int. Conf. Mach. Learn.","volume":"32","author":"Silver"},{"key":"ref16","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref17","article-title":"Addressing function approximation error in actor-critic methods","author":"Fujimoto","year":"2018","journal-title":"arXiv:1802.09477"},{"key":"ref18","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","author":"Haarnoja","year":"2018","journal-title":"arXiv:1801.01290"},{"key":"ref19","article-title":"Active domain randomization","author":"Mehta","year":"2019","journal-title":"arXiv:1904.04762"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN48605.2020.9207427"},{"key":"ref21","article-title":"Reverse curriculum generation for reinforcement learning","author":"Florensa","year":"2017","journal-title":"arXiv:1707.05300"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794206"},{"key":"ref23","article-title":"Learning Montezuma\u2019s revenge from a single demonstration","author":"Salimans","year":"2018","journal-title":"arXiv:1812.03381"},{"key":"ref24","article-title":"Intrinsic motivation and automatic curricula via asymmetric self-play","author":"Sukhbaatar","year":"2017","journal-title":"arXiv:1703.05407"},{"key":"ref25","article-title":"Automatic goal generation for reinforcement learning agents","author":"Florensa","year":"2017","journal-title":"arXiv:1705.06366"},{"key":"ref26","article-title":"Skew-fit: State-covering self-supervised reinforcement 
learning","author":"Pong","year":"2019","journal-title":"arXiv:1903.03698"},{"key":"ref27","article-title":"Automated curricula through setter-solver interactions","author":"Racaniere","year":"2019","journal-title":"arXiv:1909.12892"},{"key":"ref28","article-title":"Prioritized experience replay","author":"Schaul","year":"2015","journal-title":"arXiv:1511.05952"},{"key":"ref29","article-title":"Self-imitation learning","author":"Oh","year":"2018","journal-title":"arXiv:1806.05635"},{"key":"ref30","article-title":"Self-imitation advantage learning","author":"Ferret","year":"2020","journal-title":"arXiv:2012.11989"},{"key":"ref31","article-title":"Boosting soft actor-critic: Emphasizing recent experience without forgetting the past","author":"Wang","year":"2019","journal-title":"arXiv:1906.04009"},{"key":"ref32","article-title":"Hindsight experience replay","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Andrychowicz"},{"key":"ref33","first-page":"2565","article-title":"Reward relabelling for combined reinforcement and imitation learning on sparse-reward tasks","volume-title":"Proc. Int. Conf. Auto. 
Agents Multiagent Syst.","author":"Bujalance"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/671"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1145\/1553374.1553380"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2021.3069908"},{"key":"ref37","article-title":"Panda-gym: Open-source goal-conditioned environments for robotic learning","author":"Gallou\u00e9dec","year":"2021","journal-title":"arXiv:2106.13687"},{"key":"ref38","article-title":"Multi-goal reinforcement learning: Challenging robotics environments and request for research","author":"Plappert","year":"2018","journal-title":"arXiv:1802.09464"},{"key":"ref39","article-title":"D4RL: Datasets for deep data-driven reinforcement learning","author":"Fu","year":"2020","journal-title":"arXiv:2004.07219"},{"key":"ref40","author":"Coumans","year":"2016","journal-title":"Pybullet, a Python Module for Physics Simulation for Games, Robotics and Machine Learning"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/iros.2012.6386109"},{"key":"ref42","first-page":"1312","article-title":"Universal value function approximators","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schaul"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-021-10085-1"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2016.89"},{"key":"ref45","volume-title":"Measuring the Reliability of Reinforcement Learning Algorithms","author":"Chan","year":"2020"},{"key":"ref46","article-title":"How many random seeds? Statistical power analysis in deep reinforcement learning experiments","author":"Colas","year":"2018","journal-title":"arXiv:1806.08295"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11694"},{"key":"ref48","first-page":"29304","article-title":"Deep reinforcement learning at the edge of the statistical precipice","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"34","author":"Agarwal"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3912"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1007\/bf00115009"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1016\/j.tics.2016.05.004"},{"key":"ref52","volume-title":"Soft Actor-Critic\u2014Spinning Up Documentation"},{"key":"ref53","first-page":"104","article-title":"An optimistic perspective on offline reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Agarwal"},{"key":"ref54","first-page":"507","article-title":"Agent57: Outperforming the Atari human benchmark","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Badia"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10380310\/10595054.pdf?arnumber=10595054","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,25]],"date-time":"2024-07-25T17:31:52Z","timestamp":1721928712000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10595054\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":54,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3427012","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}