{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,21]],"date-time":"2026-01-21T09:47:32Z","timestamp":1768988852448,"version":"3.49.0"},"reference-count":47,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Guangzhou Science and Technology Program","award":["202206030008"],"award-info":[{"award-number":["202206030008"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1109\/tnnls.2023.3296765","type":"journal-article","created":{"date-parts":[[2023,8,1]],"date-time":"2023-08-01T18:16:28Z","timestamp":1690913788000},"page":"16602-16615","source":"Crossref","is-referenced-by-count":9,"title":["AHEGC: Adaptive Hindsight Experience Replay With Goal-Amended Curiosity Module for Robot Control"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4228-311X","authenticated-orcid":false,"given":"Hongliang","family":"Zeng","sequence":"first","affiliation":[{"name":"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6238-7963","authenticated-orcid":false,"given":"Ping","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4195-9475","authenticated-orcid":false,"given":"Fang","family":"Li","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"}]},{"given":"Chubin","family":"Lin","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3520-740X","authenticated-orcid":false,"given":"Junkang","family":"Zhou","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, South China University of Technology, Guangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nature24270"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref4","article-title":"Playing Atari with deep reinforcement 
learning","author":"Mnih","year":"2013","journal-title":"arXiv:1312.5602"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-92040-5_19"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3054685"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICTC49870.2020.9289571"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3084685"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3109284"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9561187"},{"key":"ref11","article-title":"Deep reinforcement learning for contact-rich skills using compliant movement primitives","author":"Spector","year":"2020","journal-title":"arXiv:2008.13223"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3059912"},{"key":"ref13","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume-title":"Proc. ICML","volume":"99","author":"Ng"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/IROS51168.2021.9636020"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2022.3172754"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2936863"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1177\/1729881419898342"},{"key":"ref18","first-page":"1","article-title":"Hindsight experience replay","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Andrychowicz"},{"key":"ref19","article-title":"Continuous control with deep reinforcement learning","author":"Lillicrap","year":"2015","journal-title":"arXiv:1509.02971"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2020.2990722"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1093\/oxfordhb\/9780195399820.013.0010"},{"key":"ref22","first-page":"1","article-title":"Unifying count-based exploration and intrinsic motivation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Bellemare"},{"key":"ref23","article-title":"Large-scale study of curiosity-driven learning","author":"Burda","year":"2018","journal-title":"arXiv:1808.04355"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2019.2957051"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992699"},{"key":"ref26","article-title":"Prioritized experience replay","author":"Schaul","year":"2015","journal-title":"arXiv:1511.05952"},{"key":"ref27","first-page":"113","article-title":"Energy-based hindsight experience prioritization","volume-title":"Proc. Conf. Robot Learn.","author":"Zhao"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/DEVLRN.2019.8850705"},{"key":"ref29","article-title":"ARCHER: Aggressive rewards to counter bias in hindsight experience replay","author":"Lanka","year":"2018","journal-title":"arXiv:1809.02070"},{"key":"ref30","first-page":"1","article-title":"DHER: Hindsight experience replay for dynamic goals","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Fang"},{"key":"ref31","first-page":"6","article-title":"What is intrinsic motivation? A typology of computational approaches","volume":"1","author":"Oudeyer","year":"2009","journal-title":"Frontiers Neurorobot."},{"key":"ref32","first-page":"1","article-title":"Exploration in model-based reinforcement learning by empirically estimating learning progress","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"25","author":"Lopes"},{"key":"ref33","first-page":"1","article-title":"#Exploration: A study of count-based exploration for deep reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Tang"},{"key":"ref34","first-page":"213","article-title":"R-MAX\u2014A general polynomial time algorithm for near-optimal reinforcement learning","volume":"3","author":"Brafman","year":"2002","journal-title":"J. Mach. Learn. Res."},{"key":"ref35","first-page":"1","article-title":"VIME: Variational information maximizing exploration","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Houthooft"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA48506.2021.9562098"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.70"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ROBIO49542.2019.8961529"},{"key":"ref39","first-page":"1312","article-title":"Universal value function approximators","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Schaul"},{"key":"ref40","article-title":"Multi-goal reinforcement learning: Challenging robotics environments and request for research","author":"Plappert","year":"2018","journal-title":"arXiv:1802.09464"},{"key":"ref41","first-page":"387","article-title":"Deterministic policy gradient algorithms","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Silver"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"key":"ref43","article-title":"OpenAI gym","author":"Brockman","year":"2016","journal-title":"arXiv:1606.01540"},{"key":"ref44","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref45","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref46","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2017.8202133"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10737991\/10198675.pdf?arnumber=10198675","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T19:25:09Z","timestamp":1732735509000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10198675\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11]]},"references-count":47,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2023.3296765","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11]]}}}