{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,14]],"date-time":"2026-02-14T03:06:41Z","timestamp":1771038401269,"version":"3.50.1"},"reference-count":63,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100002241","name":"Japan Science and Technology Agency (JST), CREST, Japan","doi-asserted-by":"publisher","award":["JPMJCR21D4"],"award-info":[{"award-number":["JPMJCR21D4"]}],"id":[{"id":"10.13039\/501100002241","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/access.2023.3246267","type":"journal-article","created":{"date-parts":[[2023,2,16]],"date-time":"2023-02-16T23:07:44Z","timestamp":1676588864000},"page":"17116-17137","source":"Crossref","is-referenced-by-count":5,"title":["Learning Potential in Subgoal-Based Reward Shaping"],"prefix":"10.1109","volume":"11","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7218-7842","authenticated-orcid":false,"given":"Takato","family":"Okudo","sequence":"first","affiliation":[{"name":"Department of Informatics, The Graduate University for Advanced Studies (SOKENDAI), Tokyo, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5907-7382","authenticated-orcid":false,"given":"Seiji","family":"Yamada","sequence":"additional","affiliation":[{"name":"Department of Informatics, The Graduate University for Advanced Studies (SOKENDAI), Tokyo, Japan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/817"},{"key":"ref2","first-page":"4415","article-title":"Reward-rational (implicit) choice: A unifying formalism for reward learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Jeon"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3357236.3395525"},{"key":"ref4","first-page":"77","article-title":"Effective transfer via demonstrations in reinforcement learning: A preliminary study","volume-title":"Proc. AAAI Spring Symp.","author":"Wang"},{"key":"ref5","first-page":"2625","article-title":"Policy shaping: Integrating human feedback with reinforcement learning","volume-title":"Adv. Neural Inf. Process. Syst.","volume":"26","author":"Griffith","year":"2013"},{"key":"ref6","first-page":"4299","article-title":"Deep reinforcement learning from human preferences","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Christiano"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v29i1.9628"},{"key":"ref8","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","volume-title":"Proc. 17th Int. Conf. Mach. Learn.","author":"Ng"},{"key":"ref9","first-page":"4565","article-title":"Generative adversarial imitation learning","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"29","author":"Ho"},{"key":"ref10","article-title":"Variational inverse control with events: A general framework for data-driven reward definition","volume-title":"Advances in Neural Information Processing Systems","volume":"31","author":"Fu","year":"2018"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3090364"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1162\/1064546053278973"},{"key":"ref13","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","volume-title":"Proc. 16th Int. Conf. Mach. Learn. (ICML)","author":"Ng"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2010.01.001"},{"key":"ref15","first-page":"433","article-title":"Dynamic potential-based reward shaping","volume-title":"Proc. 11th Int. Conf. Auto. Agents Multiagent Syst. (AAMAS)","author":"Devlin"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2017\/534"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1609\/aimag.v35i4.2513"},{"key":"ref18","doi-asserted-by":"crossref","DOI":"10.1016\/j.artint.2021.103500","article-title":"A survey of inverse reinforcement learning: Challenges, methods and progress","volume":"297","author":"Arora","year":"2021","journal-title":"Artif. Intell."},{"key":"ref19","article-title":"Concrete problems in AI safety","author":"Amodei","year":"2016","journal-title":"arXiv:1606.06565"},{"key":"ref20","volume-title":"Artificial Intelligence: A Modern Approach","author":"Russell","year":"2009"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1080\/09540091.2018.1443318"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/1597735.1597738"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/S0004-3702(99)00052-1"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10916"},{"key":"ref25","first-page":"3540","article-title":"Feudal networks for hierarchical reinforcement learning","volume-title":"Proc. 34th Int. Conf. Mach. Learn. (ICML)","volume":"70","author":"Vezhnevets"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-30484-3_9"},{"key":"ref27","first-page":"329","article-title":"Introduction and control of subgoals in reinforcement learning","volume-title":"Proc. 25th IASTED Int. Multi-Conf., Artif. Intell. Appl.","author":"Murata"},{"key":"ref28","article-title":"Multi-goal reinforcement learning: Challenging robotics environments and request for research","author":"Plappert","year":"2018","journal-title":"arXiv:1802.09464"},{"key":"ref29","first-page":"1312","article-title":"Universal value function approximators","volume-title":"Proc. 32nd Int. Conf. Mach. Learn.","author":"Schaul"},{"key":"ref30","first-page":"1","article-title":"Visual reinforcement learning with imagined goals","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Nair"},{"key":"ref31","first-page":"14843","article-title":"Planning with goal-conditioned policies","volume-title":"Proc. 33rd Int. Conf. Neural Inf. Process. Syst.","author":"Nasiriany"},{"key":"ref32","first-page":"5048","article-title":"Hindsight experience replay","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Andrychowicz"},{"key":"ref33","first-page":"1","article-title":"Learning subgoal representations with slow dynamics","volume-title":"Proc. Int. Conf. Learn. 
Represent.","author":"Li"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11741"},{"key":"ref35","first-page":"1922","article-title":"Landmark based reward shaping in reinforcement learning with hidden states","volume-title":"Proc. 18th Int. Conf. Auto. Agents MultiAgent Syst. (AAMAS)","author":"Demir"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/IS.2008.4670492"},{"key":"ref37","first-page":"1409","article-title":"Hierarchical reinforcement learning with advantage-based auxiliary rewards","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Li"},{"key":"ref38","first-page":"8411","article-title":"Learning from trajectories via subgoal discovery","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Paul"},{"key":"ref39","first-page":"792","article-title":"Principled methods for advising reinforcement learning agents","volume-title":"Proc. 28th Int. Conf. Mach. Learn. (ICML)","author":"Wiewiora"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICMLA.2009.33"},{"key":"ref41","first-page":"440","article-title":"The influence of reward on the speed of reinforcement learning: An analysis of shaping","volume-title":"Proc. 20th Int. Conf. Int. Conf. Mach. Learn.","author":"Laud"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.2200\/s00268ed1v01y201005aim009"},{"key":"ref43","first-page":"1057","article-title":"Policy gradient methods for reinforcement learning with function approximation","volume-title":"Advances in Neural Information Processing Systems","volume":"12","author":"Sutton","year":"2000"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref45","first-page":"1008","article-title":"Actor-critic algorithms","volume-title":"Advances in Neural Information Processing Systems","volume":"12","author":"Konda","year":"2000"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1190"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-87536-9_37"},{"key":"ref48","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton","year":"2018"},{"key":"ref49","first-page":"1633","article-title":"Transfer learning for reinforcement learning domains: A survey","volume":"10","author":"Taylor","year":"2009","journal-title":"J. Mach. Learn. Res."},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1145\/1273496.1273607"},{"key":"ref51","volume-title":"JS-Star 2012","author":"Tanaka","year":"2012"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v25i1.7903"},{"key":"ref53","article-title":"OpenAI gym","author":"Brockman","year":"2016","journal-title":"arXiv:1606.01540"},{"key":"ref54","first-page":"1","article-title":"Continuous control with deep reinforcement learning","volume-title":"Proc. ICLR","author":"Lillicrap"},{"key":"ref55","volume-title":"OpenAI Baselines","author":"Dhariwal","year":"2017"},{"key":"ref56","first-page":"1","article-title":"Exploration by random network distillation","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Burda"},{"key":"ref57","first-page":"8024","article-title":"PyTorch: An imperative style, high-performance deep learning library","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"32","author":"Paszke"},{"key":"ref58","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref59","article-title":"Deep learning using rectified linear units (ReLU)","author":"Agarap","year":"2018","journal-title":"arXiv:1803.08375"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1137\/0330046"},{"key":"ref61","first-page":"115","article-title":"Making a science of model search: Hyperparameter optimization in hundreds of dimensions for vision architectures","volume-title":"Proc. 30th Int. Conf. Mach. Learn.","volume":"28","author":"Bergstra"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330701"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1145\/1597735.1597738"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/10005208\/10047888.pdf?arnumber=10047888","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T15:22:02Z","timestamp":1707837722000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10047888\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":63,"URL":"https:\/\/doi.org\/10.1109\/access.2023.3246267","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}