{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,28]],"date-time":"2026-02-28T18:29:45Z","timestamp":1772303385418,"version":"3.50.1"},"reference-count":56,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,1]],"date-time":"2024-11-01T00:00:00Z","timestamp":1730419200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62022094"],"award-info":[{"award-number":["62022094"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61873350"],"award-info":[{"award-number":["61873350"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Zhejiang Laboratory","award":["2021NB0AB01"],"award-info":[{"award-number":["2021NB0AB01"]}]},{"DOI":"10.13039\/501100004735","name":"Hunan Provincial Natural Science Foundation of China","doi-asserted-by":"publisher","award":["2020JJ2049"],"award-info":[{"award-number":["2020JJ2049"]}],"id":[{"id":"10.13039\/501100004735","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2024,11]]},"DOI":"10.1109\/tnnls.2023.3289315","type":"journal-article","created":{"date-parts":[[2023,7,7]],"date-time":"2023-07-07T18:37:01Z","timestamp":1688755021000},"page":"15735-15744","source":"Crossref","is-referenced-by-count":37,"title":["Human-in-the-Loop Reinforcement Learning in Continuous-Action Space"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3353-2586","authenticated-orcid":false,"given":"Biao","family":"Luo","sequence":"first","affiliation":[{"name":"School of Automation, Central South University, Changsha, China"}]},{"given":"Zhengke","family":"Wu","sequence":"additional","affiliation":[{"name":"School of Automation, Central South University, Changsha, China"}]},{"given":"Fei","family":"Zhou","sequence":"additional","affiliation":[{"name":"School of Automation, Central South University, Changsha, China"}]},{"given":"Bing-Chuan","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Automation, Central South University, Changsha, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref3","article-title":"Dota 2 with large scale deep reinforcement learning","author":"Berner","year":"2019","journal-title":"arXiv:1912.06680"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.artint.2021.103535"},{"key":"ref6","first-page":"2546","article-title":"Reward is enough for convex MDPs","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NeurIPS)","author":"Zahavy"},{"key":"ref7","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Haarnoja"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2021.3055499"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2927227"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2743240"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2891792"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2021.3087733"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2008.10.024"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/884"},{"key":"ref16","first-page":"216","article-title":"Feature expansive reward learning: Rethinking human input","volume-title":"Proc. 16th ACM\/IEEE Int. Conf. Hum.-Robot Interact. (HRI)","author":"Bobu"},{"key":"ref17","first-page":"4299","article-title":"Deep reinforcement learning from human preferences","volume-title":"Proc. Neural Inf. Process. Syst. (NeurIPS)","author":"Christiano"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/THMS.2019.2912447"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/tnnls.2021.3106818"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/thms.2022.3163185"},{"key":"ref21","article-title":"Accelerating the learning of TAMER with counterfactual explanations","author":"Karalus","year":"2021","journal-title":"arXiv:2108.01358"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-33950-0_31"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/s10846-018-0839-z"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2013.6630809"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/1329125.1329407"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-72348-6_6"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/DEVLRN.2008.4640845"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICORR.2011.5975338"},{"key":"ref29","first-page":"663","article-title":"Algorithms for inverse reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","author":"Ng"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/THMS.2016.2558630"},{"key":"ref31","first-page":"6152","article-title":"PEBBLE: Feedback-efficient interactive reinforcement learning via relabeling experience and unsupervised pre-training","volume-title":"Proc. Int. Conf. Mach. Learn. (ICML)","volume":"139","author":"Lee"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/THMS.2021.3116119"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/1597735.1597738"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/s12369-012-0163-x"},{"key":"ref35","article-title":"DQN-TAMER: Human-in-the-loop reinforcement learning with intractable feedback","author":"Arakawa","year":"2018","journal-title":"arXiv:1810.11748"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11485"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.15607\/RSS.2019.XV.023"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3084198"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794412"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2020.3004787"},{"key":"ref41","first-page":"1053","article-title":"Teaching on a budget: Agents advising agents in reinforcement learning","volume-title":"Proc. Int. Conf. Auto. Agents Multi-Agent Syst. (AAMAS)","author":"Torrey"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.6036"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2019.8794328"},{"key":"ref44","first-page":"2625","article-title":"Policy shaping: Integrating human feedback with reinforcement learning","volume-title":"Proc. Neural Inf. Process. Syst. (NeurIPS)","author":"Griffith"},{"key":"ref45","first-page":"804","article-title":"Interactive teaching strategies for agent training","volume-title":"Proc. Int. Joint Conf. Artif. Intell. (IJCAI)","author":"Amir"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1145\/2449396.2449422"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390286"},{"key":"ref48","first-page":"627","article-title":"A reduction of imitation learning and structured prediction to no-regret online learning","volume-title":"Proc. 14th Int. Conf. Artif. Intell. Statist.","author":"Ross"},{"key":"ref49","first-page":"920","article-title":"Generative adversarial imitation learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"29","author":"Ho"},{"key":"ref50","article-title":"Learning robust rewards with adversarial inverse reinforcement learning","author":"Fu","year":"2017","journal-title":"arXiv:1710.11248"},{"key":"ref51","first-page":"1259","article-title":"A divergence minimization perspective on imitation learning methods","volume-title":"Proc. Conf. Robot Learn.","author":"Ghasemipour"},{"key":"ref52","first-page":"1582","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn., (ICML)","author":"Fujimoto"},{"key":"ref53","first-page":"331","article-title":"Markov decision processes","volume-title":"Handbooks in Operations Research and Management Science","volume":"2","author":"Puterman"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1145\/3472291"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/634"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/s11390-020-9487-4"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10737991\/10175618.pdf?arnumber=10175618","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T19:25:36Z","timestamp":1732735536000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10175618\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11]]},"references-count":56,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2023.3289315","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11]]}}}