{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T06:30:13Z","timestamp":1763706613805,"version":"3.37.3"},"reference-count":46,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2023,12,1]],"date-time":"2023-12-01T00:00:00Z","timestamp":1701388800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,12,1]],"date-time":"2023-12-01T00:00:00Z","timestamp":1701388800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,12,1]],"date-time":"2023-12-01T00:00:00Z","timestamp":1701388800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2018AAA0101503"],"award-info":[{"award-number":["2018AAA0101503"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100013148","name":"Science and Technology Project of State Grid Corporation of China (SGCC): Fundamental Theory of Human-in-the-Loop Hybrid- Augmented Intelligence for Power Grid Dispatch and Control","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100013148","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Syst. Man Cybern, Syst."],"published-print":{"date-parts":[[2023,12]]},"DOI":"10.1109\/tsmc.2023.3296773","type":"journal-article","created":{"date-parts":[[2023,8,8]],"date-time":"2023-08-08T17:36:40Z","timestamp":1691516200000},"page":"7403-7414","source":"Crossref","is-referenced-by-count":2,"title":["Ask-AC: An Initiative Advisor-in-the-Loop Actor\u2013Critic Framework"],"prefix":"10.1109","volume":"53","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0584-9129","authenticated-orcid":false,"given":"Shunyu","family":"Liu","sequence":"first","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2492-5230","authenticated-orcid":false,"given":"Kaixuan","family":"Chen","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-5499-5388","authenticated-orcid":false,"given":"Na","family":"Yu","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3671-6521","authenticated-orcid":false,"given":"Jie","family":"Song","sequence":"additional","affiliation":[{"name":"College of Software Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8640-8434","authenticated-orcid":false,"given":"Zunlei","family":"Feng","sequence":"additional","affiliation":[{"name":"College of Software Technology, Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2621-6048","authenticated-orcid":false,"given":"Mingli","family":"Song","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Hangzhou, 
China"}]}],"member":"263","reference":[{"volume-title":"Reinforcement Learning: An Introduction","year":"2018","author":"Sutton","key":"ref1"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.13140\/RG.2.2.18893.74727"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-019-1724-z"},{"key":"ref5","first-page":"1856","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Haarnoja"},{"key":"ref6","first-page":"1","article-title":"Continuous control with deep reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Lillicrap"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/817"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/884"},{"key":"ref9","first-page":"3366","article-title":"Policy shaping with human teachers","volume-title":"Proc. Int. Joint Conf. Artif. Intell.","author":"Cederborg"},{"key":"ref10","first-page":"2625","article-title":"Policy shaping: Integrating human feedback with reinforcement learning","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Griffith"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/1597735.1597738"},{"key":"ref12","first-page":"2285","article-title":"Interactive learning from policy-dependent human feedback","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"MacGlashan"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11485"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2020.3035406"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/331"},{"key":"ref16","article-title":"ProLoNets: Neural-encoding human experts\u2019 domain knowledge to warm start reinforcement learning","author":"Silva","year":"2019","journal-title":"arXiv:1902.06007"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-019-04509-x"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2020\/317"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2020.3000073"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.11396"},{"key":"ref21","first-page":"804","article-title":"Interactive teaching strategies for agent training","volume-title":"Proc. Int. Joint Conf. Artif. Intell.","author":"Amir"},{"key":"ref22","first-page":"1053","article-title":"Teaching on a budget: Agents advising agents in reinforcement learning","volume-title":"Proc. Int. Conf. Auton. Agents Multi-Agent Syst.","author":"Torrey"},{"key":"ref23","first-page":"1100","article-title":"Simultaneously learning and advising in multiagent reinforcement learning","volume-title":"Proc. Int. Conf. Auton. Agents Multi-Agent Syst.","author":"da Silva"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016128"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.6036"},{"key":"ref26","first-page":"1","article-title":"Actor-Mimic: Deep multitask and transfer reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Parisotto"},{"key":"ref27","first-page":"1","article-title":"Policy distillation","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Rusu"},{"key":"ref28","first-page":"4496","article-title":"Distral: Robust multitask reinforcement learning","volume-title":"Proc. Annu. Conf. 
Neural Inf. Process. Syst.","author":"Teh"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10733"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2008.10.024"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11757"},{"key":"ref32","first-page":"4572","article-title":"Generative adversarial imitation learning","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Ho"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/3054912"},{"key":"ref34","first-page":"2474","article-title":"Policy optimization with demonstrations","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"80","author":"Kang"},{"key":"ref35","first-page":"2859","article-title":"Learning from limited demonstrations","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Kim"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/687"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/530"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2020.3041775"},{"key":"ref40","first-page":"1928","article-title":"Asynchronous methods for deep reinforcement learning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mnih"},{"key":"ref41","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv:1707.06347"},{"volume-title":"Openai gym","year":"2016","author":"Brockman","key":"ref42"},{"volume-title":"Minimalistic gridworld environment for OpenAI gym","year":"2018","author":"Chevalier-Boisvert","key":"ref43"},{"key":"ref44","first-page":"1","article-title":"IQ-Learn: Inverse soft-Q learning for imitation","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Garg"},{"key":"ref45","first-page":"1","article-title":"What matters for adversarial imitation learning?","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Orsini"},{"key":"ref46","first-page":"1","article-title":"Imitation learning by reinforcement learning","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Ciosek"}],"container-title":["IEEE Transactions on Systems, Man, and Cybernetics: Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221021\/10321815\/10210582.pdf?arnumber=10210582","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,4,13]],"date-time":"2024-04-13T04:50:30Z","timestamp":1712983830000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10210582\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12]]},"references-count":46,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tsmc.2023.3296773","relation":{},"ISSN":["2168-2216","2168-2232"],"issn-type":[{"type":"print","value":"2168-2216"},{"type":"electronic","value":"2168-2232"}],"subject":[],"published":{"date-parts":[[2023,12]]}}}
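The record above is a Crossref work message: the public Crossref REST API serves it at https://api.crossref.org/works/{DOI}, with the bibliographic fields nested under "message". A minimal Python sketch of fetching and summarizing this particular record follows; the third-party `requests` dependency and the printed citation format are illustrative choices, not part of the record itself.

```python
# Minimal sketch: fetch this work's Crossref record and pull out a few of the
# fields shown above. Assumes the public Crossref REST API (api.crossref.org)
# and the `requests` package; error handling is kept to the bare minimum.
import requests

DOI = "10.1109/tsmc.2023.3296773"  # the DOI field of the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # payload mirrors the "message" object above

# "title" and "container-title" are single-element arrays in this record;
# each author object carries "given" and "family" name parts.
title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])
print(f'{authors}. "{title}". {work["container-title"][0]}, '
      f'vol. {work["volume"]}, no. {work["issue"]}, pp. {work["page"]}.')
```

Run against this DOI, the script would print the six authors followed by the Ask-AC title, journal, volume 53, issue 12, and pages 7403-7414, all taken directly from the fields in the record above.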