{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,10]],"date-time":"2026-05-10T16:18:09Z","timestamp":1778429889599,"version":"3.51.4"},"reference-count":35,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"11","license":[{"start":{"date-parts":[[2020,11,1]],"date-time":"2020-11-01T00:00:00Z","timestamp":1604188800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,11,1]],"date-time":"2020-11-01T00:00:00Z","timestamp":1604188800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,11,1]],"date-time":"2020-11-01T00:00:00Z","timestamp":1604188800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["91746118"],"award-info":[{"award-number":["91746118"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shenzhen Municipal Science and Technology Innovation Committee Basic Research","award":["JCYJ20170410172224515"],"award-info":[{"award-number":["JCYJ20170410172224515"]}]},{"name":"Shenzhen Science and Technology Innovation Committee","award":["ZDSYS20170725140921348"],"award-info":[{"award-number":["ZDSYS20170725140921348"]}]},{"name":"Robotic Discipline Development Fund","award":["2016-1418"],"award-info":[{"award-number":["2016-1418"]}]},{"name":"Shenzhen Institute of Artificial Intelligence and Robotics for Society"},{"name":"ARC Research Hub on Integrated Energy Storage Solutions","award":["IH180100020"],"award-info":[{"award-number":["IH180100020"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Ind. Inf."],"published-print":{"date-parts":[[2020,11]]},"DOI":"10.1109\/tii.2020.2974037","type":"journal-article","created":{"date-parts":[[2020,2,14]],"date-time":"2020-02-14T21:30:56Z","timestamp":1581715856000},"page":"6912-6921","source":"Crossref","is-referenced-by-count":152,"title":["Cooperative Wind Farm Control With Deep Reinforcement Learning and Knowledge-Assisted Learning"],"prefix":"10.1109","volume":"16","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3133-3137","authenticated-orcid":false,"given":"Huan","family":"Zhao","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5446-2655","authenticated-orcid":false,"given":"Junhua","family":"Zhao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8507-0558","authenticated-orcid":false,"given":"Jing","family":"Qiu","sequence":"additional","affiliation":[]},{"given":"Gaoqi","family":"Liang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9659-0858","authenticated-orcid":false,"given":"Zhao Yang","family":"Dong","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICMLA.2016.0051"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2017.7989385"},{"key":"ref31","first-page":"387","article-title":"Deterministic policy gradient algorithms","author":"silver","year":"2014","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref30","first-page":"3675","article-title":"Hierarchical deep reinforcement learning: Integrating temporal abstraction and intrinsic motivation","author":"kulkarni","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ROBOT.1997.606886"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.23919\/ChiCC.2018.8482853"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2013.6579907"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.enconman.2015.05.031"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2015.11.064"},{"key":"ref13","author":"sutton","year":"2018","journal-title":"Reinforcement Learning An Introduction"},{"key":"ref14","article-title":"Playing Atari with deep reinforcement learning","author":"mnih","year":"2013"},{"key":"ref15","article-title":"Continuous control with deep reinforcement learning","author":"lillicrap","year":"2015"},{"key":"ref16","first-page":"1437","article-title":"A comprehensive survey on safe reinforcement learning","volume":"16","author":"garcía","year":"2015","journal-title":"J Mach Learn Res"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1613\/jair.301"},{"key":"ref18","first-page":"213","article-title":"R-max &#x2013; A general polynomial time algorithm for near-optimal reinforcement learning","volume":"3","author":"brafman","year":"2002","journal-title":"J Mach Learn Res"},{"key":"ref19","article-title":"Actuator disc methods applied to wind turbines","author":"mikkelsen","year":"2003"},{"key":"ref28","first-page":"2094","article-title":"Deep reinforcement learning with double q-learning","author":"van hasselt","year":"2016","journal-title":"Proc 13th AAAI Conf Artif Intell"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2015.2395384"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCC.2011.2106494"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2013.2272888"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2015.2447508"},{"key":"ref29","first-page":"29","article-title":"Deep recurrent q-learning for partially observable MDPs","author":"hausknecht","year":"2015","journal-title":"Proc AAAI Fall Symp Series"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TIA.2015.2394435"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ACC.2014.6858970"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2015.2420792"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TSTE.2017.2774508"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TCST.2013.2257780"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.23919\/ACC.2017.7962923"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/0167-6105(80)90042-2"},{"key":"ref22","first-page":"407","article-title":"A simple model for cluster efficiency","author":"katic","year":"1986","journal-title":"Proc Eur Wind Energy Conf and Exhib"},{"key":"ref21","article-title":"Actuator line modeling of wind turbine wakes","author":"troldborg","year":"2009"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1002\/we.458"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1088\/1742-6596\/753\/3\/032005"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"ref25","article-title":"Deep reinforcement learning: An overview","author":"li","year":"2017"}],"container-title":["IEEE Transactions on Industrial Informatics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9424\/9153960\/08999726.pdf?arnumber=8999726","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T16:58:54Z","timestamp":1651078734000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8999726\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,11]]},"references-count":35,"journal-issue":{"issue":"11"},"URL":"https:\/\/doi.org\/10.1109\/tii.2020.2974037","relation":{},"ISSN":["1551-3203","1941-0050"],"issn-type":[{"value":"1551-3203","type":"print"},{"value":"1941-0050","type":"electronic"}],"subject":[],"published":{"date-parts":[[2020,11]]}}}