{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T18:30:01Z","timestamp":1775154601511,"version":"3.50.1"},"reference-count":36,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,12,1]],"date-time":"2022-12-01T00:00:00Z","timestamp":1669852800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62072478"],"award-info":[{"award-number":["62072478"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Cybern."],"published-print":{"date-parts":[[2022,12]]},"DOI":"10.1109\/tcyb.2021.3121312","type":"journal-article","created":{"date-parts":[[2021,11,3]],"date-time":"2021-11-03T19:26:15Z","timestamp":1635967575000},"page":"13902-13914","source":"Crossref","is-referenced-by-count":116,"title":["An Accurate GRU-Based Power Time-Series Prediction Approach With Selective State Updating and Stochastic Optimization"],"prefix":"10.1109","volume":"52","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3109-2502","authenticated-orcid":false,"given":"Wendong","family":"Zheng","sequence":"first","affiliation":[{"name":"School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4234-1359","authenticated-orcid":false,"given":"Gang","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Sun Yat-sen University, Guangzhou, China"}]}],"member":"263","reference":[{"key":"ref33","first-page":"928","article-title":"Online convex programming and generalized infinitesimal gradient ascent","author":"zinkevich","year":"2003","journal-title":"Proc 20th Int Conf Mach Learn (ICML)"},{"key":"ref32","author":"ghodke","year":"2018","journal-title":"California Renwable Production"},{"key":"ref31","first-page":"2394","article-title":"A statistical investigation of long memory in language and music","volume":"97","author":"greaves-tunnell","year":"2019","journal-title":"Proc 36th Int Conf Mach Learn (ICML)"},{"key":"ref30","author":"chung","year":"2014","journal-title":"Empirical evaluation of gated recurrent neural networks on sequence modeling"},{"key":"ref36","year":"2018","journal-title":"Machine Learning Household Electric Power Consumption"},{"key":"ref35","author":"jhana","year":"2018","journal-title":"Hourly energy demand generation and weather"},{"key":"ref34","author":"mulla","year":"2018","journal-title":"Hourly Energy Consumption"},{"key":"ref10","first-page":"1","article-title":"Skip RNN: Learning to skip state updates in recurrent neural networks","author":"campos","year":"2018","journal-title":"Proc 6th Int Conf Learn Represent (ICLR)"},{"key":"ref11","first-page":"11365","article-title":"Do RNN and LSTM have long memory","author":"zhao","year":"2020","journal-title":"Proc 36th Int Conf Mach Learn"},{"key":"ref12","first-page":"1","article-title":"On the convergence of ADAM and beyond","author":"reddi","year":"2018","journal-title":"Proc 6th Int Conf Learn Represent (ICLR)"},{"key":"ref13","first-page":"1","article-title":"Adaptive gradient methods with dynamic bound of learning rate","author":"luo","year":"2019","journal-title":"Proc 7th Int Conf Learn Represent (ICLR)"},{"key":"ref14","first-page":"1","article-title":"Adversarial sparse transformer for time series forecasting","author":"wu","year":"2020","journal-title":"Proc Annu Conf Neural Inf Process Syst (NeurIPS)"},{"key":"ref15","first-page":"5244","article-title":"Enhancing the locality and breaking the memory bottleneck of transformer on time series forecasting","author":"li","year":"2019","journal-title":"Proc Annu Conf Neural Inf Process Syst (NIPS)"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2018.2832085"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2018.2789686"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2019.2945999"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2018.2863020"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/355"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICNSC.2019.8743312"},{"key":"ref27","first-page":"1","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2015","journal-title":"Proc 3rd Int Conf Learn Represent (ICLR)"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2020.01.031"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CCECE.2019.8861542"},{"key":"ref29","first-page":"1","article-title":"AdaShift: Decorrelation and convergence of adaptive learning rate methods","author":"zhou","year":"2019","journal-title":"Proc 7th Int Conf Learn Represent (ICLR)"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICMLA.2015.33"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/SMC.2016.7844673"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330858"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2018.2877510"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.future.2018.09.054"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v27i1.8480"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TCYB.2019.2903736"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1016\/j.apenergy.2020.114977"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/s00521-009-0331-6"},{"key":"ref24","first-page":"3882","article-title":"Phased LSTM: Accelerating recurrent network training for long or event-based sequences","author":"neil","year":"2016","journal-title":"Proc Adv Neural Inf Process Syst"},{"key":"ref23","first-page":"1","article-title":"Zoneout: Regularizing RNNs by randomly preserving hidden activations","author":"krueger","year":"2017","journal-title":"Proc 5th Int Conf Learn Represent (ICLR)"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/HPCC\/SmartCity\/DSS.2018.00062"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1111\/tgis.12644"}],"container-title":["IEEE Transactions on Cybernetics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6221036\/9954937\/09600449.pdf?arnumber=9600449","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,12]],"date-time":"2022-12-12T19:15:21Z","timestamp":1670872521000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9600449\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,12]]},"references-count":36,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tcyb.2021.3121312","relation":{},"ISSN":["2168-2267","2168-2275"],"issn-type":[{"value":"2168-2267","type":"print"},{"value":"2168-2275","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,12]]}}}