{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T05:02:34Z","timestamp":1770786154944,"version":"3.50.0"},"reference-count":32,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["52275099"],"award-info":[{"award-number":["52275099"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Ind. Electron."],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1109\/tie.2025.3600552","type":"journal-article","created":{"date-parts":[[2025,10,16]],"date-time":"2025-10-16T17:37:27Z","timestamp":1760636247000},"page":"2564-2575","source":"Crossref","is-referenced-by-count":0,"title":["ERL-Fill: An Emotion-Aware Reinforcement Learning Framework With Staged Pretraining for Gas\u2013Solid Flow Filling Control"],"prefix":"10.1109","volume":"73","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6280-8254","authenticated-orcid":false,"given":"Qihang","family":"Ma","sequence":"first","affiliation":[{"name":"School of Mechatronics Engineering, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8543-9455","authenticated-orcid":false,"given":"Gaoliang","family":"Peng","sequence":"additional","affiliation":[{"name":"School of Mechatronics Engineering, Harbin Institute of Technology, Harbin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1533-6979","authenticated-orcid":false,"given":"Wei","family":"Zhang","sequence":"additional","affiliation":[{"name":"College of Information Science and Technology, Eastern Institute of Technology, Ningbo, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-9977-1848","authenticated-orcid":false,"given":"Jinghan","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Mechatronics Engineering, Harbin Institute of Technology, Harbin, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.powtec.2019.07.073"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1002\/ange.202102009"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/jsen.2025.3548564"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1002\/adem.202001002"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1016\/j.addma.2022.102766"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/tmag.2015.2454500"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.jclepro.2023.139083"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/S0005-1098(98)00178-2"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/S0032-5910(00)00294-1"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.powtec.2017.08.064"},{"key":"ref11","article-title":"Safe reinforcement learning direct torque control for continuous control set permanent magnet synchronous motor drives","author":"Schenke","year":"2024","journal-title":"Authorea Preprints"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICMECT.2019.8932124"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.3390\/pr10122503"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/lra.2025.3544927"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/tmech.2025.3541797"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TIE.2022.3204966"},{"issue":"1","key":"ref17","doi-asserted-by":"crossref","first-page":"845","DOI":"10.1109\/TII.2023.3263274","article-title":"A data-driven reinforcement learning enabled battery fast charging optimization using real-world experimental data","volume":"20","author":"Li","year":"2023","journal-title":"IEEE Trans. Ind. Informat."},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3207346"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TSMC.2021.3098451"},{"key":"ref20","first-page":"1861","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","volume-title":"Proc. Int. Conf. Mach. Learn","author":"Haarnoja","year":"2018"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2018.2805379"},{"key":"ref22","first-page":"1587","article-title":"Addressing function approximation error in actor-critic methods","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fujimoto","year":"2018"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/LRA.2023.3342559"},{"key":"ref24","first-page":"1179","article-title":"Conservative Q-learning for offline reinforcement learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Kumar","year":"2020"},{"key":"ref25","first-page":"1","article-title":"The emotional foundations of human-agent interaction","volume":"129","author":"Cowie","year":"2019","journal-title":"Int. J. Human-Comput. Stud."},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.4018\/jse.2011010103"},{"issue":"4","key":"ref27","first-page":"585","article-title":"An emotion-inspired reward shaping approach for social human-robot interaction","volume":"45","author":"Kelley","year":"2021","journal-title":"Auton. Robots"},{"issue":"8","key":"ref28","first-page":"9052","article-title":"Emotion-driven reinforcement learning for safe exploration","volume-title":"Proc. AAAI Conf. Artif. Intell.","volume":"36","author":"Huang","year":"2022"},{"key":"ref29","first-page":"7487","article-title":"Stabilizing transformers for reinforcement learning","volume-title":"Proc. 37th Int. Conf. Mach. Learn. (ICML)","volume":"119","author":"Parisotto","year":"2020"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2909031"},{"key":"ref31","first-page":"2304","article-title":"Dynamic experience replay for efficient exploration","volume-title":"Proc. 27th Int. Joint Conf. Artif. Intell. (IJCAI)","author":"Luo","year":"2018"},{"key":"ref32","first-page":"1","article-title":"Preventing gradient attenuation in Lipschitz constrained convolutional networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NeurIPS)","volume":"32","author":"Li","year":"2019"}],"container-title":["IEEE Transactions on Industrial Electronics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/41\/11385824\/11205975.pdf?arnumber=11205975","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,10]],"date-time":"2026-02-10T21:05:34Z","timestamp":1770757534000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11205975\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2]]},"references-count":32,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tie.2025.3600552","relation":{},"ISSN":["0278-0046","1557-9948"],"issn-type":[{"value":"0278-0046","type":"print"},{"value":"1557-9948","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2]]}}}