{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,15]],"date-time":"2026-04-15T20:17:46Z","timestamp":1776284266710,"version":"3.50.1"},"reference-count":9,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,7,3]],"date-time":"2025-07-03T00:00:00Z","timestamp":1751500800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,7,3]],"date-time":"2025-07-03T00:00:00Z","timestamp":1751500800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,7,3]]},"DOI":"10.1109\/icecet63943.2025.11471700","type":"proceedings-article","created":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T19:42:35Z","timestamp":1775763755000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["Improved PMSM Control using RL with a Modified Reward Function"],"prefix":"10.1109","author":[{"given":"Muhammad","family":"Azam","sequence":"first","affiliation":[{"name":"University of North Dakota,School of Electrical Engineering and Computer Science,Grand Forks,ND,USA"}]},{"given":"Hossein","family":"Salehfar","sequence":"additional","affiliation":[{"name":"University of North Dakota,School of Electrical Engineering and Computer Science,Grand Forks,ND,USA"}]}],"member":"263","reference":[{"issue":"6","key":"ref1","first-page":"4653","article-title":"Reinforcement Learning-Based Control for Permanent Magnet Synchronous Motors","volume-title":"IEEE Transactions on Industrial Electronics","volume":"66","author":"Zhang","year":"2019"},{"key":"ref2","first-page":"113497","article-title":"Reward Shaping for Speed Control of PMSM Using Deep Reinforcement Learning","volume-title":"IEEE Access","volume":"8","author":"Zhang","year":"2020"},{"issue":"9","key":"ref3","first-page":"2264","article-title":"A Deep Deterministic Policy Gradient Algorithm with Improved Reward Function for PMSM Drive","volume-title":"Energies","volume":"13","author":"Liu","year":"2020"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1201\/9781420014235"},{"issue":"8","key":"ref5","first-page":"1247","article-title":"Sensorless Control of Electric Motor Drives","volume-title":"Proceedings of the IEEE","volume":"90","author":"Rajashekara"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/icra.2018.8463189"},{"key":"ref7","first-page":"1587","article-title":"Addressing Function Approximation Error in Actor-Critic Methods","volume-title":"Proc. Int. Conf. Machine Learning (ICML)","author":"Fujimoto"},{"issue":"9","key":"ref8","first-page":"10019","article-title":"Deep Reinforcement Learning-Based Torque Control for PMSM Drives","volume-title":"IEEE Transactions on Power Electronics","volume":"36","author":"Jiang","year":"2021"},{"key":"ref9","article-title":"Train TD3 Agent for PMSM Control","volume-title":"MathWorks Documentation"}],"event":{"name":"2025 5th International Conference on Electrical, Computer and Energy Technologies (ICECET)","location":"Paris, France","start":{"date-parts":[[2025,7,3]]},"end":{"date-parts":[[2025,7,6]]}},"container-title":["2025 5th International Conference on Electrical, Computer and Energy Technologies (ICECET)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11471878\/11471697\/11471700.pdf?arnumber=11471700","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,15]],"date-time":"2026-04-15T19:24:08Z","timestamp":1776281048000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11471700\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,3]]},"references-count":9,"URL":"https:\/\/doi.org\/10.1109\/icecet63943.2025.11471700","relation":{},"subject":[],"published":{"date-parts":[[2025,7,3]]}}}