{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T03:21:04Z","timestamp":1740108064768,"version":"3.37.3"},"reference-count":37,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2023,11,11]],"date-time":"2023-11-11T00:00:00Z","timestamp":1699660800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,11,11]],"date-time":"2023-11-11T00:00:00Z","timestamp":1699660800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62273002"],"award-info":[{"award-number":["62273002"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61873113"],"award-info":[{"award-number":["61873113"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Neural Comput &amp; Applic"],"published-print":{"date-parts":[[2024,1]]},"DOI":"10.1007\/s00521-023-09112-9","type":"journal-article","created":{"date-parts":[[2023,11,11]],"date-time":"2023-11-11T15:01:31Z","timestamp":1699714891000},"page":"1429-1447","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["DRL-dEWMA: a composite framework for run-to-run control in the semiconductor manufacturing process"],"prefix":"10.1007","volume":"36","author":[{"given":"Zhu","family":"Ma","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0993-3937","authenticated-orcid":false,"given":"Tianhong","family":"Pan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,11,11]]},"reference":[{"issue":"2","key":"9112_CR1","doi-asserted-by":"publisher","first-page":"305","DOI":"10.3390\/pr9020305","volume":"9","author":"P Espadinha-Cruz","year":"2021","unstructured":"Espadinha-Cruz P, Godina R, Rodrigues EM (2021) A review of data mining applications in semiconductor manufacturing. Processes 9(2):305","journal-title":"Processes"},{"key":"9112_CR2","doi-asserted-by":"crossref","unstructured":"Moyne J, Del\u00a0Castillo E, Hurwitz AM (2018) Run-to-run control in semiconductor manufacturing, CRC press","DOI":"10.1201\/9781420040661"},{"key":"9112_CR3","doi-asserted-by":"publisher","first-page":"107","DOI":"10.1016\/j.isatra.2018.09.005","volume":"83","author":"K Liu","year":"2018","unstructured":"Liu K, Chen Y, Zhang T, Tian S, Zhang X (2018) A survey of run-to-run control for batch processes. ISA Trans 83:107\u2013125","journal-title":"ISA Trans"},{"issue":"2","key":"9112_CR4","doi-asserted-by":"publisher","first-page":"154","DOI":"10.1109\/TSM.2019.2897828","volume":"32","author":"HY Wang","year":"2019","unstructured":"Wang HY, Pan TH, Wong DS-H, Tan F (2019) An extended state observer-based run to run control for semiconductor manufacturing processes. IEEE Trans Semicond Manuf 32(2):154\u2013162","journal-title":"IEEE Trans Semicond Manuf"},{"issue":"8","key":"9112_CR5","doi-asserted-by":"publisher","first-page":"4975","DOI":"10.1109\/TII.2019.2957145","volume":"16","author":"M Khakifirooz","year":"2019","unstructured":"Khakifirooz M, Chien C-F, Fathi M, Pardalos PM (2019) Minimax optimization for recipe management in high-mixed semiconductor lithography process. IEEE Trans Industr Inf 16(8):4975\u20134985","journal-title":"IEEE Trans Industr Inf"},{"issue":"4","key":"9112_CR6","doi-asserted-by":"publisher","first-page":"1846","DOI":"10.1109\/TASE.2020.3021949","volume":"18","author":"S-KS Fan","year":"2020","unstructured":"Fan S-KS, Jen C-H, Hsu C-Y, Liao Y-L (2020) A new double exponentially weighted moving average run-to-run control using a disturbance-accumulating strategy for mixed-product mode. IEEE Trans Autom Sci Eng 18(4):1846\u20131860","journal-title":"IEEE Trans Autom Sci Eng"},{"issue":"3","key":"9112_CR7","doi-asserted-by":"publisher","first-page":"387","DOI":"10.1109\/TSM.2021.3096787","volume":"34","author":"Z Zhong","year":"2021","unstructured":"Zhong Z, Wang A, Kim H, Paynabar K, Shi J (2021) Adaptive cautious regularized run-to-run controller for lithography process. IEEE Trans Semicond Manuf 34(3):387\u2013397","journal-title":"IEEE Trans Semicond Manuf"},{"key":"9112_CR8","doi-asserted-by":"publisher","DOI":"10.1016\/j.compchemeng.2022.108044","volume":"168","author":"M Tom","year":"2022","unstructured":"Tom M, Yun S, Wang H, Ou F, Orkoulas G, Christofides PD (2022) Machine learning-based run-to-run control of a spatial thermal atomic layer etching reactor. Comput Chem Eng 168:108044","journal-title":"Comput Chem Eng"},{"issue":"23","key":"9112_CR9","doi-asserted-by":"publisher","first-page":"8062","DOI":"10.1080\/00207543.2022.2164088","volume":"61","author":"L Chen","year":"2023","unstructured":"Chen L, Chu L, Ge C, Zhang Y (2023) A general tool-based multi-product model for high-mixed production in semiconductor manufacturing. Int J Product Res 61(23):8062\u20138079. https:\/\/doi.org\/10.1080\/00207543.2022.2164088","journal-title":"Int J Product Res"},{"issue":"5","key":"9112_CR10","doi-asserted-by":"publisher","first-page":"806","DOI":"10.1109\/TCPMT.2017.2691283","volume":"7","author":"Q Gong","year":"2017","unstructured":"Gong Q, Yang G, Pan C, Chen Y, Lee M (2017) Performance analysis of double EWMA controller under dynamic models with drift. IEEE Trans Components Pack Manuf Technol 7(5):806\u2013814","journal-title":"IEEE Trans Components Pack Manuf Technol"},{"issue":"6","key":"9112_CR11","doi-asserted-by":"publisher","first-page":"473","DOI":"10.1016\/j.omega.2004.03.003","volume":"32","author":"C-T Su","year":"2004","unstructured":"Su C-T, Hsu C-C (2004) A time-varying weights tuning method of the double EWMA controller. Omega 32(6):473\u2013480","journal-title":"Omega"},{"issue":"4","key":"9112_CR12","doi-asserted-by":"publisher","first-page":"564","DOI":"10.1016\/j.jprocont.2011.01.004","volume":"21","author":"W Wu","year":"2011","unstructured":"Wu W, Maa C-Y (2011) Double EWMA controller using neural network-based tuning algorithm for mimo non-squared systems. J Process Control 21(4):564\u2013572","journal-title":"J Process Control"},{"issue":"6","key":"9112_CR13","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1109\/MSP.2017.2743240","volume":"34","author":"K Arulkumaran","year":"2017","unstructured":"Arulkumaran K, Deisenroth MP, Brundage M, Bharath AA (2017) Deep reinforcement learning: a brief survey. IEEE Signal Process Mag 34(6):26\u201338","journal-title":"IEEE Signal Process Mag"},{"key":"9112_CR14","unstructured":"Ziya T, Karakose M (2020) Comparative study for deep reinforcement learning with cnn, rnn, and lstm in autonomous navigation. In: 2020 International conference on data analytics for business and industry: way towards a sustainable economy (ICDABI), IEEE, pp. 1\u20135"},{"issue":"2","key":"9112_CR15","doi-asserted-by":"publisher","first-page":"202","DOI":"10.1109\/TNN.2008.2005134","volume":"20","author":"P Arena","year":"2009","unstructured":"Arena P, Fortuna L, Frasca M, Patan\u00e9 L (2009) Learning anticipation via spiking networks: application to navigation control. IEEE Trans Neural Networks 20(2):202\u2013216","journal-title":"IEEE Trans Neural Networks"},{"key":"9112_CR16","unstructured":"Tang G, Kumar N, Yoo R, Michmizos K (2021) Deep reinforcement learning with population-coded spiking neural network for continuous control. In: Conference on robot learning, PMLR, pp. 2016\u20132029"},{"key":"9112_CR17","doi-asserted-by":"publisher","first-page":"5409","DOI":"10.1007\/s00521-020-05352-1","volume":"33","author":"Z Song","year":"2021","unstructured":"Song Z, Yang J, Mei X, Tao T, Xu M (2021) Deep reinforcement learning for permanent magnet synchronous motor speed control systems. Neural Comput Appl 33:5409\u20135418","journal-title":"Neural Comput Appl"},{"key":"9112_CR18","first-page":"1","volume":"35","author":"D Song","year":"2022","unstructured":"Song D, Gan W, Yao P, Zang W, Qu X (2022) Surface path tracking method of autonomous surface underwater vehicle based on deep reinforcement learning. Neural Comput Appl 35:1\u201321","journal-title":"Neural Comput Appl"},{"issue":"10","key":"9112_CR19","doi-asserted-by":"publisher","DOI":"10.1002\/aic.16689","volume":"65","author":"S Spielberg","year":"2019","unstructured":"Spielberg S, Tulsyan A, Lawrence NP, Loewen PD, Bhushan Gopaluni R (2019) Toward self-driving processes: a deep reinforcement learning approach to control. AIChE Journal 65(10):e16689","journal-title":"AIChE Journal"},{"key":"9112_CR20","unstructured":"Fujimoto S, Hoof H, Meger D (2018) Addressing function approximation error in actor-critic methods. In: International conference on machine learning, PMLR, pp. 1587\u20131596"},{"key":"9112_CR21","doi-asserted-by":"publisher","DOI":"10.1016\/j.compchemeng.2020.106886","volume":"139","author":"R Nian","year":"2020","unstructured":"Nian R, Liu J, Huang B (2020) A review on reinforcement learning: introduction and applications in industrial process control. Comput Chem Eng 139:106886","journal-title":"Comput Chem Eng"},{"issue":"9","key":"9112_CR22","doi-asserted-by":"publisher","first-page":"2028","DOI":"10.1002\/cjce.24508","volume":"100","author":"D Dutta","year":"2022","unstructured":"Dutta D, Upreti SR (2022) A survey and comparative evaluation of actor-critic methods in process control. Can J Chem Eng 100(9):2028\u20132056","journal-title":"Can J Chem Eng"},{"issue":"13","key":"9112_CR23","doi-asserted-by":"publisher","first-page":"4316","DOI":"10.1080\/00207543.2021.1973138","volume":"60","author":"M Panzer","year":"2022","unstructured":"Panzer M, Bender B (2022) Deep reinforcement learning in production systems: a systematic literature review. Int J Prod Res 60(13):4316\u20134341","journal-title":"Int J Prod Res"},{"key":"9112_CR24","doi-asserted-by":"publisher","DOI":"10.1016\/j.compind.2022.103748","volume":"143","author":"J Deng","year":"2022","unstructured":"Deng J, Sierla S, Sun J, Vyatkin V (2022) Reinforcement learning for industrial process control: a case study in flatness control in steel industry. Comput Ind 143:103748","journal-title":"Comput Ind"},{"key":"9112_CR25","doi-asserted-by":"publisher","first-page":"75","DOI":"10.1016\/j.cirpj.2022.11.003","volume":"40","author":"C Li","year":"2023","unstructured":"Li C, Zheng P, Yin Y, Wang B, Wang L (2023) Deep reinforcement learning in smart manufacturing: a review and prospects. CIRP J Manuf Sci Technol 40:75\u2013101","journal-title":"CIRP J Manuf Sci Technol"},{"issue":"4","key":"9112_CR26","doi-asserted-by":"publisher","first-page":"3609","DOI":"10.1109\/TIE.2020.2979561","volume":"68","author":"M Gheisarnejad","year":"2020","unstructured":"Gheisarnejad M, Khooban MH (2020) An intelligent non-integer PID controller-based deep reinforcement learning: Implementation and experimental results. IEEE Trans Industr Electron 68(4):3609\u20133618","journal-title":"IEEE Trans Industr Electron"},{"key":"9112_CR27","doi-asserted-by":"publisher","DOI":"10.1016\/j.conengprac.2021.105046","volume":"121","author":"NP Lawrence","year":"2022","unstructured":"Lawrence NP, Forbes MG, Loewen PD, McClement DG, Backstr\u00f6m JU, Gopaluni RB (2022) Deep reinforcement learning with shallow controllers: an experimental application to PID tuning. Control Eng Pract 121:105046","journal-title":"Control Eng Pract"},{"issue":"3","key":"9112_CR28","doi-asserted-by":"publisher","first-page":"2347","DOI":"10.1007\/s00521-022-07710-7","volume":"35","author":"R Shalaby","year":"2023","unstructured":"Shalaby R, El-Hossainy M, Abo-Zalam B, Mahmoud TA (2023) Optimal fractional-order PID controller based on fractional-order actor-critic algorithm. Neural Comput Appl 35(3):2347\u20132380","journal-title":"Neural Comput Appl"},{"key":"9112_CR29","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1016\/j.neucom.2021.06.096","volume":"484","author":"H Qin","year":"2022","unstructured":"Qin H, Tan P, Chen Z, Sun M, Sun Q (2022) Deep reinforcement learning based active disturbance rejection control for ship course control. Neurocomputing 484:99\u2013108","journal-title":"Neurocomputing"},{"key":"9112_CR30","doi-asserted-by":"publisher","DOI":"10.1016\/j.oceaneng.2022.110631","volume":"247","author":"Y Zheng","year":"2022","unstructured":"Zheng Y, Tao J, Sun Q, Sun H, Chen Z, Sun M, Xie G (2022) Soft actor-critic based active disturbance rejection path following control for unmanned surface vessel under wind and wave disturbances. Ocean Eng 247:110631","journal-title":"Ocean Eng"},{"issue":"3","key":"9112_CR31","doi-asserted-by":"publisher","first-page":"454","DOI":"10.1109\/TSM.2020.3002896","volume":"33","author":"J Yu","year":"2020","unstructured":"Yu J, Guo P (2020) Run-to-run control of chemical mechanical polishing process based on deep reinforcement learning. IEEE Trans Semicond Manuf 33(3):454\u2013465","journal-title":"IEEE Trans Semicond Manuf"},{"key":"9112_CR32","doi-asserted-by":"crossref","unstructured":"Ma Z, Pan T (2021) A quota-ddpg controller for run-to-run control. In: China automation congress (CAC). IEEE 2021: 2515\u20132519","DOI":"10.1109\/CAC53003.2021.9728433"},{"key":"9112_CR33","doi-asserted-by":"publisher","first-page":"19337","DOI":"10.1007\/s00521-023-08760-1","volume":"35","author":"Z Ma","year":"2023","unstructured":"Ma Z, Pan T (2023) Distributional reinforcement learning for run-to-run control in semiconductor manufacturing processes. Neural Comput Appl 35:19337\u201319350. https:\/\/doi.org\/10.1007\/s00521-023-08760-1","journal-title":"Neural Comput Appl"},{"key":"9112_CR34","unstructured":"Li Y, Du J, Jiang W (2021) Reinforcement learning for process control with application in semiconductor manufacturing. arXiv preprint arXiv:2110.11572"},{"issue":"1","key":"9112_CR35","doi-asserted-by":"publisher","first-page":"91","DOI":"10.1109\/TSM.2022.3225480","volume":"36","author":"Z Ma","year":"2022","unstructured":"Ma Z, Pan T (2022) Adaptive weight tuning of EWMA controller via model-free deep reinforcement learning. IEEE Trans Semicond Manuf 36(1):91\u201399","journal-title":"IEEE Trans Semicond Manuf"},{"issue":"3","key":"9112_CR36","doi-asserted-by":"publisher","first-page":"381","DOI":"10.1080\/00401706.2016.1228547","volume":"59","author":"S-T Tseng","year":"2017","unstructured":"Tseng S-T, Chen P-Y (2017) A generalized quasi-MMSE controller for run-to-run dynamic models. Technometrics 59(3):381\u2013390","journal-title":"Technometrics"},{"issue":"12","key":"9112_CR37","doi-asserted-by":"publisher","first-page":"1157","DOI":"10.1080\/07408179908969916","volume":"31","author":"ED Castillo","year":"1999","unstructured":"Castillo ED (1999) Long run and transient analysis of a double EWMA feedback controller. IIE Trans 31(12):1157\u20131169","journal-title":"IIE Trans"}],"container-title":["Neural Computing and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-023-09112-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00521-023-09112-9\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-023-09112-9.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,5]],"date-time":"2024-01-05T08:11:33Z","timestamp":1704442293000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00521-023-09112-9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,11]]},"references-count":37,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2024,1]]}},"alternative-id":["9112"],"URL":"https:\/\/doi.org\/10.1007\/s00521-023-09112-9","relation":{},"ISSN":["0941-0643","1433-3058"],"issn-type":[{"type":"print","value":"0941-0643"},{"type":"electronic","value":"1433-3058"}],"subject":[],"published":{"date-parts":[[2023,11,11]]},"assertion":[{"value":"28 March 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 October 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 November 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this manuscript.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}