{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,30]],"date-time":"2026-01-30T05:55:19Z","timestamp":1769752519007,"version":"3.49.0"},"reference-count":45,"publisher":"Springer Science and Business Media LLC","issue":"8","license":[{"start":{"date-parts":[[2024,11,4]],"date-time":"2024-11-04T00:00:00Z","timestamp":1730678400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,4]],"date-time":"2024-11-04T00:00:00Z","timestamp":1730678400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No.61873014"],"award-info":[{"award-number":["No.61873014"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"the National Key R&D Program Funded Projects of China","award":["2020AAA0109202"],"award-info":[{"award-number":["2020AAA0109202"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Intell Manuf"],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1007\/s10845-024-02513-0","type":"journal-article","created":{"date-parts":[[2024,11,4]],"date-time":"2024-11-04T04:20:08Z","timestamp":1730694008000},"page":"5779-5800","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Online simulation task scheduling in cloud manufacturing with cross attention and deep reinforcement learning"],"prefix":"10.1007","volume":"36","author":[{"given":"Zhen","family":"Chen","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1989-6102","authenticated-orcid":false,"given":"Lin","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yuanjun","family":"Laili","sequence":"additional","affiliation":[]},{"given":"Xiaohan","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Fei","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,4]]},"reference":[{"issue":"11","key":"2513_CR1","doi-asserted-by":"publisher","first-page":"9069","DOI":"10.1016\/j.aej.2022.02.042","volume":"61","author":"MH Alabdullah","year":"2022","unstructured":"Alabdullah, M. H., & Abido, M. A. (2022). Microgrid energy management using deep q-network reinforcement learning. Alexandria Engineering Journal, 61(11), 9069\u20139078.","journal-title":"Alexandria Engineering Journal"},{"issue":"13","key":"2513_CR2","doi-asserted-by":"publisher","first-page":"11578","DOI":"10.1109\/JIOT.2021.3130474","volume":"9","author":"J Baek","year":"2021","unstructured":"Baek, J., & Kaddoum, G. (2021). Online partial offloading and task scheduling in SDN-FOG networks with deep recurrent reinforcement learning. IEEE Internet of Things Journal, 9(13), 11578\u201311589.","journal-title":"IEEE Internet of Things Journal"},{"key":"2513_CR3","doi-asserted-by":"crossref","unstructured":"Che, Y., Lin, F., & Liu, J. (2021). Deep reinforcement learning in M2M communication for resource scheduling. In 2021 World conference on computing and communication technologies (WCCCT) (pp. 97\u2013100). IEEE","DOI":"10.1109\/WCCCT52091.2021.00025"},{"key":"2513_CR4","unstructured":"Chen, X., Qu, G., Tang, Y., Low, S., & Li, N. (2021). Reinforcement learning for decision-making and control in power systems: Tutorial, review, and vision. arXiv preprint. arXiv:2102.01168"},{"issue":"11","key":"2513_CR5","doi-asserted-by":"publisher","first-page":"4253","DOI":"10.1109\/TCAD.2022.3197523","volume":"41","author":"Z Feng","year":"2022","unstructured":"Feng, Z., Zonghua, G., Haichuan, Yu., Deng, Q., & Niu, L. (2022). Online rerouting and rescheduling of time-triggered flows for fault tolerance in time-sensitive networking. IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 41(11), 4253\u20134264.","journal-title":"IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems"},{"key":"2513_CR6","doi-asserted-by":"publisher","DOI":"10.1016\/j.omega.2022.102643","volume":"111","author":"C Ferreira","year":"2022","unstructured":"Ferreira, C., Figueira, G., & Amorim, P. (2022). Effective and interpretable dispatching rules for dynamic job shops via guided empirical learning. Omega, 111, 102643.","journal-title":"Omega"},{"issue":"6","key":"2513_CR7","doi-asserted-by":"publisher","first-page":"965","DOI":"10.1080\/0305215X.2017.1391240","volume":"50","author":"K Gao","year":"2018","unstructured":"Gao, K., Wang, L., Luo, J., Jiang, H., Sadollah, A., & Pan, Q. (2018). Discrete harmony search algorithm for scheduling and rescheduling the reprocessing problems in remanufacturing: A case study. Engineering Optimization, 50(6), 965\u2013981.","journal-title":"Engineering Optimization"},{"key":"2513_CR8","doi-asserted-by":"crossref","unstructured":"Gu, D., Chen, J., Shi, X., Ran, L., Zhang, Y., & Shang, M. (2021). Heterogeneous-aware online cloud task scheduler based on clustering and deep reinforcement learning ensemble. In Advances in natural computation, fuzzy systems and knowledge discovery (pp. 152\u2013159). Springer.","DOI":"10.1007\/978-3-030-70665-4_18"},{"issue":"6","key":"2513_CR9","doi-asserted-by":"publisher","first-page":"7378","DOI":"10.3934\/mbe.2020377","volume":"17","author":"A Halty","year":"2020","unstructured":"Halty, A., S\u00e1nchez, R., V\u00e1zquez, V., Viana, V., Pineyro, P., & Rossit, D. A. (2020). Scheduling in cloud manufacturing systems: Recent systematic literature review. Mathematical Biosciences and Engineering, 17(6), 7378\u20137397. https:\/\/doi.org\/10.3934\/mbe.2020377","journal-title":"Mathematical Biosciences and Engineering"},{"key":"2513_CR10","unstructured":"Huang, S., & Onta\u00f1\u00f3n, S. (2020). A closer look at invalid action masking in policy gradient algorithms. arXiv preprint. arXiv:2006.14171"},{"issue":"3","key":"2513_CR11","doi-asserted-by":"publisher","first-page":"4232","DOI":"10.1109\/JSYST.2021.3122126","volume":"16","author":"Y Huang","year":"2022","unstructured":"Huang, Y., Cheng, L., Xue, L., Liu, C., Li, Y., Li, J., & Ward, T. (2022). Deep adversarial imitation reinforcement learning for QOS-aware cloud job scheduling. IEEE Systems Journal, 16(3), 4232\u20134242. https:\/\/doi.org\/10.1109\/JSYST.2021.3122126","journal-title":"IEEE Systems Journal"},{"issue":"4","key":"2513_CR12","doi-asserted-by":"publisher","first-page":"1041","DOI":"10.17762\/turcomat.v12i4.612","volume":"12","author":"I Mahmood","year":"2021","unstructured":"Mahmood, I., Sadeeq, M. A. M., Zeebaree, S. R. M., Shukur, H., Jacksi, K., Yasin, H., Radie, A. H., & Najat, Z. (2021). Task scheduling algorithms in cloud computing: A review. Turkish Journal of Computer and Mathematics Education (TURCOMAT), 12(4), 1041\u20131053.","journal-title":"Turkish Journal of Computer and Mathematics Education (TURCOMAT)"},{"issue":"8","key":"2513_CR13","doi-asserted-by":"publisher","first-page":"2120","DOI":"10.3390\/en14082120","volume":"14","author":"Y Ji","year":"2021","unstructured":"Ji, Y., Wang, J., Jiacan, X., & Li, D. (2021). Data-driven online energy scheduling of a microgrid based on deep reinforcement learning. Energies, 14(8), 2120.","journal-title":"Energies"},{"issue":"9","key":"2513_CR14","doi-asserted-by":"publisher","first-page":"6597","DOI":"10.1109\/JIOT.2021.3113872","volume":"9","author":"F Jiang","year":"2021","unstructured":"Jiang, F., Dong, L., Wang, K., Yang, K., & Pan, C. (2021). Distributed resource scheduling for large-scale MEC systems: A multiagent ensemble deep reinforcement learning with imitation acceleration. IEEE Internet of Things Journal, 9(9), 6597\u20136610.","journal-title":"IEEE Internet of Things Journal"},{"issue":"10","key":"2513_CR15","doi-asserted-by":"publisher","first-page":"9278","DOI":"10.1109\/JIOT.2020.2988457","volume":"7","author":"F Jiang","year":"2020","unstructured":"Jiang, F., Wang, K., Dong, L., Pan, C., & Yang, K. (2020). Stacked autoencoder-based deep reinforcement learning for online resource scheduling in large-scale MEC networks. IEEE Internet of Things Journal, 7(10), 9278\u20139290.","journal-title":"IEEE Internet of Things Journal"},{"key":"2513_CR16","first-page":"1","volume":"34","author":"BM Kayhan","year":"2021","unstructured":"Kayhan, B. M., & Yildiz, G. (2021). Reinforcement learning applications to machine scheduling problems: A comprehensive literature review. Journal of Intelligent Manufacturing, 34, 1\u201325.","journal-title":"Journal of Intelligent Manufacturing"},{"key":"2513_CR17","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.jnca.2019.06.006","volume":"143","author":"M Kumar","year":"2019","unstructured":"Kumar, M., Sharma, S. C., Goel, A., & Singh, S. P. (2019). A comprehensive survey for scheduling techniques in cloud computing. Journal of Network and Computer Applications, 143, 1\u201333.","journal-title":"Journal of Network and Computer Applications"},{"key":"2513_CR18","doi-asserted-by":"publisher","DOI":"10.1016\/j.simpat.2022.102521","volume":"118","author":"F Li","year":"2022","unstructured":"Li, F., Tan, W. J., & Cai, W. (2022). A wholistic optimization of containerized workflow scheduling and deployment in the cloud-edge environment. Simulation Modelling Practice and Theory, 118, 102521.","journal-title":"Simulation Modelling Practice and Theory"},{"key":"2513_CR19","doi-asserted-by":"crossref","unstructured":"Lin, J., Peng, Z., & Cui, D. (2018). Deep reinforcement learning for multi-resource cloud job scheduling. In Neural information processing: 25th International conference, ICONIP 2018, SIEM Reap, Cambodia, 13\u201316 December 2018, Proceedings, Part III 25 (pp. 289\u2013302). Springer.","DOI":"10.1007\/978-3-030-04182-3_26"},{"issue":"7","key":"2513_CR20","doi-asserted-by":"publisher","first-page":"1927","DOI":"10.1080\/00207543.2019.1636321","volume":"58","author":"D Mourtzis","year":"2020","unstructured":"Mourtzis, D. (2020). Simulation in the design and operation of manufacturing systems: State of the art and new trends. International Journal of Production Research, 58(7), 1927\u20131949.","journal-title":"International Journal of Production Research"},{"issue":"12","key":"2513_CR21","doi-asserted-by":"publisher","first-page":"10519","DOI":"10.1109\/JIOT.2023.3241222","volume":"10","author":"L Niu","year":"2023","unstructured":"Niu, L., Chen, X., Zhang, N., Zhu, Y., Yin, R., Wu, C., & Cao, Y. (2023). Multi-agent meta-reinforcement learning for optimized task scheduling in heterogeneous edge computing systems. IEEE Internet of Things Journal, 10(12), 10519\u201310531. https:\/\/doi.org\/10.1109\/JIOT.2023.3241222","journal-title":"IEEE Internet of Things Journal"},{"key":"2513_CR22","unstructured":"Parisotto, E., Song, F., Rae, J., Pascanu, R., Gulcehre, C., Jayakumar, S., Jaderberg, M., Kaufman, R. L., Clark, A., Noury, S., Botvinick, M., Heess, N., & Hadsell, R. (2020). Stabilizing transformers for reinforcement learning. In International conference on machine learning (pp. 7487\u20137498). PMLR."},{"key":"2513_CR23","doi-asserted-by":"publisher","unstructured":"Ran, L., Shi, X., & Shang, M. (2019). SLAS-aware online task scheduling based on deep reinforcement learning method in cloud environment. In 2019 IEEE 21st international conference on high performance computing and communications; IEEE 17th international conference on smart city; IEEE 5th international conference on data science and systems (HPCC\/SmartCity\/DSS) (pp. 1518\u20131525). https:\/\/doi.org\/10.1109\/HPCC\/SmartCity\/DSS.2019.00209","DOI":"10.1109\/HPCC\/SmartCity\/DSS.2019.00209"},{"key":"2513_CR24","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3291371","author":"L Ren","year":"2023","unstructured":"Ren, L., Jia, Z., Laili, Y., & Huang, D. (2023). Deep learning for time-series prediction in IIoT: Progress, challenges, and prospects. IEEE Transactions on Neural Networks and Learning Systems. https:\/\/doi.org\/10.1109\/TNNLS.2023.3291371","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"2513_CR25","doi-asserted-by":"publisher","DOI":"10.1016\/j.simpat.2021.102328","volume":"110","author":"N Rizvi","year":"2021","unstructured":"Rizvi, N., Dharavath, R., & Edla, D. R. (2021). Cost and makespan aware workflow scheduling in IaaS clouds using hybrid spider monkey optimization. Simulation Modelling Practice and Theory, 110, 102328.","journal-title":"Simulation Modelling Practice and Theory"},{"key":"2513_CR26","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2021.106886","volume":"219","author":"NS Shahraki","year":"2021","unstructured":"Shahraki, N. S., & Zahiri, S. H. (2021). DRLA: Dimensionality ranking in learning automata and its application on designing analog active filters. Knowledge-Based Systems, 219, 106886.","journal-title":"Knowledge-Based Systems"},{"issue":"1","key":"2513_CR27","first-page":"3","volume":"5","author":"A Shahzad","year":"2016","unstructured":"Shahzad, A., & Mebarki, N. (2016). Learning dispatching rules for scheduling: A synergistic view comprising decision trees. Tabu search and simulation. Computers, 5(1), 3.","journal-title":"Tabu search and simulation. Computers"},{"issue":"6","key":"2513_CR28","doi-asserted-by":"publisher","first-page":"4171","DOI":"10.1007\/s10586-022-03630-2","volume":"25","author":"K Siddesha","year":"2022","unstructured":"Siddesha, K., Jayaramaiah, G. V., & Singh, C. (2022). A novel deep reinforcement learning scheme for task scheduling in cloud computing. Cluster Computing, 25(6), 4171\u20134188.","journal-title":"Cluster Computing"},{"key":"2513_CR29","doi-asserted-by":"crossref","unstructured":"Sun, S., & Li, X. (2020). Deep-reinforcement-learning-based scheduling with contiguous resource allocation for next-generation cellular systems. arXiv preprint. arXiv:2010.11269","DOI":"10.1007\/978-3-030-80126-7_46"},{"key":"2513_CR30","unstructured":"Sutton, R. S., & Barto, A. G. (2018). Reinforcement learning: An introduction. MIT."},{"key":"2513_CR31","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2023.110880","volume":"278","author":"X-R Tao","year":"2023","unstructured":"Tao, X.-R., Pan, Q.-K., Sang, H.-Y., Gao, L., Yang, A.-L., & Rong, M. (2023). Nondominated sorting genetic algorithm-II with Q-learning for the distributed permutation flowshop rescheduling problem. Knowledge-Based Systems, 278, 110880.","journal-title":"Knowledge-Based Systems"},{"key":"2513_CR32","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, \u0141., & Polosukhin, I. (2017). Attention is all you need. In Advances in neural information processing systems (Vol. 30)."},{"key":"2513_CR33","doi-asserted-by":"publisher","first-page":"106","DOI":"10.1016\/j.ins.2019.05.012","volume":"498","author":"H Wang","year":"2019","unstructured":"Wang, H., Yulei, W., Min, G., Jie, X., & Tang, P. (2019). Data-driven dynamic resource scheduling for network slicing: A deep reinforcement learning approach. Information Sciences, 498, 106\u2013116.","journal-title":"Information Sciences"},{"key":"2513_CR34","doi-asserted-by":"publisher","first-page":"195","DOI":"10.1016\/j.future.2023.04.020","volume":"146","author":"X Wang","year":"2023","unstructured":"Wang, X., & Shen, H. (2023). Online scheduling of coflows by attention-empowered scalable deep reinforcement learning. Future Generation Computer Systems, 146, 195\u2013206.","journal-title":"Future Generation Computer Systems"},{"key":"2513_CR35","doi-asserted-by":"publisher","first-page":"452","DOI":"10.1016\/j.jmsy.2022.08.013","volume":"65","author":"X Wang","year":"2022","unstructured":"Wang, X., Zhang, L., Liu, Y., Zhao, C., & Wang, K. (2022). Solving task scheduling problems in cloud manufacturing via attention mechanism and deep reinforcement learning. Journal of Manufacturing Systems, 65, 452\u2013468.","journal-title":"Journal of Manufacturing Systems"},{"key":"2513_CR36","doi-asserted-by":"publisher","first-page":"2593","DOI":"10.1007\/s10845-023-02161-w","volume":"35","author":"Z Wang","year":"2023","unstructured":"Wang, Z., & Liao, W. (2023). Smart scheduling of dynamic job shop based on discrete event simulation and deep reinforcement learning. Journal of Intelligent Manufacturing, 35, 2593\u20132610.","journal-title":"Journal of Intelligent Manufacturing"},{"issue":"4","key":"2513_CR37","doi-asserted-by":"publisher","first-page":"911","DOI":"10.1007\/s10845-022-01915-2","volume":"33","author":"C Waubert de Puiseau","year":"2022","unstructured":"Waubert de Puiseau, C., Meyes, R., & Meisen, T. (2022). On reliability of reinforcement learning based production scheduling systems: A comparative survey. Journal of Intelligent Manufacturing, 33(4), 911\u2013927.","journal-title":"Journal of Intelligent Manufacturing"},{"key":"2513_CR38","doi-asserted-by":"publisher","DOI":"10.1007\/s11276-021-02883-w","author":"G Wu","year":"2022","unstructured":"Wu, G. (2022). Deep reinforcement learning based multi-layered traffic scheduling scheme in data center networks. Wireless Networks. https:\/\/doi.org\/10.1007\/s11276-021-02883-w","journal-title":"Wireless Networks"},{"key":"2513_CR39","doi-asserted-by":"crossref","unstructured":"Yang, Y., & Shen, H. (2021). Deep reinforcement learning enhanced greedy optimization for online scheduling of batched tasks in cloud HPC systems. IEEE Transactions on Parallel and Distributed Systems, 33(11), 3003\u20133014.","DOI":"10.1109\/TPDS.2021.3138459"},{"key":"2513_CR40","doi-asserted-by":"crossref","unstructured":"Zhang, L., Wang, F., & Li, F. (2019a). Cloud-based simulation. In Summer of simulation: 50 years of seminal computer simulation research (pp. 97\u2013115).","DOI":"10.1007\/978-3-030-17164-3_6"},{"key":"2513_CR41","doi-asserted-by":"publisher","DOI":"10.1016\/j.compind.2019.08.004","volume":"112","author":"L Zhang","year":"2019","unstructured":"Zhang, L., Zhou, L., Ren, L., & Laili, Y. (2019). Modeling and simulation in intelligent manufacturing. Computers in Industry, 112, 103123.","journal-title":"Computers in Industry"},{"key":"2513_CR42","doi-asserted-by":"publisher","first-page":"258","DOI":"10.1016\/j.future.2022.11.017","volume":"141","author":"Y Zhang","year":"2023","unstructured":"Zhang, Y., Li, R., Zhao, Y., Li, R., Wang, Y., & Zhou, Z. (2023). Multi-agent deep reinforcement learning for online request scheduling in edge cooperation networks. Future Generation Computer Systems, 141, 258\u2013268. https:\/\/doi.org\/10.1016\/j.future.2022.11.017","journal-title":"Future Generation Computer Systems"},{"key":"2513_CR43","doi-asserted-by":"publisher","DOI":"10.1016\/j.rcim.2019.101910","volume":"64","author":"C Zhao","year":"2020","unstructured":"Zhao, C., Luo, X., & Zhang, L. (2020). Modeling of service agents for simulation in cloud manufacturing. Robotics and Computer-Integrated Manufacturing, 64, 101910.","journal-title":"Robotics and Computer-Integrated Manufacturing"},{"key":"2513_CR44","unstructured":"Zhou, L., & Zhang, L. (2016). A dynamic task scheduling method based on simulation in cloud manufacturing. In Theory, methodology, tools and applications for modeling and simulation of complex systems: 16th Asia simulation conference and SCS autumn simulation multi-conference, AsiaSim\/SCS AutumnSim 2016, Beijing, China, 8\u201311 October 2016, Proceedings, Part III 16 (pp. 20\u201324). Springer."},{"key":"2513_CR45","doi-asserted-by":"crossref","unstructured":"Zhuang, Y., Li, Y., Cheng, L., Wang, C., & Lin, E. (2022). Online scheduling of PV and energy storage system based on deep reinforcement learning. In 2022 IEEE international conference on power systems technology (POWERCON) (pp. 1\u20136). IEEE.","DOI":"10.1109\/POWERCON53406.2022.9930039"}],"container-title":["Journal of Intelligent Manufacturing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10845-024-02513-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10845-024-02513-0\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10845-024-02513-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,1]],"date-time":"2025-11-01T07:56:57Z","timestamp":1761983817000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10845-024-02513-0"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,4]]},"references-count":45,"journal-issue":{"issue":"8","published-print":{"date-parts":[[2025,12]]}},"alternative-id":["2513"],"URL":"https:\/\/doi.org\/10.1007\/s10845-024-02513-0","relation":{},"ISSN":["0956-5515","1572-8145"],"issn-type":[{"value":"0956-5515","type":"print"},{"value":"1572-8145","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,4]]},"assertion":[{"value":"30 September 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 October 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 November 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"We declare that we do not have any commercial or associative interest that represents a conflict of interest in connection with the work submitted.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}