{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T23:20:48Z","timestamp":1775776848658,"version":"3.50.1"},"reference-count":97,"publisher":"Informa UK Limited","issue":"16","license":[{"start":{"date-parts":[[2022,8,6]],"date-time":"2022-08-06T00:00:00Z","timestamp":1659744000000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"CADS4.0","award":["RTI2018-101344-B-I00"],"award-info":[{"award-number":["RTI2018-101344-B-I00"]}]},{"name":"NIOTOME","award":["RTI2018-102020-B-I00"],"award-info":[{"award-number":["RTI2018-102020-B-I00"]}]},{"name":"EU H2020 research and innovation programme","award":["825631"],"award-info":[{"award-number":["825631"]}]},{"name":"Zero-Defect Manufacturing Platform","award":["958205"],"award-info":[{"award-number":["958205"]}]},{"name":"Industrial Data Services for Quality Control in Smart Manufacturing"},{"name":"Industrial Production and Logistics Optimization in Industry 4.0","award":["PROMETEO\/2021\/065"],"award-info":[{"award-number":["PROMETEO\/2021\/065"]}]},{"DOI":"10.13039\/501100003359","name":"Generalitat Valenciana","doi-asserted-by":"crossref","id":[{"id":"10.13039\/501100003359","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["www.tandfonline.com"],"crossmark-restriction":true},"short-container-title":["International Journal of Production Research"],"published-print":{"date-parts":[[2023,8,18]]},"DOI":"10.1080\/00207543.2022.2104180","type":"journal-article","created":{"date-parts":[[2022,8,6]],"date-time":"2022-08-06T20:42:08Z","timestamp":1659818528000},"page":"5772-5789","update-policy":"https:\/\/doi.org\/10.1080\/tandf_crossmark_01","source":"Crossref","is-referenced-by-count":117,"title":["Reinforcement learning applied to production planning and control"],"prefix":"10.1080","volume":"61","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0379-8786","authenticated-orcid":false,"given":"Ana","family":"Esteso","sequence":"first","affiliation":[{"name":"Universitat Polit\u00e8cnica de Val\u00e8ncia","place":["Valencia, Spain"]}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8678-6881","authenticated-orcid":false,"given":"David","family":"Peidro","sequence":"additional","affiliation":[{"name":"Universitat Polit\u00e8cnica de Val\u00e8ncia","place":["Alicante, Spain"]}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8447-3387","authenticated-orcid":false,"given":"Josefa","family":"Mula","sequence":"additional","affiliation":[{"name":"Universitat Polit\u00e8cnica de Val\u00e8ncia","place":["Alicante, Spain"]}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1693-2876","authenticated-orcid":false,"given":"Manuel","family":"D\u00edaz-Madro\u00f1ero","sequence":"additional","affiliation":[{"name":"Universitat Polit\u00e8cnica de Val\u00e8ncia","place":["Alicante, Spain"]}]}],"member":"301","published-online":{"date-parts":[[2022,8,6]]},"reference":[{"key":"e_1_3_3_2_1","unstructured":"Abadi Mart\u00edn Ashish Agarwal Paul Barham Eugene Brevdo Zhifeng Chen Craig Citro Greg S. Corrado et al. 2016. \u201cTensorFlow: Large-Scale Machine Learning on Heterogeneous Distributed Systems.\u201d https:\/\/www.tensorflow.org\/."},{"key":"e_1_3_3_3_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-59747-4_38"},{"key":"e_1_3_3_4_1","doi-asserted-by":"crossref","unstructured":"Barat Souvik Harshad Khadilkar Hardik Meisheri Vinay Kulkarni Vinita Baniwal Prashant Kumar and Monika Gajrani. 
2019. \u201cActor Based Simulation for Closed Loop Control of Supply Chain Using Reinforcement Learning.\u201d Proceedings of the International Joint Conference on Autonomous Agents and Multiagent Systems AAMAS 3 1802\u20131804.","DOI":"10.65109\/RFYL9145"},{"key":"e_1_3_3_5_1","volume-title":"Dynamic Programming","author":"Bellman Richard.","year":"1957","unstructured":"Bellman, Richard. 1957. Dynamic Programming. Princeton, New Jersey: Princeton University Press."},{"key":"e_1_3_3_6_1","doi-asserted-by":"publisher","DOI":"10.1038\/srep00400"},{"key":"e_1_3_3_7_1","unstructured":"Brockman Greg Vicki Cheung Ludwig Pettersson Jonas Schneider John Schulman Jie Tang and Wojciech Zaremba. 2016. \u201cOpenAI Gym.\u201d http:\/\/arxiv.org\/abs\/1606.01540."},{"key":"e_1_3_3_8_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cie.2020.106774"},{"key":"e_1_3_3_9_1","doi-asserted-by":"publisher","DOI":"10.3390\/su12197978"},{"key":"e_1_3_3_10_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cie.2021.107379"},{"key":"e_1_3_3_11_1","doi-asserted-by":"publisher","DOI":"10.3390\/app11114948"},{"key":"e_1_3_3_12_1","doi-asserted-by":"publisher","DOI":"10.5281\/zenodo.1134899"},{"key":"e_1_3_3_13_1","unstructured":"Castro Pablo Samuel Subhodeep Moitra Carles Gelada Saurabh Kumar and Marc G Bellemare. 2018. \u201cDopamine: A Research Framework for Deep Reinforcement Learning.\u201d http:\/\/arxiv.org\/abs\/1812.06110."},{"key":"e_1_3_3_14_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207543.2020.1733125"},{"key":"e_1_3_3_15_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-14347-3_34"},{"key":"e_1_3_3_16_1","unstructured":"Deisenroth Marc Peter Gerhard Neumann and Jan Peters. 2013. A Survey on Policy Search for Robotics. Now Publishers."},{"key":"e_1_3_3_17_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-29946-9_11"},{"key":"e_1_3_3_18_1","unstructured":"Farazi Nahid Parvez Tanvir Ahamed Limon Barua and Bo Zou. 2020. \u201cDeep Reinforcement Learning and Transportation Research: A Comprehensive Review.\u201d ArXiv Preprint ArXiv:2010.06187."},{"key":"e_1_3_3_19_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207543.2011.571443"},{"key":"e_1_3_3_20_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.compfluid.2021.104973"},{"key":"e_1_3_3_21_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-021-09996-w"},{"key":"e_1_3_3_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/TSMCC.2012.2218595"},{"key":"e_1_3_3_23_1","unstructured":"Guadarrama Sergio Anoop Korattikara Oscar Ramirez Pablo Castro Ethan Holly Sam Fishman Ke Wang et al. 2018. \u201cTF-Agents: A Library for Reinforcement Learning in TensorFlow.\u201d https:\/\/github.com\/tensorflow\/agents."},{"key":"e_1_3_3_24_1","unstructured":"Haarnoja Tuomas Aurick Zhou Pieter Abbeel and Sergey Levine. 2018. \u201cSoft Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor.\u201d ArXiv Preprint ArXiv:1801.01290."},{"key":"e_1_3_3_25_1","unstructured":"Han Miyoung. 2018. \u201cReinforcement Learning Approaches in Dynamic Environments.\u201d"},{"key":"e_1_3_3_26_1","doi-asserted-by":"crossref","unstructured":"Hessel Matteo Joseph Modayil Hado Van Hasselt Tom Schaul Georg Ostrovski Will Dabney Dan Horgan Bilal Piot Mohammad Azar and David Silver. 2018. \u201cRainbow: Combining Improvements in Deep Reinforcement Learning.\u201d In Proceedings of the AAAI Conference on Artificial Intelligence. 
32.","DOI":"10.1609\/aaai.v32i1.11796"},{"key":"e_1_3_3_27_1","unstructured":"Hill Ashley Antonin Raffin Maximilian Ernestus Adam Gleave Anssi Kanervisto Rene Traore Prafulla Dhariwal et al. 2018. \u201cStable Baselines.\u201d GitHub Repository. GitHub."},{"key":"e_1_3_3_28_1","unstructured":"Hoffman Matt Bobak Shahriari John Aslanides Gabriel Barth-Maron Feryal Behbahani Tamara Norman Abbas Abdolmaleki et al. 2020. \u201cAcme: A Research Framework for Distributed Reinforcement Learning.\u201d https:\/\/arxiv.org\/abs\/2006.00979."},{"key":"e_1_3_3_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/COASE.2019.8843338"},{"key":"e_1_3_3_30_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.compchemeng.2020.106982"},{"key":"e_1_3_3_31_1","unstructured":"Hubbs Christian D Hector D Perez Owais Sarwar Nikolaos V Sahinidis Ignacio E Grossmann and John M Wassick. 2020b. \u201cOR-Gym: A Reinforcement Learning Library for Operations Research Problems.\u201d"},{"key":"e_1_3_3_32_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207543.2020.1798035"},{"key":"e_1_3_3_33_1","doi-asserted-by":"publisher","DOI":"10.1080\/09537287.2015.1128010"},{"key":"e_1_3_3_34_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2008.07.036"},{"key":"e_1_3_3_35_1","unstructured":"Kapturowski Steven Georg Ostrovski Will Dabney John Quan and Remi Munos. 2019. \u201cRecurrent Experience Replay in Distributed Reinforcement Learning.\u201d In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=r1lyTjAqYX."},{"key":"e_1_3_3_36_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.simpat.2015.07.004"},{"key":"e_1_3_3_37_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10845-021-01847-3"},{"key":"e_1_3_3_38_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207543.2020.1748247"},{"key":"e_1_3_3_39_1","first-page":"1008","article-title":"Actor-Critic Algorithms","volume":"12","author":"Konda Vijay R","year":"2000","unstructured":"Konda, Vijay R, and John N Tsitsiklis. 2000. \u201cActor-Critic Algorithms.\u201d Advances in Neural Information Processing Systems 12: 1008\u20131014.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_3_40_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11740-018-0855-7"},{"key":"e_1_3_3_41_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10845-020-01612-y"},{"key":"e_1_3_3_42_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207543.2021.1972179"},{"key":"e_1_3_3_43_1","unstructured":"Kuhnle Alexander Michael Schaarschmidt and Kai Fricke. 2017. \u201cTensorforce: A TensorFlow Library for Applied Reinforcement Learning.\u201d https:\/\/github.com\/tensorforce\/tensorforce."},{"key":"e_1_3_3_44_1","doi-asserted-by":"publisher","DOI":"10.1016\/S0019-8501(99)00113-3"},{"key":"e_1_3_3_45_1","doi-asserted-by":"publisher","DOI":"10.1109\/WSC48552.2020.9383997"},{"key":"e_1_3_3_46_1","unstructured":"Li Yuxi. 2018. \u201cDeep Reinforcement Learning.\u201d ArXiv Preprint ArXiv:1810.06339."},{"key":"e_1_3_3_47_1","unstructured":"Liang Eric Richard Liaw Robert Nishihara Philipp Moritz Roy Fox Joseph Gonzalez Ken Goldberg and I. Stoica. 2017. \u201cRay RLLib: A Composable and Scalable Reinforcement Learning Library.\u201d ArXiv abs\/1712.0."},{"key":"e_1_3_3_48_1","unstructured":"Lillicrap Timothy P Jonathan J Hunt Alexander Pritzel Nicolas Heess Tom Erez Yuval Tassa David Silver and Daan Wierstra. 2015. 
\u201cContinuous Control with Deep Reinforcement Learning.\u201d ArXiv Preprint ArXiv:1509.02971."},{"key":"e_1_3_3_49_1","volume-title":"Hierarchical Production Planning for Job Shops","author":"Mehra A.","year":"1995","unstructured":"Mehra, A. 1995. Hierarchical Production Planning for Job Shops. College Park: University of Maryland, Harvard University and Industry."},{"key":"e_1_3_3_50_1","unstructured":"Mnih Volodymyr Adria Puigdomenech Badia Mehdi Mirza Alex Graves Timothy Lillicrap Tim Harley David Silver and Koray Kavukcuoglu. 2016. \u201cAsynchronous Methods for Deep Reinforcement Learning.\u201d In International Conference on Machine Learning 1928\u20131937."},{"key":"e_1_3_3_51_1","doi-asserted-by":"publisher","DOI":"10.1038\/nature14236"},{"key":"e_1_3_3_52_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10100-021-00740-x"},{"key":"e_1_3_3_53_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207543.2021.1973138"},{"key":"e_1_3_3_54_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207543.2020.1870013"},{"key":"e_1_3_3_55_1","doi-asserted-by":"publisher","DOI":"10.1109\/TASE.2019.2956762"},{"key":"e_1_3_3_56_1","unstructured":"Paszke Adam Sam Gross Soumith Chintala Gregory Chanan Edward Yang Zachary DeVito Zeming Lin Alban Desmaison Luca Antiga and Adam Lerer. 2017. \u201cAutomatic Differentiation in PyTorch.\u201d In 31st Conference on Neural Information Processing Systems (NIPS 2017). Long Beach USA."},{"key":"e_1_3_3_57_1","unstructured":"Paszke Adam Sam Gross Francisco Massa Adam Lerer James Bradbury Gregory Chanan Trevor Killeen et al. 2019. \u201cPyTorch: An Imperative Style High-Performance Deep Learning Library.\u201d In Advances in Neural Information Processing Systems 32 edited by H. Wallach H. Larochelle A. Beygelzimer F. d\u2019Alch\u00e9-Buc E. Fox and R. Garnett 8024\u20138035. Curran Associates. http:\/\/papers.neurips.cc\/paper\/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf."},{"key":"e_1_3_3_58_1","unstructured":"Plappert Matthias. 2016. \u201cKeras-RL.\u201d GitHub Repository. GitHub."},{"key":"e_1_3_3_59_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207540110118640"},{"key":"e_1_3_3_60_1","volume-title":"Agent-Based Intelligent Manufacturing System for the 21st Century","author":"Qiao B.","year":"2000","unstructured":"Qiao, B., and J. Zhu. 2000. Agent-Based Intelligent Manufacturing System for the 21st Century. Nanjing: Mechatronic Engineering Institute, Nanjing University of Aeronautics and Astronautics."},{"key":"e_1_3_3_61_1","doi-asserted-by":"publisher","DOI":"10.1109\/ETFA.2018.8502508"},{"key":"e_1_3_3_62_1","doi-asserted-by":"publisher","DOI":"10.1109\/WSC.2015.7408317"},{"issue":"268","key":"e_1_3_3_63_1","first-page":"1","article-title":"Stable-Baselines3: Reliable Reinforcement Learning Implementations","volume":"22","author":"Raffin Antonin","year":"2021","unstructured":"Raffin, Antonin, Ashley Hill, Adam Gleave, Anssi Kanervisto, Maximilian Ernestus, and Noah Dormann. 2021. \u201cStable-Baselines3: Reliable Reinforcement Learning Implementations.\u201d Journal of Machine Learning Research 22 (268): 1\u20138.","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_3_64_1","volume-title":"On-Line Q-Learning Using Connectionist Systems","author":"Rummery Gavin A","year":"1994","unstructured":"Rummery, Gavin A, and Mahesan Niranjan. 1994. On-Line Q-Learning Using Connectionist Systems. 
Cambridge: University of Cambridge, Department of Engineering."},{"key":"e_1_3_3_65_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.ifacol.2019.11.397"},{"key":"e_1_3_3_66_1","volume-title":"Artificial Intelligence: A Modern Approach","author":"Russell Stuart J","year":"2003","unstructured":"Russell, Stuart J, and Peter Norvig. 2003. Artificial Intelligence: A Modern Approach. New Jersey: Pearson Education."},{"key":"e_1_3_3_67_1","unstructured":"Schaul Tom John Quan Ioannis Antonoglou and David Silver. 2016. \u201cPrioritized Experience Replay.\u201d ArXiv Preprint ArXiv:1511.05952."},{"key":"e_1_3_3_68_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-13709-0_46"},{"key":"e_1_3_3_69_1","unstructured":"Schulman John Sergey Levine Pieter Abbeel Michael Jordan and Philipp Moritz. 2015. \u201cTrust Region Policy Optimization.\u201d In International Conference on Machine Learning 1889\u20131897."},{"key":"e_1_3_3_70_1","unstructured":"Schulman John Filip Wolski Prafulla Dhariwal Alec Radford and Oleg Klimov. 2017. \u201cProximal Policy Optimization Algorithms.\u201d ArXiv Preprint ArXiv:1707.06347."},{"key":"e_1_3_3_71_1","unstructured":"Serrano-Ruiz J. C. J. Mula D. Peidro and M. D\u00edaz-Madro\u00f1ero. 2021. \u201cA Metamodel for the Supply Chain 4.0.\u201d Journal of Industrial Information Integration. Under Review."},{"key":"e_1_3_3_72_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cie.2018.03.039"},{"key":"e_1_3_3_73_1","doi-asserted-by":"publisher","DOI":"10.1023\/A:1008942012299"},{"key":"e_1_3_3_74_1","volume-title":"Reinforcement Learning: An Introduction","author":"Sutton Richard S","year":"2018","unstructured":"Sutton, Richard S, and Andrew G Barto. 2018. Reinforcement Learning: An Introduction. Cambridge: MIT Press."},{"key":"e_1_3_3_75_1","doi-asserted-by":"publisher","DOI":"10.1109\/JPROC.2017.2761740"},{"key":"e_1_3_3_76_1","doi-asserted-by":"publisher","DOI":"10.2200\/S00268ED1V01Y201005AIM009"},{"key":"e_1_3_3_77_1","unstructured":"Torres Jordi. 2020. \u201cDeep Reinforcement Learning Explained.\u201d https:\/\/torres.ai\/deep-reinforcement-learning-explained-series\/."},{"key":"e_1_3_3_78_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10845-012-0711-0"},{"key":"e_1_3_3_79_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10845-019-01531-7"},{"key":"e_1_3_3_80_1","doi-asserted-by":"publisher","DOI":"10.1142\/S0129065709002063"},{"key":"e_1_3_3_81_1","doi-asserted-by":"crossref","unstructured":"Van Hasselt Hado Arthur Guez and David Silver. 2016. \u201cDeep Reinforcement Learning with Double Q-Learning.\u201d In Proceedings of the AAAI Conference on Artificial Intelligence. 30.","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"e_1_3_3_82_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.compind.2020.103239"},{"key":"e_1_3_3_83_1","volume-title":"Python Deep Learning: Exploring Deep Learning Techniques and Neural Network Architectures with Pytorch, Keras, and TensorFlow","author":"Vasilev Ivan","year":"2019","unstructured":"Vasilev, Ivan, Daniel Slater, Gianmario Spacagna, Peter Roelants, and Valentino Zocca. 2019. Python Deep Learning: Exploring Deep Learning Techniques and Neural Network Architectures with Pytorch, Keras, and TensorFlow. Birmingham: Packt Publishing Ltd."},{"key":"e_1_3_3_84_1","volume-title":"Manufacturing Planning and Control for Supply Chain Management","author":"Vollmann T. E.","year":"2005","unstructured":"Vollmann, T. E., W. L. Berry, D. C. Whybark, and F. R. Jacobs. 2005. 
Manufacturing Planning and Control for Supply Chain Management. New York: McGraw Hill."},{"key":"e_1_3_3_85_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207543.2020.1847342"},{"key":"e_1_3_3_86_1","unstructured":"Wang Ziyu Tom Schaul Matteo Hessel Hado Hasselt Marc Lanctot and Nando Freitas. 2016. \u201cDueling Network Architectures for Deep Reinforcement Learning.\u201d In International Conference on Machine Learning 1995\u20132003."},{"key":"e_1_3_3_87_1","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992698"},{"key":"e_1_3_3_88_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-79629-6_18"},{"key":"e_1_3_3_89_1","volume-title":"Multiagent Systems: A Modern Approach to Distributed Artificial Intelligence","author":"Weiss Gerhard.","year":"1999","unstructured":"Weiss, Gerhard. 1999. Multiagent Systems: A Modern Approach to Distributed Artificial Intelligence. Cambridge: MIT Press."},{"key":"e_1_3_3_90_1","unstructured":"Wijmans Erik Abhishek Kadian Ari Morcos Stefan Lee Irfan Essa Devi Parikh Manolis Savva and Dhruv Batra. 2020. \u201cDD-PPO: Learning Near-Perfect PointGoal Navigators from 2.5 Billion Frames.\u201d"},{"key":"e_1_3_3_91_1","doi-asserted-by":"publisher","DOI":"10.1007\/BF00992696"},{"key":"e_1_3_3_92_1","unstructured":"Winder Phil. 2020. Reinforcement Learning: Industrial Applications of Intelligent Agents."},{"key":"e_1_3_3_93_1","doi-asserted-by":"publisher","DOI":"10.1080\/00207543.2021.1943037"},{"key":"e_1_3_3_94_1","unstructured":"Yu Chao Jiming Liu and Shamim Nemati. 2020. \u201cReinforcement Learning in Healthcare: A Survey.\u201d ArXiv Preprint ArXiv:1908.08796."},{"key":"e_1_3_3_95_1","unstructured":"Zhang Cong Wen Song Zhiguang Cao Jie Zhang Puay Siew Tan and Chi Xu. 2020. \u201cLearning to Dispatch for Job Shop Scheduling via Deep Reinforcement Learning.\u201d http:\/\/arxiv.org\/abs\/2010.12367."},{"key":"e_1_3_3_96_1","doi-asserted-by":"crossref","unstructured":"Zheng Shuai Chetan Gupta and Susumu Serita. 2020. \u201cManufacturing Dispatching Using Reinforcement and Transfer Learning.\u201d The European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases October. 
http:\/\/arxiv.org\/abs\/1910.02035.","DOI":"10.1007\/978-3-030-46133-1_39"},{"key":"e_1_3_3_97_1","doi-asserted-by":"publisher","DOI":"10.1109\/COASE.2017.8256260"},{"key":"e_1_3_3_98_1","doi-asserted-by":"publisher","DOI":"10.1115\/MSEC2017-2771"}],"container-title":["International Journal of Production Research"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.tandfonline.com\/doi\/pdf\/10.1080\/00207543.2022.2104180","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T17:32:23Z","timestamp":1769707943000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.tandfonline.com\/doi\/full\/10.1080\/00207543.2022.2104180"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,8,6]]},"references-count":97,"journal-issue":{"issue":"16","published-print":{"date-parts":[[2023,8,18]]}},"alternative-id":["10.1080\/00207543.2022.2104180"],"URL":"https:\/\/doi.org\/10.1080\/00207543.2022.2104180","relation":{},"ISSN":["0020-7543","1366-588X"],"issn-type":[{"value":"0020-7543","type":"print"},{"value":"1366-588X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,8,6]]},"assertion":[{"value":"The publishing and review policy for this title is described in its Aims & Scope.","order":1,"name":"peerreview_statement","label":"Peer Review Statement"},{"value":"http:\/\/www.tandfonline.com\/action\/journalInformation?show=aimsScope&journalCode=tprs20","URL":"http:\/\/www.tandfonline.com\/action\/journalInformation?show=aimsScope&journalCode=tprs20","order":2,"name":"aims_and_scope_url","label":"Aims & Scope"},{"value":"2021-07-29","order":0,"name":"received","label":"Received","group":{"name":"publication_history","label":"Publication History"}},{"value":"2022-07-11","order":2,"name":"accepted","label":"Accepted","group":{"name":"publication_history","label":"Publication History"}},{"value":"2022-08-06","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}