{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,23]],"date-time":"2026-04-23T08:03:02Z","timestamp":1776931382883,"version":"3.51.2"},"publisher-location":"New York, NY, USA","reference-count":31,"publisher":"ACM","license":[{"start":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T00:00:00Z","timestamp":1763164800000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["2008265, 2412345"],"award-info":[{"award-number":["2008265, 2412345"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,11,16]]},"DOI":"10.1145\/3731599.3767514","type":"proceedings-article","created":{"date-parts":[[2025,11,7]],"date-time":"2025-11-07T16:18:44Z","timestamp":1762532324000},"page":"1406-1414","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["RL4Sys: A Lightweight System-driven RL Framework for Drop-in Integration in System Optimization"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-5275-7837","authenticated-orcid":false,"given":"Jiaxin","family":"Dong","sequence":"first","affiliation":[{"name":"University of Delaware, Wilmington, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-2622-936X","authenticated-orcid":false,"given":"Md. Hasanur","family":"Rashid","sequence":"additional","affiliation":[{"name":"University of Delaware, Newark, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2232-3305","authenticated-orcid":false,"given":"Helen","family":"Xu","sequence":"additional","affiliation":[{"name":"Georgia Institute of Technology, Atlanta, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4078-8149","authenticated-orcid":false,"given":"Dong","family":"Dai","sequence":"additional","affiliation":[{"name":"University of Delaware, Newark, Delaware, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,11,15]]},"reference":[{"key":"e_1_3_3_1_2_2","unstructured":"Mariusz Bojarski Davide Del\u00a0Testa Daniel Dworakowski Bernhard Firner Beat Flepp Prasoon Goyal Lawrence\u00a0D. Jackel Mathew Monfort Urs Muller Jiakai Zhang Xin Zhang Jake Zhao and Karol Zieba. 2016. End to End Learning for Self-Driving Cars. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1604.07316 (2016)."},{"key":"e_1_3_3_1_3_2","doi-asserted-by":"crossref","unstructured":"Han Cai Kan Ren Weinan Zhang Kleanthis Malialis Jun Wang Yong Yu and Defeng Guo. 2017. Real-Time Bidding by Reinforcement Learning in Display Advertising. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1701.02490 (2017).","DOI":"10.1145\/3018661.3018702"},{"key":"e_1_3_3_1_4_2","doi-asserted-by":"publisher","unstructured":"Pablo\u00a0Samuel Castro Subhodeep Moitra Carles Gelada Saurabh Kumar and Marc\u00a0G. Bellemare. 2018. Dopamine: A Research Framework for Deep Reinforcement Learning. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1812.06110 (2018). 10.48550\/arXiv.1812.06110","DOI":"10.48550\/arXiv.1812.06110"},{"key":"e_1_3_3_1_5_2","doi-asserted-by":"publisher","DOI":"10.1109\/MSST.2019.00-20"},{"key":"e_1_3_3_1_6_2","volume-title":"International Conference on Learning Representations","author":"Espeholt Lasse","year":"2020","unstructured":"Lasse Espeholt, Rapha\u00ebl Marinier, Piotr Sta\u0144czyk, Ke Wang, and Marcin Michalski. 2020. SEED RL: Scalable and Efficient Deep-RL with Accelerated Central Inference. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=rkgvXlrKwH"},{"key":"e_1_3_3_1_7_2","series-title":"Proceedings of Machine Learning Research","first-page":"1407","volume-title":"Proceedings of the 35th International Conference on Machine Learning","volume":"80","author":"Espeholt Lasse","year":"2018","unstructured":"Lasse Espeholt, Hubert Soyer, R\u00e9mi Munos, Karen Simonyan, Volodymyr Mnih, Tom Ward, Yotam Doron, Vlad Firoiu, Tim Harley, Iain Dunning, Shane Legg, and Koray Kavukcuoglu. 2018. IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures. In Proceedings of the 35th International Conference on Machine Learning(Proceedings of Machine Learning Research, Vol.\u00a080), Jennifer Dy and Andreas Krause (Eds.). PMLR, 1407\u20131416. https:\/\/proceedings.mlr.press\/v80\/espeholt18a.html"},{"key":"e_1_3_3_1_8_2","unstructured":"Jason Gauci Edoardo Conti Yitao Liang Kittipat Virochsiri Yuchen He Zachary Kaden Vivek Narayanan Xiaohui Ye Zhengxing Chen and Scott Fujimoto. 2019. Horizon: Facebook\u2019s Open Source Applied Reinforcement Learning Platform. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1811.00260 (2019). https:\/\/arxiv.org\/abs\/1811.00260"},{"key":"e_1_3_3_1_9_2","doi-asserted-by":"publisher","unstructured":"Matthew\u00a0W. Hoffman Bobak Shahriari John Aslanides Gabriel Barth-Maron Nikola Momchev Danila Sinopalnikov Piotr Sta\u0144czyk Sabela Ramos Anton Raichuk Damien Vincent L\u00e9onard Hussenot Robert Dadashi Gabriel Dulac-Arnold Manu Orsini Alexis Jacq Johan Ferret Nino Vieillard Seyed\u00a0Kamyar Seyed\u00a0Ghasemipour Sertan Girgin Olivier Pietquin Feryal Behbahani Tamara Norman Abbas Abdolmaleki Albin Cassirer Fan Yang Kate Baumli Sarah Henderson Abe Friesen Ruba Haroun Alex Novikov Sergio G\u00f3mez\u00a0Colmenarejo Serkan Cabi Caglar Gulcehre Tom Le\u00a0Paine Srivatsan Srinivasan Andrew Cowie Ziyu Wang Bilal Piot and Nando de Freitas. 2020. Acme: A Research Framework for Distributed Reinforcement Learning. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2006.00979 (2020). 10.48550\/arXiv.2006.00979","DOI":"10.48550\/arXiv.2006.00979"},{"key":"e_1_3_3_1_10_2","unstructured":"Shengyi Huang Rousslan Fernand\u00a0Julien Dossa Chang Ye Jeff Braga Dipam Chakraborty Kinal Mehta and Jo\u00e3o G.\u00a0M. Ara\u00fajo. 2022. CleanRL: High-Quality Single-File Implementations of Deep Reinforcement Learning Algorithms. Journal of Machine Learning Research 23 274 (2022) 1\u201318. https:\/\/jmlr.org\/papers\/v23\/21-1342.html"},{"key":"e_1_3_3_1_11_2","volume-title":"International Conference on Learning Representations","author":"Kapturowski Keiran","year":"2019","unstructured":"Keiran Kapturowski, Georg Ostrovski, John Quan, R\u00e9mi Munos, and Will Dabney. 2019. Recurrent Experience Replay in Distributed Reinforcement Learning. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=r1lyTjAqYX"},{"key":"e_1_3_3_1_12_2","doi-asserted-by":"publisher","DOI":"10.1145\/3624062.3624201"},{"key":"e_1_3_3_1_13_2","unstructured":"Hyunsung Lee. 2020. Intelligent Replication Management for HDFS Using Reinforcement Learning. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2008.08665 (2020)."},{"key":"e_1_3_3_1_14_2","unstructured":"Sergey Levine Chelsea Finn Trevor Darrell and Pieter Abbeel. 2016. End-to-End Training of Deep Visuomotor Policies. Journal of Machine Learning Research 17 39 (2016) 1\u201340."},{"key":"e_1_3_3_1_15_2","series-title":"Proceedings of Machine Learning Research","first-page":"3053","volume-title":"Proceedings of the 35th International Conference on Machine Learning","volume":"80","author":"Liang Eric","year":"2018","unstructured":"Eric Liang, Richard Liaw, Robert Nishihara, Philipp Moritz, Roy Fox, Ken Goldberg, Joseph Gonzalez, Michael Jordan, and Ion Stoica. 2018. RLlib: Abstractions for Distributed Reinforcement Learning. In Proceedings of the 35th International Conference on Machine Learning(Proceedings of Machine Learning Research, Vol.\u00a080), Jennifer Dy and Andreas Krause (Eds.). PMLR, 3053\u20133062. https:\/\/proceedings.mlr.press\/v80\/liang18b.html"},{"key":"e_1_3_3_1_16_2","doi-asserted-by":"publisher","DOI":"10.1145\/3005745.3005750"},{"key":"e_1_3_3_1_17_2","unstructured":"Azalia Mirhoseini Anna Goldie Mustafa Yazgan Joe W.\u00a0J. Jiang Ebrahim\u00a0M. Songhori Shen Wang Young-Joon Lee Eric Johnson Omkar Pathak Sungmin Bae Azade Nazi Jiwoo Pak Andy Tong Kavya Srinivasa William Hang Emre Tuncer Anand Babu Quoc\u00a0V. Le James Laudon Richard Ho Roger Carpenter and Jeff Dean. 2020. Chip Placement with Deep Reinforcement Learning. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2004.10746 (2020)."},{"key":"e_1_3_3_1_18_2","doi-asserted-by":"publisher","unstructured":"Volodymyr Mnih Koray Kavukcuoglu David Silver et\u00a0al. 2015. Human-level control through deep reinforcement learning. Nature 518 7540 (2015) 529\u2013533. 10.1038\/nature14236","DOI":"10.1038\/nature14236"},{"key":"e_1_3_3_1_19_2","unstructured":"OpenAI. 2018. Spinning Up in Deep Reinforcement Learning. https:\/\/spinningup.openai.com\/. https:\/\/spinningup.openai.com\/en\/latest\/ Accessed 2025-07-30."},{"key":"e_1_3_3_1_20_2","series-title":"Proceedings of Machine Learning Research","first-page":"7652","volume-title":"Proceedings of the 37th International Conference on Machine Learning","volume":"119","author":"Petrenko Aleksei","year":"2020","unstructured":"Aleksei Petrenko, Zhehui Huang, Tushar Kumar, Gaurav Sukhatme, and Vladlen Koltun. 2020. Sample Factory: Egocentric 3D Control from Pixels at 100000 FPS with Asynchronous Reinforcement Learning. In Proceedings of the 37th International Conference on Machine Learning(Proceedings of Machine Learning Research, Vol.\u00a0119). PMLR, 7652\u20137662. https:\/\/proceedings.mlr.press\/v119\/petrenko20a\/petrenko20a.pdf"},{"key":"e_1_3_3_1_21_2","unstructured":"Antonin Raffin Ashley Hill Adam Gleave Anssi Kanervisto Maximilian Ernestus and Noah Dormann. 2021. Stable-Baselines3: Reliable Reinforcement Learning Implementations. Journal of Machine Learning Research 22 268 (2021) 1\u20138. https:\/\/jmlr.org\/papers\/v22\/20-1364.html"},{"key":"e_1_3_3_1_22_2","unstructured":"Md\u00a0Hasanur Rashid Youbiao He Forrest\u00a0Sheng Bao and Dong Dai. 2023. Iopathtune: Adaptive online parameter tuning for parallel file system i\/o path. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2301.06622 (2023)."},{"key":"e_1_3_3_1_23_2","doi-asserted-by":"publisher","DOI":"10.1109\/CCGRID64434.2025.00037"},{"key":"e_1_3_3_1_24_2","unstructured":"John Schulman Filip Wolski Prafulla Dhariwal Alec Radford and Oleg Klimov. 2017. Proximal Policy Optimization Algorithms. arXiv:1707.06347. arXiv preprint."},{"key":"e_1_3_3_1_25_2","first-page":"380","volume-title":"Proceedings of the 2003 Linux Symposium","author":"Schwan Philip","year":"2003","unstructured":"Philip Schwan. 2003. Lustre: Building a File System for 1,000-node Clusters. In Proceedings of the 2003 Linux Symposium. Ottawa, Ontario, Canada, 380\u2013386. https:\/\/www.kernel.org\/doc\/ols\/2003\/ols2003-pages-380-386.pdf"},{"key":"e_1_3_3_1_26_2","doi-asserted-by":"publisher","unstructured":"David Silver Aja Huang Christopher\u00a0J. Maddison et\u00a0al. 2016. Mastering the game of Go with deep neural networks and tree search. Nature 529 7587 (2016) 484\u2013489. 10.1038\/nature16961","DOI":"10.1038\/nature16961"},{"key":"e_1_3_3_1_27_2","unstructured":"CloudLab Team. 2023. CloudLab: Flexible scientific infrastructure for research on the future of cloud computing. https:\/\/www.cloudlab.us\/."},{"key":"e_1_3_3_1_28_2","unstructured":"TensorFlow Authors. 2018. TF-Agents: A Library for Reinforcement Learning in TensorFlow. https:\/\/www.tensorflow.org\/agents. https:\/\/www.tensorflow.org\/agents Accessed 2025-07-30."},{"key":"e_1_3_3_1_29_2","unstructured":"Jiayi Weng Huayu Chen Dong Yan Kaichao You Alexis Duburcq Minghao Zhang Yi Su Hang Su and Jun Zhu. 2022. Tianshou: A Highly Modularized Deep Reinforcement Learning Library. Journal of Machine Learning Research 23 1127 (2022) 1\u20136. http:\/\/jmlr.org\/papers\/v23\/21-1127.html"},{"key":"e_1_3_3_1_30_2","doi-asserted-by":"publisher","DOI":"10.5555\/3433701.3433742"},{"key":"e_1_3_3_1_31_2","unstructured":"Xiangyu Zhao Liang Zhang Long Xia Zhuoye Ding Dawei Yin and Jiliang Tang. 2018. Deep Reinforcement Learning for List-wise Recommendations. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1801.00209 (2018)."},{"key":"e_1_3_3_1_32_2","unstructured":"Cheng Zhong et\u00a0al. 2017. A Deep Reinforcement Learning-Based Framework for Content Caching. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/1712.08132 (2017)."}],"event":{"name":"SC Workshops '25: Workshops of the International Conference for High Performance Computing, Networking, Storage and Analysis","location":"St Louis MO USA","acronym":"SC Workshops '25","sponsor":["SIGHPC ACM Special Interest Group on High Performance Computing, Special Interest Group on High Performance Computing"]},"container-title":["Proceedings of the SC '25 Workshops of the International Conference for High Performance Computing, Networking, Storage and Analysis"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/abs\/10.1145\/3731599.3767514","content-type":"text\/html","content-version":"vor","intended-application":"syndication"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3731599.3767514","content-type":"application\/pdf","content-version":"vor","intended-application":"syndication"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3731599.3767514","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,9]],"date-time":"2026-01-09T19:33:53Z","timestamp":1767987233000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3731599.3767514"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,15]]},"references-count":31,"alternative-id":["10.1145\/3731599.3767514","10.1145\/3731599"],"URL":"https:\/\/doi.org\/10.1145\/3731599.3767514","relation":{},"subject":[],"published":{"date-parts":[[2025,11,15]]},"assertion":[{"value":"2025-11-15","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}