{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,7]],"date-time":"2025-10-07T21:10:08Z","timestamp":1759871408920,"version":"build-2065373602"},"publisher-location":"New York, NY, USA","reference-count":63,"publisher":"ACM","funder":[{"name":"European Research Council ANR-23-CE23-0035 Opt4DAC","award":["101125586"],"award-info":[{"award-number":["101125586"]}]},{"name":"Responsible and Scalable Learning for Robots Assisting Humans - Carl Zeiss Foundation"},{"name":"Cirrus UK National Tier-2 HPC Service - University of Edinburgh and EPSRC","award":["EP\/P020267\/1"],"award-info":[{"award-number":["EP\/P020267\/1"]}]},{"name":"St Andrews Global Doctoral Scholarship programme"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,7,14]]},"DOI":"10.1145\/3712256.3726395","type":"proceedings-article","created":{"date-parts":[[2025,7,8]],"date-time":"2025-07-08T12:26:58Z","timestamp":1751977618000},"page":"1162-1171","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["On the Importance of Reward Design in Reinforcement Learning-based Dynamic Algorithm Configuration: A Case Study on OneMax with (1+(\u03bb,\u03bb))-GA"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-7707-2069","authenticated-orcid":false,"given":"Tai","family":"Nguyen","sequence":"first","affiliation":[{"name":"University of St Andrews, St Andrews, United Kingdom"},{"name":"CNRS,LIP6, Sorbonne University, Paris, France"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-0749-9519","authenticated-orcid":false,"given":"Phong","family":"Le","sequence":"additional","affiliation":[{"name":"University of St Andrews, St Andrews, United Kingdom"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8703-8559","authenticated-orcid":false,"given":"Andr\u00e9","family":"Biedenkapp","sequence":"additional","affiliation":[{"name":"University of Freiburg, Freiburg, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4981-3227","authenticated-orcid":false,"given":"Carola","family":"Doerr","sequence":"additional","affiliation":[{"name":"CNRS,LIP6, Sorbonne University, Paris, France"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2693-6953","authenticated-orcid":false,"given":"Nguyen","family":"Dang","sequence":"additional","affiliation":[{"name":"University of St Andrews, St Andrews, United Kingdom"}]}],"member":"320","published-online":{"date-parts":[[2025,7,13]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.13922"},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.asoc.2008.07.001"},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/2908812.2908899"},{"key":"e_1_3_2_2_4_1","volume-title":"Unifying count-based exploration and intrinsic motivation. Advances in neural information processing systems 29","author":"Bellemare Marc","year":"2016","unstructured":"Marc Bellemare, Sriram Srinivasan, Georg Ostrovski, Tom Schaul, David Saxton, and Remi Munos. 2016. Unifying count-based exploration and intrinsic motivation. Advances in neural information processing systems 29 (2016)."},{"key":"e_1_3_2_2_5_1","volume-title":"Jun Gong, Marlos C. Machado, Subhodeep Moitra, Sameera S. Ponda, and Ziyu Wang.","author":"Bellemare Marc G.","year":"2020","unstructured":"Marc G. Bellemare, Salvatore Candido, Pablo Samuel Castro, Jun Gong, Marlos C. 
Machado, Subhodeep Moitra, Sameera S. Ponda, and Ziyu Wang. 2020. Autonomous navigation of stratospheric balloons using reinforcement learning. Nat. 588, 7836 (2020), 77\u201382."},{"key":"e_1_3_2_2_6_1","volume-title":"A Markovian decision process. Journal of Mathematics and Mechanics","author":"Bellman R.","year":"1957","unstructured":"R. Bellman. 1957. A Markovian decision process. Journal of Mathematics and Mechanics (1957), 679\u2013684."},{"key":"e_1_3_2_2_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/3638530.3654291"},{"key":"e_1_3_2_2_8_1","volume-title":"ECAI","author":"Biedenkapp Andr\u00e9","year":"2020","unstructured":"Andr\u00e9 Biedenkapp, H Furkan Bozkurt, Theresa Eimer, Frank Hutter, and Marius Lindauer. 2020. Dynamic algorithm configuration: Foundation of a new metaalgorithmic framework. In ECAI 2020. IOS Press, 427\u2013434."},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"publisher","DOI":"10.1145\/3512290.3528846"},{"key":"e_1_3_2_2_10_1","volume-title":"Proceedings of the Third International Conference on Automated Machine Learning (AutoML 2024), Workshop Track.","author":"Bordne Philipp","year":"2024","unstructured":"Philipp Bordne, M. Asif Hasan, Eddie Bergman, Noor Awad, and Andr\u00e9 Biedenkapp. 2024. CANDID DAC: Leveraging Coupled Action Dimensions with Importance Differences in DAC. In Proceedings of the Third International Conference on Automated Machine Learning (AutoML 2024), Workshop Track."},{"key":"e_1_3_2_2_11_1","first-page":"213","article-title":"R-max-a general polynomial time algorithm for near-optimal reinforcement learning","author":"Brafman Ronen I","year":"2002","unstructured":"Ronen I Brafman and Moshe Tennenholtz. 2002. R-max-a general polynomial time algorithm for near-optimal reinforcement learning. Journal of Machine Learning Research 3, Oct (2002), 213\u2013231.","journal-title":"Journal of Machine Learning Research 3"},{"key":"e_1_3_2_2_12_1","volume-title":"Large-scale study of curiosity-driven learning. arXiv preprint arXiv:1808.04355","author":"Burda Yuri","year":"2018","unstructured":"Yuri Burda, Harri Edwards, Deepak Pathak, Amos Storkey, Trevor Darrell, and Alexei A Efros. 2018. Large-scale study of curiosity-driven learning. arXiv preprint arXiv:1808.04355 (2018)."},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.1057\/jors.2013.71"},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/3594805.3607127"},{"key":"e_1_3_2_2_15_1","volume-title":"Dora the explorer: Directed outreaching reinforcement action-selection. arXiv preprint arXiv:1804.04012","author":"Choshen Leshem","year":"2018","unstructured":"Leshem Choshen, Lior Fox, and Yonatan Loewenstein. 2018. Dora the explorer: Directed outreaching reinforcement action-selection. arXiv preprint arXiv:1804.04012 (2018)."},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"crossref","unstructured":"Jonas Degrave Federico Felici Jonas Buchli Michael Neunert Brendan D. Tracey Francesco Carpanese Timo Ewalds Roland Hafner Abbas Abdolmaleki Diego de Las Casas Craig Donner Leslie Fritz Cristian Galperti Andrea Huber James Keeling Maria Tsimpoukelli Jackie Kay Antoine Merle Jean-Marc Moret Seb Noury Federico Pesamosca David Pfau Olivier Sauter Cristian Sommariva Stefano Coda Basil Duval Ambrogio Fasoli Pushmeet Kohli Koray Kavukcuoglu Demis Hassabis and Martin A. Riedmiller. 2022. Magnetic control of tokamak plasmas through deep reinforcement learning. Nat. 
602 7897 (2022) 414\u2013419.","DOI":"10.1038\/s41586-021-04301-9"},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.5555\/3635637.3662895"},{"key":"e_1_3_2_2_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/2739480.2754684"},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1007\/s00453-017-0354-9"},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.tcs.2014.11.028"},{"volume-title":"Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence (IJCAI'21)","author":"Eimer T.","key":"e_1_3_2_2_21_1","unstructured":"T. Eimer, A. Biedenkapp, M. Reimer, S. Adriaensen, F. Hutter, and M. Lindauer. 2021. DACBench: A Benchmark Library for Dynamic Algorithm Configuration. In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence (IJCAI'21). ijcai.org."},{"key":"e_1_3_2_2_22_1","volume-title":"Convergence of optimistic and incremental Q-learning. Advances in neural information processing systems 14","author":"Even-Dar Eyal","year":"2001","unstructured":"Eyal Even-Dar and Yishay Mansour. 2001. Convergence of optimistic and incremental Q-learning. Advances in neural information processing systems 14 (2001)."},{"volume-title":"Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems","author":"Forbes Grant C.","key":"e_1_3_2_2_23_1","unstructured":"Grant C. Forbes, Nitish Gupta, Leonardo Villalobos-Arias, Colin M. Potts, Arnav Jhala, and David L. Roberts. 2024. Potential-Based Reward Shaping for Intrinsic Motivation. In Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems (Auckland, New Zealand) (AAMAS '24). International Foundation for Autonomous Agents and Multiagent Systems, Richland, SC, 589\u2013597."},{"key":"e_1_3_2_2_24_1","unstructured":"Github. 2025. https:\/\/github.com\/taindp98\/OneMax-DAC.git."},{"key":"e_1_3_2_2_25_1","volume-title":"Soft Actor-Critic Algorithms and Applications. CoRR abs\/1812.05905","author":"Haarnoja Tuomas","year":"2018","unstructured":"Tuomas Haarnoja, Aurick Zhou, Kristian Hartikainen, George Tucker, Sehoon Ha, Jie Tan, Vikash Kumar, Henry Zhu, Abhishek Gupta, Pieter Abbeel, and Sergey Levine. 2018. Soft Actor-Critic Algorithms and Applications. CoRR abs\/1812.05905 (2018)."},{"key":"e_1_3_2_2_26_1","volume-title":"Dotan Di Castro, and Shie Mannor","author":"Hallak Assaf","year":"2015","unstructured":"Assaf Hallak, Dotan Di Castro, and Shie Mannor. 2015. Contextual Markov Decision Processes. CoRR abs\/1502.02259 (2015). http:\/\/arxiv.org\/abs\/1502.02259"},{"key":"e_1_3_2_2_27_1","volume-title":"The CMA evolution strategy: a comparing review. Towards a new evolutionary computation: Advances in the estimation of distribution algorithms","author":"Hansen Nikolaus","year":"2006","unstructured":"Nikolaus Hansen. 2006. The CMA evolution strategy: a comparing review. Towards a new evolutionary computation: Advances in the estimation of distribution algorithms (2006), 75\u2013102."},{"key":"e_1_3_2_2_28_1","first-page":"267","article-title":"ParamILS","volume":"36","author":"Hutter Frank","year":"2009","unstructured":"Frank Hutter, Holger H. Hoos, Kevin Leyton-Brown, and Thomas St\u00fctzle. 2009. ParamILS: An Automatic Algorithm Configuration Framework. 
36 (2009), 267\u2013306.","journal-title":"An Automatic Algorithm Configuration Framework."},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-29178-4_37"},{"key":"e_1_3_2_2_30_1","volume-title":"Champion-level drone racing using deep reinforcement learning. Nat. 620, 7976","author":"Kaufmann Elia","year":"2023","unstructured":"Elia Kaufmann, Leonard Bauersfeld, Antonio Loquercio, Matthias M\u00fcller, Vladlen Koltun, and Davide Scaramuzza. 2023. Champion-level drone racing using deep reinforcement learning. Nat. 620, 7976 (2023), 982\u2013987."},{"key":"e_1_3_2_2_31_1","doi-asserted-by":"publisher","DOI":"10.5555\/2955239.2955303"},{"volume-title":"Proceedings of the 3rd International Conference on Learning Representations, (ICLR'15)","author":"Diederik","key":"e_1_3_2_2_32_1","unstructured":"Diederik P. Kingma and Jimmy Ba. 2015. Adam: A Method for Stochastic Optimization. In Proceedings of the 3rd International Conference on Learning Representations, (ICLR'15), Yoshua Bengio and Yann LeCun (Eds.)."},{"key":"e_1_3_2_2_33_1","unstructured":"Michail G Lagoudakis Michael L Littman et al. 2000. Algorithm Selection using Reinforcement Learning.. In ICML. 511\u2013518."},{"volume-title":"Theory and application of reward shaping in reinforcement learning","author":"Laud Adam Daniel","key":"e_1_3_2_2_34_1","unstructured":"Adam Daniel Laud. 2004. Theory and application of reward shaping in reinforcement learning. University of Illinois at Urbana-Champaign."},{"volume-title":"Proceedings of the 28th International Conference on Advances in Neural Information Processing Systems (NeurIPS'14)","author":"Levine S.","key":"e_1_3_2_2_35_1","unstructured":"S. Levine and P. Abbeel. 2014. Learning Neural Network Policies with Guided Policy Search under Unknown Dynamics. In Proceedings of the 28th International Conference on Advances in Neural Information Processing Systems (NeurIPS'14), Z. Ghahramani, M. Welling, C. Cortes, N. Lawrence, and K. Weinberger (Eds.). 1071\u20131079."},{"key":"e_1_3_2_2_36_1","volume-title":"Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971","author":"Lillicrap TP","year":"2015","unstructured":"TP Lillicrap. 2015. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971 (2015)."},{"key":"e_1_3_2_2_37_1","volume-title":"4th International Conference on Learning Representations, ICLR 2016, San Juan, Puerto Rico, May 2\u20134, 2016, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.). http:\/\/arxiv.org\/abs\/1509","author":"Lillicrap Timothy P.","year":"2016","unstructured":"Timothy P. Lillicrap, Jonathan J. Hunt, Alexander Pritzel, Nicolas Heess, Tom Erez, Yuval Tassa, David Silver, and Daan Wierstra. 2016. Continuous control with deep reinforcement learning. In 4th International Conference on Learning Representations, ICLR 2016, San Juan, Puerto Rico, May 2\u20134, 2016, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.). http:\/\/arxiv.org\/abs\/1509.02971"},{"key":"e_1_3_2_2_38_1","article-title":"SMAC3: A Versatile Bayesian Optimization Package for Hyperparameter Optimization","volume":"23","author":"Lindauer Marius","year":"2022","unstructured":"Marius Lindauer, Katharina Eggensperger, Matthias Feurer, Andr\u00e9 Biedenkapp, Difan Deng, Carolin Benjamins, Tim Ruhkopf, Ren\u00e9 Sass, and Frank Hutter. 2022. SMAC3: A Versatile Bayesian Optimization Package for Hyperparameter Optimization. J. Mach. Learn. Res. 23 (2022), 54:1\u201354:9. 
https:\/\/jmlr.org\/papers\/v23\/21-0888.html","journal-title":"J. Mach. Learn. Res."},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.orp.2016.09.002"},{"volume-title":"Reward Shaping for Reinforcement Learning with An Assistant Reward Agent. In Forty-first International Conference on Machine Learning.","author":"Ma Haozhe","key":"e_1_3_2_2_40_1","unstructured":"Haozhe Ma, Kuankuan Sima, Thanh Vinh Vo, Di Fu, and Tze-Yun Leong. [n. d.]. Reward Shaping for Reinforcement Learning with An Assistant Reward Agent. In Forty-first International Conference on Machine Learning."},{"key":"e_1_3_2_2_41_1","volume-title":"Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602","author":"Mnih Volodymyr","year":"2013","unstructured":"Volodymyr Mnih. 2013. Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602 (2013)."},{"key":"e_1_3_2_2_42_1","volume-title":"Proceedings of the Sixteenth International Conference on Machine Learning.","author":"Ng Andrew Y","year":"1999","unstructured":"Andrew Y Ng, Daishi Harada, and Stuart Russell. 1999. Theory and application to reward shaping. In Proceedings of the Sixteenth International Conference on Machine Learning."},{"key":"e_1_3_2_2_43_1","volume-title":"Deep exploration via bootstrapped DQN. Advances in neural information processing systems 29","author":"Osband Ian","year":"2016","unstructured":"Ian Osband, Charles Blundell, Alexander Pritzel, and Benjamin Van Roy. 2016. Deep exploration via bootstrapped DQN. Advances in neural information processing systems 29 (2016)."},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.13596"},{"key":"e_1_3_2_2_45_1","doi-asserted-by":"publisher","DOI":"10.5555\/2955491.2955607"},{"key":"e_1_3_2_2_46_1","first-page":"463","article-title":"Learning to Drive a Bicycle Using Reinforcement Learning and Shaping","volume":"98","author":"Randl\u00f8v Jette","year":"1998","unstructured":"Jette Randl\u00f8v and Preben Alstr\u00f8m. 1998. Learning to Drive a Bicycle Using Reinforcement Learning and Shaping.. In ICML, Vol. 98. 463\u2013471.","journal-title":"ICML"},{"key":"e_1_3_2_2_47_1","doi-asserted-by":"publisher","DOI":"10.1109\/SITIS.2010.22"},{"key":"e_1_3_2_2_48_1","doi-asserted-by":"publisher","DOI":"10.1613\/jair.1.13676"},{"key":"e_1_3_2_2_49_1","volume-title":"PPSN 2020, Leiden, The Netherlands, September 5\u20139, 2020, Proceedings, Part I 16","author":"Shala Gresa","year":"2020","unstructured":"Gresa Shala, Andr\u00e9 Biedenkapp, Noor Awad, Steven Adriaensen, Marius Lindauer, and Frank Hutter. 2020. Learning step-size adaptation in CMA-ES. In Parallel Problem Solving from Nature-PPSN XVI: 16th International Conference, PPSN 2020, Leiden, The Netherlands, September 5\u20139, 2020, Proceedings, Part I 16. Springer, 691\u2013706."},{"key":"e_1_3_2_2_50_1","doi-asserted-by":"publisher","DOI":"10.1145\/3321707.3321813"},{"key":"e_1_3_2_2_51_1","doi-asserted-by":"crossref","unstructured":"David Silver Julian Schrittwieser Karen Simonyan Ioannis Antonoglou Aja Huang Arthur Guez Thomas Hubert Lucas Baker Matthew Lai Adrian Bolton et al. 2017. Mastering the game of go without human knowledge. nature 550 7676 (2017) 354\u2013359.","DOI":"10.1038\/nature24270"},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICTAI.2004.28"},{"key":"e_1_3_2_2_53_1","volume-title":"Reward scale robustness for proximal policy optimization via DreamerV3 tricks. 
Advances in Neural Information Processing Systems 36","author":"Sullivan Ryan","year":"2024","unstructured":"Ryan Sullivan, Akarsh Kumar, Shengyi Huang, John Dickerson, and Joseph Suarez. 2024. Reward scale robustness for proximal policy optimization via DreamerV3 tricks. Advances in Neural Information Processing Systems 36 (2024)."},{"key":"e_1_3_2_2_54_1","volume-title":"Exploit reward shifting in value-based deep-rl: Optimistic curiosity-based exploration and conservative exploitation via linear reward shaping. Advances in neural information processing systems 35","author":"Sun Hao","year":"2022","unstructured":"Hao Sun, Lei Han, Rui Yang, Xiaoteng Ma, Jian Guo, and Bolei Zhou. 2022. Exploit reward shifting in value-based deep-rl: Optimistic curiosity-based exploration and conservative exploitation via linear reward shaping. Advances in neural information processing systems 35 (2022), 37719\u201337734."},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"publisher","DOI":"10.1109\/TNN.1998.712192"},{"key":"e_1_3_2_2_56_1","volume-title":"Learning to predict by the methods of temporal differences. Machine learning 3","author":"Sutton Richard S","year":"1988","unstructured":"Richard S Sutton. 1988. Learning to predict by the methods of temporal differences. Machine learning 3 (1988), 9\u201344."},{"key":"e_1_3_2_2_57_1","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390288"},{"key":"e_1_3_2_2_58_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v30i1.10295"},{"key":"e_1_3_2_2_59_1","volume-title":"Learning values across many orders of magnitude. Advances in neural information processing systems 29","author":"Van Hasselt Hado P","year":"2016","unstructured":"Hado P Van Hasselt, Arthur Guez, Matteo Hessel, Volodymyr Mnih, and David Silver. 2016. Learning values across many orders of magnitude. Advances in neural information processing systems 29 (2016)."},{"key":"e_1_3_2_2_60_1","doi-asserted-by":"publisher","DOI":"10.1145\/3321707.3321803"},{"key":"e_1_3_2_2_61_1","volume-title":"Machine learning 8","author":"Watkins Christopher JCH","year":"1992","unstructured":"Christopher JCH Watkins and Peter Dayan. 1992. Q-learning. Machine learning 8 (1992), 279\u2013292."},{"key":"e_1_3_2_2_62_1","doi-asserted-by":"crossref","unstructured":"Peter R. Wurman Samuel Barrett Kenta Kawamoto James MacGlashan Kaushik Subramanian Thomas J. Walsh Roberto Capobianco Alisa Devlic Franziska Eckert Florian Fuchs Leilani Gilpin Piyush Khandelwal Varun Raj Kompella HaoChih Lin Patrick MacAlpine Declan Oller Takuma Seno Craig Sherstan Michael D. Thomure Houmehr Aghabozorgi Leon Barrett Rory Douglas Dion Whitehead Peter D\u00fcrr Peter Stone Michael Spranger and Hiroaki Kitano. 2022. Outracing champion Gran Turismo drivers with deep reinforcement learning. Nat. 602 7896 (2022) 223\u2013228.","DOI":"10.1038\/s41586-021-04357-7"},{"key":"e_1_3_2_2_63_1","volume-title":"Multi-agent Dynamic Algorithm Configuration. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems, NeurIPS'22","author":"Xue Ke","year":"2022","unstructured":"Ke Xue, Jiacheng Xu, Lei Yuan, Miqing Li, Chao Qian, Zongzhang Zhang, and Yang Yu. 2022. Multi-agent Dynamic Algorithm Configuration. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems, NeurIPS'22, Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. 
Oh (Eds.)."}],"event":{"name":"GECCO '25: Genetic and Evolutionary Computation Conference","sponsor":["SIGEVO ACM Special Interest Group on Genetic and Evolutionary Computation"],"location":"NH Malaga Hotel Malaga Spain","acronym":"GECCO '25"},"container-title":["Proceedings of the Genetic and Evolutionary Computation Conference"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3712256.3726395","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,7]],"date-time":"2025-10-07T20:38:04Z","timestamp":1759869484000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3712256.3726395"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,13]]},"references-count":63,"alternative-id":["10.1145\/3712256.3726395","10.1145\/3712256"],"URL":"https:\/\/doi.org\/10.1145\/3712256.3726395","relation":{},"subject":[],"published":{"date-parts":[[2025,7,13]]},"assertion":[{"value":"2025-07-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}