{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,8]],"date-time":"2025-10-08T00:20:57Z","timestamp":1759882857318,"version":"build-2065373602"},"publisher-location":"New York, NY, USA","reference-count":25,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,7,14]]},"DOI":"10.1145\/3712255.3726633","type":"proceedings-article","created":{"date-parts":[[2025,8,11]],"date-time":"2025-08-11T15:15:44Z","timestamp":1754925344000},"page":"527-530","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["An LLM-Based Genetic Algorithm for Prompt Engineering"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-3766-3369","authenticated-orcid":false,"given":"Leandro Augusto","family":"Loss","sequence":"first","affiliation":[{"name":"AI R&amp;D, AML RightSource, Philadelphia, PA, USA"},{"name":"ESSCA School of Management, Philadelphia, PA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-7573-7932","authenticated-orcid":false,"given":"Pratikkumar","family":"Dhuvad","sequence":"additional","affiliation":[{"name":"AI R&amp;D, AML RightSource, Philadelphia, PA, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,8,11]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"https:\/\/ai.meta.com\/","author":"Meta AI.","year":"2024","unstructured":"Meta AI. 2024. Llama 3.2. (2024). https:\/\/ai.meta.com\/."},{"unstructured":"Mistral AI. 2024. Mistral NeMo. https:\/\/mistral.ai\/.","key":"e_1_3_2_1_2_1"},{"unstructured":"S. R. Anandhu. 2023. Topic Classification HuggingFace Dataset. https:\/\/huggingface.co\/datasets\/anandhu-sct\/topic_classification","key":"e_1_3_2_1_3_1"},{"key":"e_1_3_2_1_4_1","volume-title":"Neural Information Processing Systems 36","author":"Chen Angelica","year":"2024","unstructured":"Angelica Chen, David Dohan, and David So. 2024. EvoPrompting: language models for code-level neural architecture search. Adv. in Neural Information Processing Systems 36 (2024)."},{"key":"e_1_3_2_1_5_1","volume-title":"The Twelfth Int. Conf. on Learning Representations.","author":"Cheng Daixuan","year":"2023","unstructured":"Daixuan Cheng, Shaohan Huang, and Furu Wei. 2023. Adapting large language models via reading comprehension. The Twelfth Int. Conf. on Learning Representations."},{"key":"e_1_3_2_1_6_1","volume-title":"Rlprompt: Optimizing discrete text prompts with reinforcement learning.","author":"Deng Mingkai","year":"2023","unstructured":"Mingkai Deng, Jianyu Wang, Cheng-Ping Hsieh, Yihan Wang, Han Guo, Tianmin Shu, Meng Song, Eric P Xing, and Zhiting Hu. 2023. Rlprompt: Optimizing discrete text prompts with reinforcement learning. (2023), 3369\u20133391."},{"key":"e_1_3_2_1_7_1","volume-title":"Proc. of the Int. Conf. on Learning Representations (ICLR).","author":"Guo Qingyan","year":"2024","unstructured":"Qingyan Guo, Rui Wang, Junliang Guo, Bei Li, Kaitao Song, Xu Tan, Guoqing Liu, Jiang Bian, and Yujiu Yang. 2024. Connecting large language models with evolutionary algorithms yields powerful prompt optimizers. Proc. of the Int. Conf. on Learning Representations (ICLR)."},{"key":"e_1_3_2_1_8_1","volume-title":"Learning to program with natural language. arXiv preprint arXiv:2304.10464","author":"Guo Yiduo","year":"2023","unstructured":"Yiduo Guo, Yaobo Liang, Chenfei Wu, Wenshan Wu, Dongyan Zhao, and Nan Duan. 2023. Learning to program with natural language. arXiv preprint arXiv:2304.10464 (2023)."},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_9_1","DOI":"10.26615\/978-954-452-056-4_053"},{"key":"e_1_3_2_1_10_1","volume-title":"Proc. of the 29th Inte. Conf. on Computational Linguistics, 3251\u20133262","author":"Hou Yutai","year":"2022","unstructured":"Yutai Hou, Hongyuan Dong, Xinghao Wang, Bohan Li, and Wanxiang Che. 2022. MetaPrompting: Learning to Learn Better Prompts. Proc. of the 29th Inte. Conf. on Computational Linguistics, 3251\u20133262."},{"doi-asserted-by":"crossref","unstructured":"Cho-Jui Hsieh Si Si Felix X Yu and Inderjit S Dhillon. 2024. Automatic Engineering of Long Prompts. Findings of the Assoc. for Computational Linguistics 10672\u201310685.","key":"e_1_3_2_1_11_1","DOI":"10.18653\/v1\/2024.findings-acl.634"},{"doi-asserted-by":"crossref","unstructured":"Joel Lehman Jonathan Gordon Shawn Jain Kamal Ndousse Cathy Yeh and Kenneth O Stanley. 2023. Evolution through large models. Handbook of Evolutionary Machine Learning 331\u2013366.","key":"e_1_3_2_1_12_1","DOI":"10.1007\/978-981-99-3814-8_11"},{"unstructured":"Jiachang Liu Dinghan Shen Yizhe Zhang Bill Dolan Lawrence Carin and Weizhu Chen. 2022. What Makes Good In-Context Examples for GPT-3? Assoc. for Computational Linguistics 100\u2013114.","key":"e_1_3_2_1_13_1"},{"key":"e_1_3_2_1_14_1","volume-title":"IEEE Congress on Evolutionary Computation","author":"Leandro","year":"2025","unstructured":"Leandro A. Loss and Pratikkumar Dhuvad. 2025. From Manual to Automated Prompt Engineering: Evolving LLM Prompts with Genetic Algorithms. IEEE Congress on Evolutionary Computation 2025."},{"key":"e_1_3_2_1_15_1","volume-title":"Loss and Pratikkumar Dhuvad","author":"Leandro","year":"2025","unstructured":"Leandro A. Loss and Pratikkumar Dhuvad. 2025. An LLM-Based Genetic Algorithm for Prompt Engineering - Research Code and Datasets. https:\/\/github.com\/leloss\/gallm"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_16_1","DOI":"10.1145\/3694791"},{"key":"e_1_3_2_1_17_1","volume-title":"https:\/\/openai.com\/","author":"AI.","year":"2024","unstructured":"OpenAI. 2024. Hello GPT-4o. (2024). https:\/\/openai.com\/."},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_18_1","DOI":"10.18653\/v1\/2023.emnlp-main.494"},{"volume-title":"ScienceQA: a novel resource for question answering on scholarly articles","author":"Saikh Tanik","unstructured":"Tanik Saikh, Tirthankar Ghosal, Amish Mittal, Asif Ekbal, and Pushpak Bhattacharyya. 2022. ScienceQA: a novel resource for question answering on scholarly articles, Vol. 23. Int. J. on Digital Libraries, 289\u2013301. Issue 3.","key":"e_1_3_2_1_19_1"},{"key":"e_1_3_2_1_20_1","volume-title":"Proceedings of the First Conference on Language Modeling, 10994\u201311005","author":"Shi Weijia","year":"2023","unstructured":"Weijia Shi, Xiaochuang Han, Hila Gonen, Ari Holtzman, Yulia Tsvetkov, and Luke Zettlemoyer. 2023. Toward Human Readable Prompt Tuning: Kubrick's The Shining is a Good Movie, and a Good Prompt Too? Assoc. for Computational Linguistics, Proceedings of the First Conference on Language Modeling, 10994\u201311005."},{"key":"e_1_3_2_1_21_1","volume-title":"Eric Wallace, and Sameer Singh.","author":"Shin Taylor","year":"2020","unstructured":"Taylor Shin, Yasaman Razeghi, Robert L Logan IV, Eric Wallace, and Sameer Singh. 2020. Autoprompt: Eliciting knowledge from language models with automatically generated prompts. (2020), 4222\u20134235."},{"volume-title":"Genetic Algorithm for Prompt Engineering with Novel Genetic Operators. 15th Int. C. on Adv. Applied Informatics Winter, 209\u2013214","author":"Tanaka Hiroto","unstructured":"Hiroto Tanaka, Naoki Mori, and Makoto Okada. 2023. Genetic Algorithm for Prompt Engineering with Novel Genetic Operators. 15th Int. C. on Adv. Applied Informatics Winter, 209\u2013214.","key":"e_1_3_2_1_22_1"},{"key":"e_1_3_2_1_23_1","volume-title":"The Twelfth Int. Conf. on Learning Representations.","author":"Yang C","year":"2024","unstructured":"C Yang, X Wang, Y Lu, H Liu, QV Le, D Zhou, and X Chen. 2024. Large Language Models as Optimizers. The Twelfth Int. Conf. on Learning Representations."},{"key":"e_1_3_2_1_24_1","first-page":"355","article-title":"Prompt Engineering a Prompt Engineer","volume":"2024","author":"Ye Qinyuan","year":"2024","unstructured":"Qinyuan Ye, Maxamed Axmed, Reid Pryzant, and Fereshte Khani. 2024. Prompt Engineering a Prompt Engineer. Findings of the Assoc. for Computational Linguistics: ACL 2024, 355\u2013385.","journal-title":"Findings of the Assoc. for Computational Linguistics: ACL"},{"volume-title":"The Eleventh Int. Conf. on Learning Representations.","author":"Zhang Tianjun","unstructured":"Tianjun Zhang, Xuezhi Wang, Denny Zhou, Dale Schuurmans, and Joseph E. Gonzalez. 2023. TEMPERA: Test-Time Prompt Editing via Reinforcement Learning. The Eleventh Int. Conf. on Learning Representations.","key":"e_1_3_2_1_25_1"}],"event":{"sponsor":["SIGEVO ACM Special Interest Group on Genetic and Evolutionary Computation"],"acronym":"GECCO '25 Companion","name":"GECCO '25 Companion: Genetic and Evolutionary Computation Conference Companion","location":"NH Malaga Hotel Malaga Spain"},"container-title":["Proceedings of the Genetic and Evolutionary Computation Conference Companion"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3712255.3726633","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,7]],"date-time":"2025-10-07T11:58:51Z","timestamp":1759838331000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3712255.3726633"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,14]]},"references-count":25,"alternative-id":["10.1145\/3712255.3726633","10.1145\/3712255"],"URL":"https:\/\/doi.org\/10.1145\/3712255.3726633","relation":{},"subject":[],"published":{"date-parts":[[2025,7,14]]},"assertion":[{"value":"2025-08-11","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}