{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,7]],"date-time":"2026-05-07T15:49:39Z","timestamp":1778168979572,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":26,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/501100003816","name":"Huawei Technologies","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003816","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100015804","name":"China Computer Federation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100015804","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"NSF (National Science Foundation)","doi-asserted-by":"publisher","award":["2034508"],"award-info":[{"award-number":["2034508"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1145\/3650105.3652295","type":"proceedings-article","created":{"date-parts":[[2024,6,12]],"date-time":"2024-06-12T16:01:35Z","timestamp":1718208095000},"page":"103-107","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":22,"title":["On Evaluating the Efficiency of Source Code Generated by LLMs"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4358-5451","authenticated-orcid":false,"given":"Changan","family":"Niu","sequence":"first","affiliation":[{"name":"Software Institute, Nanjing University, Nanjing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6001-1372","authenticated-orcid":false,"given":"Ting","family":"Zhang","sequence":"additional","affiliation":[{"name":"Singapore Management University, Singapore, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9270-5072","authenticated-orcid":false,"given":"Chuanyi","family":"Li","sequence":"additional","affiliation":[{"name":"Software Institute, Nanjing University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-1102-9584","authenticated-orcid":false,"given":"Bin","family":"Luo","sequence":"additional","affiliation":[{"name":"Software Institute, Nanjing University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8237-429X","authenticated-orcid":false,"given":"Vincent","family":"Ng","sequence":"additional","affiliation":[{"name":"Human Language Technology Research Institute, University of Texas at Dallas, Richardson, Texas, USA"}]}],"member":"320","published-online":{"date-parts":[[2024,6,12]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"[n. d.]. https:\/\/github.com\/NougatCA\/EfficiencyEval."},{"key":"e_1_3_2_1_2_1","unstructured":"[n. d.]. Codeforces. https:\/\/codeforces.com\/."},{"key":"e_1_3_2_1_3_1","unstructured":"[n. d.]. LeetCode. https:\/\/leetcode.com\/."},{"key":"e_1_3_2_1_4_1","volume-title":"Validation of the gem5 simulator for x86 architectures. In 2019 IEEE\/ACM Performance Modeling, Benchmarking and Simulation of High Performance Computer Systems (PMBS)","author":"Akram Ayaz","unstructured":"Ayaz Akram and Lina Sawalha. 2019. Validation of the gem5 simulator for x86 architectures. In 2019 IEEE\/ACM Performance Modeling, Benchmarking and Simulation of High Performance Computer Systems (PMBS). IEEE, 53--58."},{"key":"e_1_3_2_1_5_1","unstructured":"Anthropic. [n. d.]. Introducing Claude. 
https:\/\/www.anthropic.com\/index\/introducing-claude."},{"key":"e_1_3_2_1_6_1","unstructured":"Jacob Austin Augustus Odena Maxwell Nye Maarten Bosma Henryk Michalewski David Dohan Ellen Jiang Carrie Cai Michael Terry Quoc Le and Charles Sutton. 2021. Program Synthesis with Large Language Models. arXiv:2108.07732 [cs.PL]"},{"key":"e_1_3_2_1_7_1","unstructured":"Xiao Bi Deli Chen Guanting Chen Shanhuang Chen Damai Dai Chengqi Deng Honghui Ding Kai Dong Qiushi Du Zhe Fu et al. 2024. DeepSeek LLM: Scaling Open-Source Language Models with Longtermism. arXiv preprint arXiv:2401.02954 (2024)."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"crossref","unstructured":"Nathan Binkert Bradford Beckmann Gabriel Black Steven K Reinhardt Ali Saidi Arkaprava Basu Joel Hestness Derek R Hower Tushar Krishna Somayeh Sardashti et al. 2011. The gem5 simulator. ACM SIGARCH computer architecture news 39 2 (2011) 1--7.","DOI":"10.1145\/2024716.2024718"},{"key":"e_1_3_2_1_9_1","volume-title":"CodeT: Code Generation with Generated Tests. In The Eleventh International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=ktrw68Cmu9c","author":"Chen Bei","year":"2023","unstructured":"Bei Chen, Fengji Zhang, Anh Nguyen, Daoguang Zan, Zeqi Lin, Jian-Guang Lou, and Weizhu Chen. 2023. CodeT: Code Generation with Generated Tests. In The Eleventh International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=ktrw68Cmu9c"},{"key":"e_1_3_2_1_10_1","volume-title":"Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al.","author":"Chen Mark","year":"2021","unstructured":"Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, et al. 2021. Evaluating Large Language Models Trained on Code. arXiv:2107.03374 [cs.LG]"},{"key":"e_1_3_2_1_11_1","unstructured":"DeepSeek. 2023. DeepSeek Coder: Let the Code Write Itself. 
https:\/\/github.com\/deepseek-ai\/DeepSeek-Coder."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/3540250.3549096"},{"key":"e_1_3_2_1_13_1","unstructured":"GitHub. [n. d.]. GitHub Copilot. https:\/\/github.com\/features\/copilot."},{"key":"e_1_3_2_1_14_1","unstructured":"Google. [n. d.]. Bard. https:\/\/bard.google.com\/."},{"key":"e_1_3_2_1_15_1","volume-title":"ANPL: Towards Natural Programming with Interactive Decomposition. In Thirty-seventh Conference on Neural Information Processing Systems. https:\/\/openreview.net\/forum?id=RTRS3ZTsSj","author":"Huang Di","year":"2023","unstructured":"Di Huang, Ziyuan Nan, Xing Hu, Pengwei Jin, Shaohui Peng, Yuanbo Wen, Rui Zhang, Zidong Du, Qi Guo, Yewen Pu, and Yunji Chen. 2023. ANPL: Towards Natural Programming with Interactive Decomposition. In Thirty-seventh Conference on Neural Information Processing Systems. https:\/\/openreview.net\/forum?id=RTRS3ZTsSj"},{"key":"e_1_3_2_1_16_1","unstructured":"JetBrains. [n. d.]. JetBrains AI. https:\/\/www.jetbrains.com\/ai\/."},{"key":"e_1_3_2_1_17_1","volume-title":"Rigorous Evaluation of Large Language Models for Code Generation. In Thirty-seventh Conference on Neural Information Processing Systems.","author":"Liu Jiawei","year":"2023","unstructured":"Jiawei Liu, Chunqiu Steven Xia, Yuyao Wang, and Lingming Zhang. 2023. Is Your Code Generated by ChatGPT Really Correct? Rigorous Evaluation of Large Language Models for Code Generation. In Thirty-seventh Conference on Neural Information Processing Systems."},{"key":"e_1_3_2_1_18_1","unstructured":"Ziyang Luo Can Xu Pu Zhao Qingfeng Sun Xiubo Geng Wenxiang Hu Chongyang Tao Jing Ma Qingwei Lin and Daxin Jiang. 2023. WizardCoder: Empowering Code Large Language Models with Evol-Instruct. arXiv:2306.08568 [cs.CL]"},{"key":"e_1_3_2_1_19_1","volume-title":"Learning Performance-Improving Code Edits. 
In International Conference on Learning Representations.","author":"Madaan Aman","year":"2024","unstructured":"Aman Madaan, Alexander Shypula, Uri Alon, Milad Hashemi, Parthasarathy Ranganathan, Yiming Yang, Graham Neubig, and Amir Yazdanbakhsh. 2024. Learning Performance-Improving Code Edits. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_20_1","volume-title":"Self-refine: Iterative refinement with self-feedback. arXiv preprint arXiv:2303.17651","author":"Madaan Aman","year":"2023","unstructured":"Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, et al. 2023. Self-refine: Iterative refinement with self-feedback. arXiv preprint arXiv:2303.17651 (2023)."},{"key":"e_1_3_2_1_21_1","first-page":"i","volume":"202","unstructured":"Microsoft. 2023. Phi-2: The surprising power of small language models. https:\/\/www.microsoft.com\/en-us\/research\/blog\/phi-2-the-surprising-power-of-small-language-models\/.","journal-title":"Microsoft."},{"key":"e_1_3_2_1_23_1","volume-title":"Yossi Adi, Jingyu Liu, Tal Remez, J\u00e9r\u00e9my Rapin, et al.","author":"Roziere Baptiste","year":"2023","unstructured":"Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, J\u00e9r\u00e9my Rapin, et al. 2023. Code Llama: Open Foundation Models for Code. arXiv:2308.12950 [cs.CL]"},{"key":"e_1_3_2_1_24_1","volume-title":"A Lightweight Framework for High-Quality Code Generation. arXiv preprint arXiv:2307.08220","author":"Siddiq Mohammed Latif","year":"2023","unstructured":"Mohammed Latif Siddiq, Beatrice Casey, and Joanna Santos. 2023. A Lightweight Framework for High-Quality Code Generation. 
arXiv preprint arXiv:2307.08220 (2023)."},{"key":"e_1_3_2_1_25_1","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_1_26_1","volume-title":"Evaluating the code quality of ai-assisted code generation tools: An empirical study on github copilot, amazon codewhisperer, and chatgpt. arXiv preprint arXiv:2304.10778","author":"Yeti\u015ftiren Burak","year":"2023","unstructured":"Burak Yeti\u015ftiren, I\u015f\u0131k \u00d6zsoy, Miray Ayerdem, and Eray T\u00fcz\u00fcn. 2023. Evaluating the code quality of ai-assisted code generation tools: An empirical study on github copilot, amazon codewhisperer, and chatgpt. arXiv preprint arXiv:2304.10778 (2023)."},{"key":"e_1_3_2_1_27_1","volume-title":"Thirty-seventh Conference on Neural Information Processing Systems. https:\/\/openreview.net\/forum?id=qd9qcbVAwQ","author":"Zelikman Eric","year":"2023","unstructured":"Eric Zelikman, Qian Huang, Gabriel Poesia, Noah Goodman, and Nick Haber. 2023. Parsel: Algorithmic Reasoning with Language Models by Composing Decompositions. In Thirty-seventh Conference on Neural Information Processing Systems. 
https:\/\/openreview.net\/forum?id=qd9qcbVAwQ"}],"event":{"name":"FORGE '24: 2024 IEEE\/ACM First International Conference on AI Foundation Models and Software Engineering","location":"Lisbon Portugal","acronym":"FORGE '24","sponsor":["SIGSOFT ACM Special Interest Group on Software Engineering"]},"container-title":["Proceedings of the 2024 IEEE\/ACM First International Conference on AI Foundation Models and Software Engineering"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3650105.3652295","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3650105.3652295","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:03:43Z","timestamp":1750291423000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3650105.3652295"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":26,"alternative-id":["10.1145\/3650105.3652295","10.1145\/3650105"],"URL":"https:\/\/doi.org\/10.1145\/3650105.3652295","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]},"assertion":[{"value":"2024-06-12","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}