{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T11:56:24Z","timestamp":1775822184891,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":41,"publisher":"ACM","license":[{"start":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T00:00:00Z","timestamp":1701302400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/100000001","name":"NSF (National Science Foundation)","doi-asserted-by":"publisher","award":["CCF-2100035,CCF-2211428"],"award-info":[{"award-number":["CCF-2100035,CCF-2211428"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2023,11,30]]},"DOI":"10.1145\/3611643.3613090","type":"proceedings-article","created":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T23:14:38Z","timestamp":1701386078000},"page":"2152-2156","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":9,"title":["A Language Model of Java Methods with Train\/Test Deduplication"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1803-560X","authenticated-orcid":false,"given":"Chia-Yi","family":"Su","sequence":"first","affiliation":[{"name":"University of Notre Dame, Notre Dame, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7475-7899","authenticated-orcid":false,"given":"Aakash","family":"Bansal","sequence":"additional","affiliation":[{"name":"University of Notre Dame, Notre Dame, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2652-5107","authenticated-orcid":false,"given":"Vijayanta","family":"Jain","sequence":"additional","affiliation":[{"name":"University of Maine, Orono, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7972-667X","authenticated-orcid":false,"given":"Sepideh","family":"Ghanavati","sequence":"additional","affiliation":[{"name":"University of Maine, Orono, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-0887-1083","authenticated-orcid":false,"given":"Collin","family":"McMillan","sequence":"additional","affiliation":[{"name":"University of Notre Dame, Notre Dame, USA"}]}],"member":"320","published-online":{"date-parts":[[2023,11,30]]},"reference":[{"key":"e_1_3_2_2_1_1","doi-asserted-by":"crossref","unstructured":"Toufique Ahmed and Premkumar Devanbu. 2022. Few-shot training LLMs for project-specific code-summarization. arXiv preprint arXiv:2207.04237.","DOI":"10.1145\/3551349.3559555"},{"key":"e_1_3_2_2_2_1","volume-title":"Project-Level Encoding for Neural Source Code Summarization of Subroutines. In 29th ACM\/IEEE International Conference on Program Comprehension (ICPC\u201921)","author":"Bansal Aakash","year":"2021","unstructured":"Aakash Bansal, Sakib Haque, and Collin McMillan. 2021. Project-Level Encoding for Neural Source Code Summarization of Subroutines. In 29th ACM\/IEEE International Conference on Program Comprehension (ICPC\u201921)."},{"key":"e_1_3_2_2_3_1","volume-title":"Proceedings of ACM Human-Computer Interaction","volume":"7","author":"Bansal Aakash","year":"2023","unstructured":"Aakash Bansal, Bonita Sharif, and Collin McMillan. 2023. Towards modeling human attention from eye movements for neutral source code summarization. Proceedings of ACM Human-Computer Interaction, Vol. 
7."},{"key":"e_1_3_2_2_4_1","unstructured":"Stella Biderman Kieran Bicheno and Leo Gao. 2022. Datasheet for the pile. arXiv preprint arXiv:2201.07311."},{"key":"e_1_3_2_2_5_1","volume-title":"Shivanshu Purohit, USVSN Sai Prashanth, and Edward Raff.","author":"Biderman Stella","year":"2023","unstructured":"Stella Biderman, Hailey Schoelkopf, Quentin Anthony, Herbie Bradley, Kyle O\u2019Brien, Eric Hallahan, Mohammad Aflah Khan, Shivanshu Purohit, USVSN Sai Prashanth, and Edward Raff. 2023. Pythia: A suite for analyzing large language models across training and scaling. arXiv preprint arXiv:2304.01373."},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"crossref","unstructured":"Sid Black Stella Biderman Eric Hallahan Quentin Anthony Leo Gao Laurence Golding Horace He Connor Leahy Kyle McDonell and Jason Phang. 2022. Gpt-neox-20b: An open-source autoregressive language model. arXiv preprint arXiv:2204.06745.","DOI":"10.18653\/v1\/2022.bigscience-1.9"},{"key":"e_1_3_2_2_7_1","volume-title":"Petals: Collaborative inference and fine-tuning of large models. arXiv preprint arXiv:2209.01188.","author":"Borzunov Alexander","year":"2022","unstructured":"Alexander Borzunov, Dmitry Baranchuk, Tim Dettmers, Max Ryabinin, Younes Belkada, Artem Chumachenko, Pavel Samygin, and Colin Raffel. 2022. Petals: Collaborative inference and fine-tuning of large models. arXiv preprint arXiv:2209.01188."},{"key":"e_1_3_2_2_8_1","volume-title":"Language models are few-shot learners. Advances in neural information processing systems, 33","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, and Amanda Askell. 2020. Language models are few-shot learners. Advances in neural information processing systems, 33 (2020), 1877\u20131901."},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"crossref","unstructured":"Guanzheng Chen Fangyu Liu Zaiqiao Meng and Shangsong Liang. 2022. Revisiting parameter-efficient tuning: Are we really there yet? arXiv preprint arXiv:2202.07962.","DOI":"10.18653\/v1\/2022.emnlp-main.168"},{"key":"e_1_3_2_2_10_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805."},{"key":"e_1_3_2_2_11_1","first-page":"23","article-title":"A new algorithm for data compression","volume":"12","author":"Gage Philip","year":"1994","unstructured":"Philip Gage. 1994. A new algorithm for data compression. C Users Journal, 12, 2 (1994), 23\u201338.","journal-title":"C Users Journal"},{"key":"e_1_3_2_2_12_1","unstructured":"Github. 2022. Github Co-Pilot. https:\/\/github.com\/features\/copilot"},{"key":"e_1_3_2_2_13_1","unstructured":"Daniel Grittner. 2023. NanoGPT-LoRA. https:\/\/github.com\/danielgrittner\/nanoGPT-LoRA"},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.1145\/1810295.1810335"},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"publisher","DOI":"10.1145\/3524610.3527909"},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3501261"},{"key":"e_1_3_2_2_17_1","volume-title":"Lora: Low-rank adaptation of large language models. 
{"key":"e_1_3_2_2_18_1","unstructured":"Stack Exchange Inc. 2022. Stack Exchange Data Dumps. https:\/\/archive.org\/details\/stackexchange"},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"crossref","unstructured":"Sajed Jalil, Suzzana Rafi, Thomas D LaToza, Kevin Moran, and Wing Lam. 2023. Chatgpt and software testing education: Promises & perils. arXiv preprint arXiv:2302.03287.","DOI":"10.1109\/ICSTW58534.2023.00078"},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.1109\/MSR.2013.6624047"},{"key":"e_1_3_2_2_21_1","doi-asserted-by":"crossref","unstructured":"Kevin Jesse, Toufique Ahmed, Premkumar T Devanbu, and Emily Morgan. 2023. Large Language Models and Simple Stupid Bugs. arXiv preprint arXiv:2303.11455.","DOI":"10.1109\/MSR59073.2023.00082"},{"key":"e_1_3_2_2_22_1","unstructured":"Andrej Karpathy. 2022. NanoGPT. https:\/\/github.com\/karpathy\/nanoGPT"},{"key":"e_1_3_2_2_23_1","volume-title":"The Stack: 3 TB of permissively licensed source code. arXiv preprint arXiv:2211.15533.","author":"Kocetkov Denis","year":"2022","unstructured":"Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Mu\u00f1oz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, and Thomas Wolf. 2022. The Stack: 3 TB of permissively licensed source code. arXiv preprint arXiv:2211.15533."},{"key":"e_1_3_2_2_24_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-1394"},{"key":"e_1_3_2_2_25_1","volume-title":"Mining of massive data sets","author":"Leskovec Jure","unstructured":"Jure Leskovec, Anand Rajaraman, and Jeffrey David Ullman. 2020. Mining of massive data sets. Cambridge University Press."},{"key":"e_1_3_2_2_26_1","unstructured":"C. Lopes, S. Bajracharya, J. Ossher, and P. Baldi. 2010. Source Code Data Sets. http:\/\/www.ics.uci.edu\/~lopes\/datasets\/"},{"key":"e_1_3_2_2_27_1","unstructured":"NVIDIA. 2020. NVIDIA Ampere Architecture Blog. https:\/\/www.nvidia.com\/en-us\/data-center\/ampere-architecture\/"},{"key":"e_1_3_2_2_28_1","unstructured":"OpenAI. 2022. ChatGPT. https:\/\/openai.com\/blog\/chatgpt"},{"key":"e_1_3_2_2_29_1","volume-title":"Plansformer: Generating Symbolic Plans using Transformers. arXiv preprint arXiv:2212.08681.","author":"Pallagani Vishal","year":"2022","unstructured":"Vishal Pallagani, Bharath Muppasani, Keerthiram Murugesan, Francesca Rossi, Lior Horesh, Biplav Srivastava, Francesco Fabiano, and Andrea Loreggia. 2022. Plansformer: Generating Symbolic Plans using Transformers. arXiv preprint arXiv:2212.08681."},{"key":"e_1_3_2_2_30_1","volume-title":"Examining Zero-Shot Vulnerability Repair with Large Language Models. In 2023 IEEE Symposium on Security and Privacy (SP). 1\u201318","author":"Pearce Hammond","year":"2022","unstructured":"Hammond Pearce, Benjamin Tan, Baleegh Ahmad, Ramesh Karri, and Brendan Dolan-Gavitt. 2022. Examining Zero-Shot Vulnerability Repair with Large Language Models. In 2023 IEEE Symposium on Security and Privacy (SP). 1\u201318."},{"key":"e_1_3_2_2_31_1","volume-title":"Language models are unsupervised multitask learners. OpenAI blog, 1, 8","author":"Radford Alec","year":"2019","unstructured":"Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1, 8 (2019), 9."},
{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.1145\/3581641.3584037"},{"key":"e_1_3_2_2_33_1","first-page":"3659","article-title":"Towards crowdsourced training of large neural networks using decentralized mixture-of-experts","volume":"33","author":"Ryabinin Max","year":"2020","unstructured":"Max Ryabinin and Anton Gusev. 2020. Towards crowdsourced training of large neural networks using decentralized mixture-of-experts. Advances in Neural Information Processing Systems, 33 (2020), 3659\u20133672.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_2_34_1","volume-title":"What is it like to program with artificial intelligence? arXiv preprint arXiv:2208.06213.","author":"Sarkar Advait","year":"2022","unstructured":"Advait Sarkar, Andrew D Gordon, Carina Negreanu, Christian Poelitz, Sruti Srinivasa Ragavan, and Ben Zorn. 2022. What is it like to program with artificial intelligence? arXiv preprint arXiv:2208.06213."},{"key":"e_1_3_2_2_35_1","volume-title":"Noise-Robust De-Duplication at Scale","author":"Silcock Emily","unstructured":"Emily Silcock, Luca D\u2019Amico-Wong, Jinglin Yang, and Melissa Dell. 2022. Noise-Robust De-Duplication at Scale. National Bureau of Economic Research."},{"key":"e_1_3_2_2_36_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971.","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, and Faisal Azhar. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971."},{"key":"e_1_3_2_2_37_1","volume-title":"Natural language processing with transformers","author":"Tunstall Lewis","year":"2022","unstructured":"Lewis Tunstall, Leandro Von Werra, and Thomas Wolf. 2022. Natural language processing with transformers. O\u2019Reilly Media, Inc."},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.1145\/3491101.3519665"},{"key":"e_1_3_2_2_39_1","unstructured":"Leandro Von Werra and Loubna Ben Allal. 2023. StarCoder: A State-of-the-Art LLM for Code. https:\/\/huggingface.co\/blog\/starcoder"},{"key":"e_1_3_2_2_40_1","unstructured":"Ben Wang and Aran Komatsuzaki. 2021. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https:\/\/github.com\/kingoflolz\/mesh-transformer-jax"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"crossref","unstructured":"Qinkai Zheng, Xiao Xia, Xu Zou, Yuxiao Dong, Shan Wang, Yufei Xue, Zihan Wang, Lei Shen, Andi Wang, Yang Li, Teng Su, Zhilin Yang, and Jie Tang. 2023. CodeGeeX: A Pre-Trained Model for Code Generation with Multilingual Evaluations on HumanEval-X. arXiv preprint arXiv:2303.17568.","DOI":"10.1145\/3580305.3599790"}],
"event":{"name":"ESEC\/FSE '23: 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering","location":"San Francisco CA USA","acronym":"ESEC\/FSE '23","sponsor":["SIGSOFT ACM Special Interest Group on Software Engineering"]},"container-title":["Proceedings of the 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3611643.3613090","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3611643.3613090","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T16:37:10Z","timestamp":1750178230000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3611643.3613090"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,30]]},"references-count":41,"alternative-id":["10.1145\/3611643.3613090","10.1145\/3611643"],"URL":"https:\/\/doi.org\/10.1145\/3611643.3613090","relation":{},"subject":[],"published":{"date-parts":[[2023,11,30]]},"assertion":[{"value":"2023-11-30","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}