{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,5]],"date-time":"2026-02-05T10:26:57Z","timestamp":1770287217859,"version":"3.49.0"},"publisher-location":"New York, NY, USA","reference-count":91,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,10,10]],"date-time":"2022-10-10T00:00:00Z","timestamp":1665360000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"the National Nature Science Foundation of China under Grant","award":["61902162, 61862033"],"award-info":[{"award-number":["61902162, 61862033"]}]},{"name":"Postgraduate Innovation Fund Project of Jiangxi Province","award":["YC2021-S308"],"award-info":[{"award-number":["YC2021-S308"]}]},{"name":"the Nature Science Foundation of Jiangxi Province","award":["20202BAB202015"],"award-info":[{"award-number":["20202BAB202015"]}]},{"name":"the Science and technology Key project of Education Department of Jiangxi Province","award":["GJJ210307"],"award-info":[{"award-number":["GJJ210307"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,10,10]]},"DOI":"10.1145\/3551349.3556912","type":"proceedings-article","created":{"date-parts":[[2023,1,5]],"date-time":"2023-01-05T20:43:54Z","timestamp":1672951434000},"page":"1-13","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":38,"title":["Prompt-tuned Code Language Model as a Neural Knowledge Base for Type Inference in Statically-Typed Partial Code"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8877-4267","authenticated-orcid":false,"given":"Qing","family":"Huang","sequence":"first","affiliation":[{"name":"School of Computer Information Engineering, Jiangxi Normal University, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6497-9380","authenticated-orcid":false,"given":"Zhiqiang","family":"Yuan","sequence":"additional","affiliation":[{"name":"School of Computer Information Engineering, Jiangxi Normal University, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7663-1421","authenticated-orcid":false,"given":"Zhenchang","family":"Xing","sequence":"additional","affiliation":[{"name":"CSIRO's Data61 &amp; Australian National University, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2540-973X","authenticated-orcid":false,"given":"Xiwei","family":"Xu","sequence":"additional","affiliation":[{"name":"CSIRO's Data61, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5839-3765","authenticated-orcid":false,"given":"Liming","family":"Zhu","sequence":"additional","affiliation":[{"name":"CSIRO's Data61 &amp; School of CSE, UNSW, Australia"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9466-1672","authenticated-orcid":false,"given":"Qinghua","family":"Lu","sequence":"additional","affiliation":[{"name":"CSIRO's Data61, Australia"}]}],"member":"320","published-online":{"date-parts":[[2023,1,5]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"crossref","unstructured":"Wasi\u00a0Uddin Ahmad Saikat Chakraborty Baishakhi Ray and Kai-Wei Chang. 2020. A transformer-based approach for source code summarization. arXiv preprint arXiv:2005.00653(2020).","DOI":"10.18653\/v1\/2020.acl-main.449"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","unstructured":"Raja\u00a0Naeem Akram and Konstantinos Markantonakis. 2016. 
Challenges of security and trust of mobile devices as digital avionics component. In 2016 Integrated Communications Navigation and Surveillance (ICNS). 1C4\u20131\u20131C4\u201311. https:\/\/doi.org\/10.1109\/ICNSURV.2016.7486323","DOI":"10.1109\/ICNSURV.2016.7486323"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/3212695"},{"key":"e_1_3_2_1_4_1","unstructured":"Miltiadis Allamanis Daniel Tarlow Andrew\u00a0D. Gordon and Yi Wei. 2015. Bimodal Modelling of Source Code and Natural Language. In ICML."},{"key":"e_1_3_2_1_5_1","unstructured":"Anonymous. 2021. A New Search Paradigm for Natural Language Code Search. (2021)."},{"key":"e_1_3_2_1_6_1","unstructured":"Anonymous. 2022. Analyzing CodeBERT\u2019s Performance on Natural Language Code Search. (2022)."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10664-018-9650-5"},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.1145\/1376616.1376746"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1145\/1518701.1518944"},{"key":"e_1_3_2_1_10_1","unstructured":"Tom\u00a0B. Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell Sandhini Agarwal Ariel Herbert-Voss Gretchen Krueger T.\u00a0J. Henighan Rewon Child Aditya Ramesh Daniel\u00a0M. Ziegler Jeff Wu Clemens Winter Christopher Hesse Mark Chen Eric Sigler Mateusz Litwin Scott Gray Benjamin Chess Jack Clark Christopher Berner Sam McCandlish Alec Radford Ilya Sutskever and Dario Amodei. 2020. Language Models are Few-Shot Learners. ArXiv abs\/2005.14165(2020)."},{"key":"e_1_3_2_1_11_1","unstructured":"Luca Buratti Saurabh Pujar Mihaela\u00a0A. Bornea Scott McCarley Yunhui Zheng Gaetano Rossiello Alessandro Morari Jim Laredo Veronika Thost Yufan Zhuang and Giacomo Domeniconi. 2020. Exploring Software Naturalness through Neural Language Models. ArXiv abs\/2006.12641(2020)."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"publisher","DOI":"10.1145\/1449764.1449790"},{"key":"e_1_3_2_1_13_1","volume-title":"2012 34th International Conference on Software Engineering (ICSE)","author":"Devanbu T.","year":"2012","unstructured":"Premkumar\u00a0T. Devanbu. 2012. On the naturalness of software. 2012 34th International Conference on Software Engineering (ICSE) (2012), 837\u2013847."},{"key":"e_1_3_2_1_14_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805(2018).","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805(2018)."},{"key":"e_1_3_2_1_15_1","volume-title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. ArXiv abs\/1810.04805(2019).","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. ArXiv abs\/1810.04805(2019)."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"crossref","unstructured":"Ning Ding Yulin Chen Xu Han Guangwei Xu Pengjun Xie Haitao Zheng Zhiyuan Liu Juan-Zi Li and Hong-Gee Kim. 2021. Prompt-Learning for Fine-Grained Entity Typing. 
ArXiv abs\/2108.10604(2021).","DOI":"10.18653\/v1\/2022.findings-emnlp.512"},{"key":"e_1_3_2_1_17_1","volume-title":"SnR: Constraint Based Type Inference for Incomplete Java Code Snippets. International Conference on Software Engineering (ICSE)","author":"Dong Yiwen","year":"2022","unstructured":"Yiwen Dong, Tianxiao Gu, Yongqiang Tian, and Chengnian Sun. 2022. SnR: Constraint Based Type Inference for Incomplete Java Code Snippets. International Conference on Software Engineering (ICSE) (2022)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"crossref","unstructured":"Zhangyin Feng Daya Guo Duyu Tang Nan Duan Xiaocheng Feng Ming Gong Linjun Shou Bing Qin Ting Liu Daxin Jiang and Ming Zhou. 2020. CodeBERT: A Pre-Trained Model for Programming and Natural Languages. ArXiv abs\/2002.08155(2020).","DOI":"10.18653\/v1\/2020.findings-emnlp.139"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/SUITE.2009.5070022"},{"key":"e_1_3_2_1_20_1","volume-title":"The Pile: An 800GB Dataset of Diverse Text for Language Modeling. arXiv preprint arXiv:2101.00027(2020).","author":"Gao Leo","year":"2020","unstructured":"Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, 2020. The Pile: An 800GB Dataset of Diverse Text for Language Modeling. arXiv preprint arXiv:2101.00027(2020)."},{"key":"e_1_3_2_1_21_1","unstructured":"Tianyu Gao Adam Fisch and Danqi Chen. 2021. Making Pre-trained Language Models Better Few-shot Learners. ArXiv abs\/2012.15723(2021)."},{"key":"e_1_3_2_1_22_1","unstructured":"Jian Gu Pasquale Salza and Harald\u00a0C. Gall. 2022. Assemble Foundation Models for Automatic Code Summarization."},{"key":"e_1_3_2_1_23_1","volume-title":"PPT: Pre-trained Prompt Tuning for Few-shot Learning. ArXiv abs\/2109.04332(2021).","author":"Gu Yuxian","year":"2021","unstructured":"Yuxian Gu, Xu Han, Zhiyuan Liu, and Minlie Huang. 2021. PPT: Pre-trained Prompt Tuning for Few-shot Learning. ArXiv abs\/2109.04332(2021)."},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSME46990.2020.00099"},{"key":"e_1_3_2_1_25_1","volume-title":"REALM: Retrieval-Augmented Language Model Pre-Training. ArXiv abs\/2002.08909(2020).","author":"Guu Kelvin","year":"2020","unstructured":"Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Ming-Wei Chang. 2020. REALM: Retrieval-Augmented Language Model Pre-Training. ArXiv abs\/2002.08909(2020)."},{"key":"e_1_3_2_1_26_1","volume-title":"On the Use of Automated Text Summarization Techniques for Summarizing Source Code. 2010 17th Working Conference on Reverse Engineering","author":"Haiduc Sonia","year":"2010","unstructured":"Sonia Haiduc, Jairo Aponte, Laura Moreno, and Andrian Marcus. 2010. On the Use of Automated Text Summarization Techniques for Summarizing Source Code. 2010 17th Working Conference on Reverse Engineering (2010), 35\u201344."},{"key":"e_1_3_2_1_27_1","volume-title":"PTR: Prompt Tuning with Rules for Text Classification. ArXiv abs\/2105.11259(2021).","author":"Han Xu","year":"2021","unstructured":"Xu Han, Weilin Zhao, Ning Ding, Zhiyuan Liu, and Maosong Sun. 2021. PTR: Prompt Tuning with Rules for Text Classification. ArXiv abs\/2105.11259(2021)."},{"key":"e_1_3_2_1_28_1","unstructured":"Kaiming He Xinlei Chen Saining Xie Yanghao Li Piotr Doll\u2019ar and Ross\u00a0B. Girshick. 2021. Masked Autoencoders Are Scalable Vision Learners. 
ArXiv abs\/2111.06377(2021)."},{"key":"e_1_3_2_1_29_1","doi-asserted-by":"crossref","unstructured":"Benjamin Heinzerling and Kentaro Inui. 2020. Language models as knowledge bases: On entity representations storage capacity and paraphrased queries. arXiv preprint arXiv:2008.09036(2020).","DOI":"10.18653\/v1\/2021.eacl-main.153"},{"key":"e_1_3_2_1_30_1","doi-asserted-by":"crossref","unstructured":"Benjamin Heinzerling and Kentaro Inui. 2021. Language Models as Knowledge Bases: On Entity Representations Storage Capacity and Paraphrased Queries. ArXiv abs\/2008.09036(2021).","DOI":"10.18653\/v1\/2021.eacl-main.153"},{"key":"e_1_3_2_1_31_1","unstructured":"Vincent\u00a0J. Hellendoorn Charles Sutton Rishabh Singh Petros Maniatis and David Bieber. 2020. Global Relational Models of Source Code. In ICLR."},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1109\/SANER48275.2020.9054830"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10515-019-00263-5"},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1007\/s11432-017-9465-9"},{"key":"e_1_3_2_1_35_1","unstructured":"Hamel Husain Hongqi Wu Tiferet Gazit Miltiadis Allamanis and Marc Brockschmidt. 2019. CodeSearchNet Challenge: Evaluating the State of Semantic Code Search. ArXiv abs\/1909.09436(2019)."},{"key":"e_1_3_2_1_36_1","volume-title":"How Can We Know What Language Models Know?Transactions of the Association for Computational Linguistics 8","author":"Jiang Zhengbao","year":"2020","unstructured":"Zhengbao Jiang, Frank\u00a0F. Xu, J. Araki, and Graham Neubig. 2020. How Can We Know What Language Models Know?Transactions of the Association for Computational Linguistics 8 (2020), 423\u2013438."},{"key":"e_1_3_2_1_37_1","volume-title":"International Conference on Machine Learning. PMLR, 5110\u20135121","author":"Kanade Aditya","year":"2020","unstructured":"Aditya Kanade, Petros Maniatis, Gogul Balakrishnan, and Kensen Shi. 2020. Learning and evaluating contextual embedding of source code. In International Conference on Machine Learning. PMLR, 5110\u20135121."},{"key":"e_1_3_2_1_38_1","volume-title":"What do pre-trained code models know about code?2021 36th IEEE\/ACM International Conference on Automated Software Engineering (ASE)","author":"Karmakar Anjan","year":"2021","unstructured":"Anjan Karmakar and Romain Robbes. 2021. What do pre-trained code models know about code?2021 36th IEEE\/ACM International Conference on Automated Software Engineering (ASE) (2021), 1332\u20131336."},{"key":"e_1_3_2_1_39_1","doi-asserted-by":"publisher","DOI":"10.1145\/3180155.3180187"},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"crossref","unstructured":"Brian Lester Rami Al-Rfou and Noah Constant. 2021. The Power of Scale for Parameter-Efficient Prompt Tuning. ArXiv abs\/2104.08691(2021).","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"e_1_3_2_1_41_1","volume-title":"BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension. In ACL.","author":"Lewis Mike","year":"2020","unstructured":"Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension. 
In ACL."},{"key":"e_1_3_2_1_42_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSME.2018.00028"},{"key":"e_1_3_2_1_43_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"e_1_3_2_1_44_1","volume-title":"Cuebert: A New Mixing Board Concept for Musical Theatre. In NIME.","author":"Liebman Noah","year":"2010","unstructured":"Noah Liebman, Michael Nagara, Jacek Spiewla, and Erin Zolkosky. 2010. Cuebert: A New Mixing Board Concept for Musical Theatre. In NIME."},{"key":"e_1_3_2_1_45_1","unstructured":"Chin-Yew Lin and Franz\u00a0Josef Och. 2004. ORANGE: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation. In COLING."},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1007\/s10618-008-0118-x"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/3338906.3338971"},{"key":"e_1_3_2_1_48_1","unstructured":"Pengfei Liu Weizhe Yuan Jinlan Fu Zhengbao Jiang Hiroaki Hayashi and Graham Neubig. 2021. Pre-train prompt and predict: A systematic survey of prompting methods in natural language processing. arXiv preprint arXiv:2107.13586(2021)."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"crossref","unstructured":"Xiao Liu Kaixuan Ji Yicheng Fu Zhengxiao Du Zhilin Yang and Jie Tang. 2021. P-Tuning v2: Prompt Tuning Can Be Comparable to Fine-tuning Universally Across Scales and Tasks. ArXiv abs\/2110.07602(2021).","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"e_1_3_2_1_50_1","volume-title":"Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692(2019).","author":"Liu Yinhan","year":"2019","unstructured":"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692(2019)."},{"key":"e_1_3_2_1_51_1","unstructured":"Shuai Lu Daya Guo Shuo Ren Junjie Huang Alexey Svyatkovskiy Ambrosio Blanco Colin\u00a0B. Clement Dawn Drain Daxin Jiang Duyu Tang Ge Li Lidong Zhou Linjun Shou Long Zhou Michele Tufano Ming Gong Ming Zhou Nan Duan Neel Sundaresan Shao\u00a0Kun Deng Shengyu Fu and Shujie Liu. 2021. CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation. ArXiv abs\/2102.04664(2021)."},{"key":"e_1_3_2_1_52_1","unstructured":"Subhadip Maji Swapna\u00a0Sourav Rout and Sudeep Choudhary. 2021. DCoM: A Deep Column Mapper for Semantic Data Type Detection. ArXiv abs\/2106.12871(2021)."},{"key":"e_1_3_2_1_53_1","unstructured":"Leandro T.\u00a0C. Melo Rodrigo\u00a0G. Ribeiro Breno C.\u00a0F. Guimar\u00e3es and Fernando Magno Quint\u00e3o Pereira. 2020. Type Inference for C: Applications to the Static Analysis of Incomplete Programs. ACM Trans. Program. Lang. Syst.(2020)."},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/2746194.2746198"},{"key":"e_1_3_2_1_55_1","doi-asserted-by":"publisher","DOI":"10.1145\/2491411.2494584"},{"key":"e_1_3_2_1_56_1","doi-asserted-by":"publisher","DOI":"10.1002\/spe.2346"},{"key":"e_1_3_2_1_57_1","unstructured":"Hammond\u00a0A. Pearce Baleegh Ahmad Benjamin Tan Brendan Dolan-Gavitt and Ramesh Karri. 2021. An Empirical Cybersecurity Evaluation of GitHub Copilot\u2019s Code Contributions. ArXiv abs\/2108.09293(2021)."},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"crossref","unstructured":"Fabio Petroni Tim Rockt\u00e4schel Patrick Lewis Anton Bakhtin Yuxiang Wu Alexander\u00a0H Miller and Sebastian Riedel. 2019. 
Language models as knowledge bases?arXiv preprint arXiv:1909.01066(2019).","DOI":"10.18653\/v1\/D19-1250"},{"key":"e_1_3_2_1_59_1","volume-title":"Statistical Learning of API Fully Qualified Names in Code Snippets of Online Forums. 2018 IEEE\/ACM 40th International Conference on Software Engineering (ICSE)","author":"Phan Hung\u00a0Dang","year":"2018","unstructured":"Hung\u00a0Dang Phan, Hoan\u00a0Anh Nguyen, Ngoc\u00a0M. Tran, Linh-Huyen Truong, Anh\u00a0Tuan Nguyen, and Tien\u00a0Nhut Nguyen. 2018. Statistical Learning of API Fully Qualified Names in Code Snippets of Online Forums. 2018 IEEE\/ACM 40th International Conference on Software Engineering (ICSE) (2018), 632\u2013642."},{"key":"e_1_3_2_1_60_1","volume-title":"CRYLOGGER: Detecting Crypto Misuses Dynamically. 2021 IEEE Symposium on Security and Privacy (SP) (2021)","author":"Piccolboni Luca","year":"2021","unstructured":"Luca Piccolboni, Giuseppe\u00a0Di Guglielmo, Luca\u00a0P. Carloni, and Simha Sethumadhavan. 2021. CRYLOGGER: Detecting Crypto Misuses Dynamically. 2021 IEEE Symposium on Security and Privacy (SP) (2021), 1972\u20131989."},{"key":"e_1_3_2_1_61_1","volume-title":"Language models are unsupervised multitask learners. OpenAI blog 1, 8","author":"Radford Alec","year":"2019","unstructured":"Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, 2019. Language models are unsupervised multitask learners. OpenAI blog 1, 8 (2019), 9."},{"key":"e_1_3_2_1_62_1","unstructured":"Colin Raffel Noam\u00a0M. Shazeer Adam Roberts Katherine Lee Sharan Narang Michael Matena Yanqi Zhou Wei Li and Peter\u00a0J. Liu. 2020. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. ArXiv abs\/1910.10683(2020)."},{"key":"e_1_3_2_1_63_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.91"},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"publisher","DOI":"10.1145\/3324884.3416551"},{"key":"e_1_3_2_1_65_1","doi-asserted-by":"crossref","unstructured":"Adam Roberts Colin Raffel and Noam\u00a0M. Shazeer. 2020. How Much Knowledge Can You Pack into the Parameters of a Language Model?ArXiv abs\/2002.08910(2020).","DOI":"10.18653\/v1\/2020.emnlp-main.437"},{"key":"e_1_3_2_1_66_1","doi-asserted-by":"publisher","DOI":"10.1109\/ASE.2019.00032"},{"key":"e_1_3_2_1_67_1","doi-asserted-by":"crossref","unstructured":"Timo Schick and Hinrich Sch\u00fctze. 2021. Exploiting Cloze-Questions for Few-Shot Text Classification and Natural Language Inference. In EACL.","DOI":"10.18653\/v1\/2021.eacl-main.20"},{"key":"e_1_3_2_1_68_1","doi-asserted-by":"crossref","unstructured":"Timo Schick and Hinrich Sch\u00fctze. 2021. It\u2019s Not Just Size That Matters: Small Language Models Are Also Few-Shot Learners. ArXiv abs\/2009.07118(2021).","DOI":"10.18653\/v1\/2021.naacl-main.185"},{"key":"e_1_3_2_1_69_1","volume-title":"Autoprompt: Eliciting knowledge from language models with automatically generated prompts. arXiv preprint arXiv:2010.15980(2020).","author":"Shin Taylor","year":"2020","unstructured":"Taylor Shin, Yasaman Razeghi, Robert\u00a0L Logan\u00a0IV, Eric Wallace, and Sameer Singh. 2020. Autoprompt: Eliciting knowledge from language models with automatically generated prompts. arXiv preprint arXiv:2010.15980(2020)."},{"key":"e_1_3_2_1_70_1","volume-title":"Live API Documentation. International Conference on Software Engineering (ICSE)","author":"Subramanian Siddharth","year":"2014","unstructured":"Siddharth Subramanian, Laura Inozemtseva, and Reid Holmes. 2014. Live API Documentation. 
International Conference on Software Engineering (ICSE) (2014)."},{"key":"e_1_3_2_1_71_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSME.2019.00039"},{"key":"e_1_3_2_1_72_1","unstructured":"Yi Sun Yu Zheng Chao Hao and Hangping Qiu. 2021. NSP-BERT: A Prompt-based Zero-Shot Learner Through an Original Pre-training Task-Next Sentence Prediction. ArXiv abs\/2109.03564(2021)."},{"key":"e_1_3_2_1_73_1","unstructured":"Tianyi Tang Junyi Li and Wayne\u00a0Xin Zhao. 2022. Context-Tuning: Learning Contextualized Prompts for Natural Language Generation. ArXiv abs\/2201.08670(2022)."},{"key":"e_1_3_2_1_74_1","doi-asserted-by":"publisher","DOI":"10.1145\/1321631.1321663"},{"key":"e_1_3_2_1_75_1","doi-asserted-by":"crossref","unstructured":"Sergey Troshin and Nadezhda Chirkova. 2022. Probing Pretrained Models of Source Code. ArXiv abs\/2202.08975(2022).","DOI":"10.18653\/v1\/2022.blackboxnlp-1.31"},{"key":"e_1_3_2_1_76_1","doi-asserted-by":"crossref","unstructured":"Sergey Troshin and Nadezhda Chirkova. 2022. Probing Pretrained Models of Source Code. arXiv preprint arXiv:2202.08975(2022).","DOI":"10.18653\/v1\/2022.blackboxnlp-1.31"},{"key":"e_1_3_2_1_77_1","doi-asserted-by":"publisher","DOI":"10.1145\/3340544"},{"key":"e_1_3_2_1_78_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-0-387-09684-1_21"},{"key":"e_1_3_2_1_79_1","unstructured":"Ashish Vaswani Noam\u00a0M. Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan\u00a0N. Gomez Lukasz Kaiser and Illia Polosukhin. 2017. Attention is All you Need. In NIPS."},{"key":"e_1_3_2_1_80_1","doi-asserted-by":"crossref","unstructured":"Yao Wan Wei Zhao Hongyu Zhang Yulei Sui Guandong Xu and Hairong Jin. 2022. What Do They Capture? - A Structural Analysis of Pre-Trained Language Models for Source Code. ArXiv abs\/2202.06840(2022).","DOI":"10.1145\/3510003.3510050"},{"key":"e_1_3_2_1_81_1","doi-asserted-by":"crossref","unstructured":"Yao Wan Wei Zhao Hongyu Zhang Yulei Sui Guandong Xu and Hai Jin. 2022. What Do They Capture?\u2013A Structural Analysis of Pre-Trained Language Models for Source Code. arXiv preprint arXiv:2202.06840(2022).","DOI":"10.1145\/3510003.3510050"},{"key":"e_1_3_2_1_82_1","unstructured":"Deze Wang Zhouyang Jia Shanshan Li Yue Yu Yun Xiong Wei Dong and Xiangke Liao. 2021. Bridging Pre-trained Models and Downstream Tasks for Source Code Understanding. ArXiv abs\/2112.02268(2021)."},{"key":"e_1_3_2_1_83_1","volume-title":"Detecting Code Clones with Graph Neural Network and Flow-Augmented Abstract Syntax Tree. In 2020 IEEE 27th International Conference on Software Analysis, Evolution and Reengineering (SANER)","author":"Wang Wenhan","unstructured":"Wenhan Wang, Ge Li, Bo Ma, Xin Xia, and Zhi Jin. 2020. Detecting Code Clones with Graph Neural Network and Flow-Augmented Abstract Syntax Tree. In 2020 IEEE 27th International Conference on Software Analysis, Evolution and Reengineering (SANER). IEEE, 261\u2013271."},{"key":"e_1_3_2_1_84_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i16.17650"},{"key":"e_1_3_2_1_85_1","doi-asserted-by":"crossref","unstructured":"Yue Wang Weishi Wang Shafiq Joty and Steven\u00a0CH Hoi. 2021. Codet5: Identifier-aware unified pre-trained encoder-decoder models for code understanding and generation. arXiv preprint arXiv:2109.00859(2021).","DOI":"10.18653\/v1\/2021.emnlp-main.685"},{"key":"e_1_3_2_1_86_1","unstructured":"Yonghui Wu Mike Schuster Z. Chen Quoc\u00a0V. 
Le Mohammad Norouzi Wolfgang Macherey Maxim Krikun Yuan Cao Qin Gao Klaus Macherey Jeff Klingner Apurva Shah Melvin Johnson Xiaobing Liu Lukasz Kaiser Stephan Gouws Yoshikiyo Kato Taku Kudo Hideto Kazawa Keith Stevens George Kurian Nishant Patil Wei Wang Cliff Young Jason\u00a0R. Smith Jason Riesa Alex Rudnick Oriol Vinyals Gregory\u00a0S. Corrado Macduff Hughes and Jeffrey Dean. 2016. Google\u2019s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation. ArXiv abs\/1609.08144(2016)."},{"key":"e_1_3_2_1_87_1","volume-title":"How do developers utilize source code from stack overflow?Empirical Software Engineering 24, 2","author":"Wu Yuhao","year":"2019","unstructured":"Yuhao Wu, Shaowei Wang, Cor-Paul Bezemer, and Katsuro Inoue. 2019. How do developers utilize source code from stack overflow?Empirical Software Engineering 24, 2 (2019), 637\u2013673."},{"key":"e_1_3_2_1_88_1","doi-asserted-by":"publisher","DOI":"10.1145\/3180155.3180260"},{"key":"e_1_3_2_1_89_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE.2019.00046"},{"key":"e_1_3_2_1_90_1","unstructured":"Wenxuan Zhou Junyi Du and Xiang Ren. 2019. Improving BERT fine-tuning with embedding normalization. arXiv preprint arXiv:1911.03918(2019)."},{"key":"e_1_3_2_1_91_1","volume-title":"Devign: Effective Vulnerability Identification by Learning Comprehensive Program Semantics via Graph Neural Networks. ArXiv abs\/1909.03496(2019).","author":"Zhou Yaqin","year":"2019","unstructured":"Yaqin Zhou, Shangqing Liu, J. Siow, Xiaoning Du, and Yang Liu. 2019. Devign: Effective Vulnerability Identification by Learning Comprehensive Program Semantics via Graph Neural Networks. ArXiv abs\/1909.03496(2019)."}],"event":{"name":"ASE '22: 37th IEEE\/ACM International Conference on Automated Software Engineering","location":"Rochester MI USA","acronym":"ASE '22"},"container-title":["Proceedings of the 37th IEEE\/ACM International Conference on Automated Software Engineering"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3551349.3556912","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3551349.3556912","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T08:37:53Z","timestamp":1755851873000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3551349.3556912"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,10]]},"references-count":91,"alternative-id":["10.1145\/3551349.3556912","10.1145\/3551349"],"URL":"https:\/\/doi.org\/10.1145\/3551349.3556912","relation":{},"subject":[],"published":{"date-parts":[[2022,10,10]]},"assertion":[{"value":"2023-01-05","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
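The record above is the JSON envelope returned by the Crossref REST API for this paper's DOI ("status", "message-type", and the work metadata under "message"). As a minimal sketch of how such a record can be fetched and a few of its fields read back, assuming only the public api.crossref.org/works/{doi} endpoint and the third-party requests package (neither is part of the record itself):

```python
import requests

# DOI taken from the record above.
DOI = "10.1145/3551349.3556912"

# The Crossref REST API wraps work metadata in an envelope with
# "status", "message-type", and the record itself under "message".
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# Read back a few fields that appear in the record above.
print(work["title"][0])
print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))
print("References deposited:", work["reference-count"])
```

Running this against the live API should print the paper title, the six authors, and a reference count of 91, matching the deposited record.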