{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,23]],"date-time":"2026-04-23T17:28:59Z","timestamp":1776965339783,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":28,"publisher":"ACM","license":[{"start":{"date-parts":[[2020,12,21]],"date-time":"2020-12-21T00:00:00Z","timestamp":1608508800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"National Natural Science Foundation of China","award":["61832009, 61620106007, 61751210"],"award-info":[{"award-number":["61832009, 61620106007, 61751210"]}]},{"name":"National Key R&D Program","award":["2018YFB1003904"],"award-info":[{"award-number":["2018YFB1003904"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2020,12,21]]},"DOI":"10.1145\/3324884.3416591","type":"proceedings-article","created":{"date-parts":[[2021,1,27]],"date-time":"2021-01-27T23:39:02Z","timestamp":1611790742000},"page":"473-485","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":130,"title":["Multi-task learning based pre-trained language model for code completion"],"prefix":"10.1145","author":[{"given":"Fang","family":"Liu","sequence":"first","affiliation":[{"name":"Peking University, Beijing, China"}]},{"given":"Ge","family":"Li","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"given":"Yunfei","family":"Zhao","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"given":"Zhi","family":"Jin","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2021,1,27]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/3212695"},{"key":"e_1_3_2_1_2_1","unstructured":"Dzmitry Bahdanau Kyunghyun Cho and Yoshua Bengio. 2015. Neural Machine Translation by Jointly Learning to Align and Translate. (2015)."},{"key":"e_1_3_2_1_3_1","volume-title":"Learning python code suggestion with a sparse pointer network. arXiv preprint arXiv:1611.08307","author":"Bhoopchand Avishkar","year":"2016","unstructured":"Avishkar Bhoopchand, Tim Rockt\u00e4schel, Earl Barr, and Sebastian Riedel. 2016. Learning python code suggestion with a sparse pointer network. arXiv preprint arXiv:1611.08307 (2016)."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1023\/A:1007379606734"},{"key":"e_1_3_2_1_5_1","volume-title":"Triet Huynh Minh Le, and Muhammad Ali Babar","author":"Chen Hao","year":"2020","unstructured":"Hao Chen, Triet Huynh Minh Le, and Muhammad Ali Babar. 2020. Deep Learning for Source Code Modeling and Generation: Models, Applications and Challenges. ACM Computing Surveys (CSUR) (2020)."},{"key":"e_1_3_2_1_6_1","unstructured":"Kyunghyun Cho Bart van Merrienboer Dzmitry Bahdanau and Yoshua Bengio. 2014. On the Properties of Neural Machine Translation: Encoder-Decoder Approaches. (2014) 103--111."},{"key":"e_1_3_2_1_7_1","unstructured":"Andrew M Dai and Quoc V Le. 2015. Semi-supervised sequence learning. In Advances in neural information processing systems. 3079--3087."},{"key":"e_1_3_2_1_8_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1285"},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6639344"},{"key":"e_1_3_2_1_10_1","volume-title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL-HLT (1)","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In NAACL-HLT (1). Association for Computational Linguistics, 4171--4186."},{"key":"e_1_3_2_1_11_1","unstructured":"Li Dong Nan Yang Wenhui Wang Furu Wei Xiaodong Liu Yu Wang Jianfeng Gao Ming Zhou and Hsiao-Wuen Hon. 2019. Unified language model pre-training for natural language understanding and generation. In Advances in Neural Information Processing Systems. 13042--13054."},{"key":"e_1_3_2_1_12_1","doi-asserted-by":"crossref","unstructured":"Zhangyin Feng Daya Guo Duyu Tang Nan Duan Xiaocheng Feng Ming Gong Linjun Shou Bing Qin Ting Liu Daxin Jiang et al. 2020. CodeBERT: A Pre-Trained Model for Programming and Natural Languages. arXiv preprint arXiv:2002.08155 (2020).","DOI":"10.18653\/v1\/2020.findings-emnlp.139"},{"key":"e_1_3_2_1_13_1","first-page":"23","article-title":"A new algorithm for data compression","volume":"12","author":"Gage Philip","year":"1994","unstructured":"Philip Gage. 1994. A new algorithm for data compression. C Users Journal 12, 2 (1994), 23--38.","journal-title":"C Users Journal"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1064"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v31i1.10742"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1145\/3236024.3236051"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3106237.3106290"},{"key":"e_1_3_2_1_18_1","unstructured":"Dan Hendrycks and Kevin Gimpel. 2016. Bridging nonlinearities and stochastic regularizers with gaussian error linear units. (2016)."},{"key":"e_1_3_2_1_19_1","volume-title":"34th International Conference on Software Engineering, ICSE 2012","author":"Hindle Abram","year":"2012","unstructured":"Abram Hindle, Earl T. Barr, Zhendong Su, Mark Gabel, and Premkumar T. Devanbu. 2012. On the naturalness of software. In 34th International Conference on Software Engineering, ICSE 2012, June 2--9, 2012, Zurich, Switzerland. IEEE Computer Society, 837--847."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"publisher","DOI":"10.1145\/1808920.1808926"},{"key":"e_1_3_2_1_22_1","volume-title":"ACL (1)","author":"Howard Jeremy","unstructured":"Jeremy Howard and Sebastian Ruder. 2018. Universal Language Model Fine-tuning for Text Classification. In ACL (1). Association for Computational Linguistics, 328--339."},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/3196321.3196334"},{"key":"e_1_3_2_1_24_1","volume-title":"Pre-trained Contextual Embedding of Source Code. arXiv preprint arXiv:2001.00059","author":"Kanade Aditya","year":"2019","unstructured":"Aditya Kanade, Petros Maniatis, Gogul Balakrishnan, and Kensen Shi. 2019. Pre-trained Contextual Embedding of Source Code. arXiv preprint arXiv:2001.00059 (2019)."},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"crossref","unstructured":"Rafael-Michael Karampatsis Hlib Babii Romain Robbes Charles Sutton and Andrea Janes. 2020. Big Code!= Big Vocabulary: Open-Vocabulary Models for Source Code. ICSE.","DOI":"10.1145\/3377811.3380342"},{"key":"e_1_3_2_1_26_1","volume-title":"Code Prediction by Feeding Trees to Transformers. arXiv preprint arXiv:2003.13848","author":"Kim Seohyun","year":"2020","unstructured":"Seohyun Kim, Jinman Zhao, Yuchi Tian, and Satish Chandra. 2020. Code Prediction by Feeding Trees to Transformers. arXiv preprint arXiv:2003.13848 (2020)."},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/578"},{"key":"e_1_3_2_1_28_1","unstructured":"Chang Liu Xin Wang Richard Shin Joseph E Gonzalez and Dawn Song. 2016"}],"event":{"name":"ASE '20: 35th IEEE\/ACM International Conference on Automated Software Engineering","location":"Virtual Event Australia","acronym":"ASE '20","sponsor":["SIGAI ACM Special Interest Group on Artificial Intelligence","SIGSOFT ACM Special Interest Group on Software Engineering","IEEE CS"]},"container-title":["Proceedings of the 35th IEEE\/ACM International Conference on Automated Software Engineering"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3324884.3416591","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3324884.3416591","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T22:01:38Z","timestamp":1750197698000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3324884.3416591"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,12,21]]},"references-count":28,"alternative-id":["10.1145\/3324884.3416591","10.1145\/3324884"],"URL":"https:\/\/doi.org\/10.1145\/3324884.3416591","relation":{},"subject":[],"published":{"date-parts":[[2020,12,21]]},"assertion":[{"value":"2021-01-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}