{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,18]],"date-time":"2025-09-18T10:15:42Z","timestamp":1758190542634,"version":"3.44.0"},"reference-count":43,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,5,29]],"date-time":"2025-05-29T00:00:00Z","timestamp":1748476800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,29]],"date-time":"2025-05-29T00:00:00Z","timestamp":1748476800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,5,29]]},"DOI":"10.1109\/sera65747.2025.11154515","type":"proceedings-article","created":{"date-parts":[[2025,9,16]],"date-time":"2025-09-16T17:31:58Z","timestamp":1758043918000},"page":"126-133","source":"Crossref","is-referenced-by-count":0,"title":["Automated Code Summarization by Training Large Language Models with Crowdsourced Knowledge"],"prefix":"10.1109","author":[{"given":"Meng","family":"Xia","sequence":"first","affiliation":[{"name":"University of Nebraska at Omaha,Computer Science,Nebraska,USA"}]},{"given":"Shradha","family":"Maharjan","sequence":"additional","affiliation":[{"name":"University of Nebraska at Omaha,Computer Science,Nebraska,USA"}]},{"given":"Myoungkyu","family":"Song","sequence":"additional","affiliation":[{"name":"University of Nebraska at Omaha,Computer Science,Nebraska,USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICPC.2015.12"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/tse.2017.2734091"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.3390\/sym14030471"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/tse.2015.2465386"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3387904.3389258"},{"key":"ref6","first-page":"68","article-title":"A study of the documentation essential to software maintenance","author":"Cozzetti","year":"2005","journal-title":"ICDC"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-19811-3_29"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/2597008.2597149"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/318372.318577"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3387904.3389268"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3368089.3417926"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.jss.2024.111964"},{"issue":"1","key":"ref14","first-page":"261","article-title":"Attention is all you need","volume":"30","author":"Vaswani","year":"2017"},{"article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","year":"2018","author":"Devlin","key":"ref15"},{"issue":"8","key":"ref16","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"issue":"140","key":"ref17","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"JMLR"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.499"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.685"},{"key":"ref20","article-title":"Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension","author":"Lewis","year":"2019","journal-title":"arXiv"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2019.8851751"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3238147.3238206"},{"key":"ref23","article-title":"Trans3: A transformer-based framework for unifying code summarization and code search","volume-title":"arXiv","author":"Wang","year":"2020"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/s10664-018-9634-5"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1007\/s10664-018-9650-5"},{"volume-title":"Graphcodebert: Pre-training code representations with data flow","year":"2020","author":"Guo","key":"ref26"},{"key":"ref27","first-page":"5110","article-title":"Learning and evaluating contextual embedding of source code","author":"Kanade","year":"2020","journal-title":"ICML"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.139"},{"key":"ref29","article-title":"Practical program repair in the era of large pre-trained language models","author":"Steven Xia","year":"2022","journal-title":"arXiv"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE-FoSE59343.2023.00008"},{"volume-title":"A survey of large language models for code: Evolution, benchmarking, and future trends","year":"2023","author":"Zheng","key":"ref31"},{"key":"ref32","first-page":"473","article-title":"Multi-task learning based pre-trained language model for code completion","author":"Liu","year":"2020","journal-title":"ASE"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/3368089.3417058"},{"key":"ref34","first-page":"54","article-title":"Treebert: A tree-based pre-trained model for programming language","author":"Jiang","year":"2021","journal-title":"UAI"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE43902.2021.00041"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/SP46215.2023.10179420"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/access.2025.3546700"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/msr52588.2021.00024"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE43902.2021.00026"},{"key":"ref40","article-title":"Understanding the effectiveness of large language models in code translation","author":"Pan","year":"2023","journal-title":"arXiv"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.337"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/3404835.3462840"},{"article-title":"A prompt learning framework for source code summarization","year":"2023","author":"Sun","key":"ref43"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"}],"event":{"name":"2025 IEEE\/ACIS 23rd International Conference on Software Engineering Research, Management and Applications (SERA)","start":{"date-parts":[[2025,5,29]]},"location":"Las Vegas, NV, USA","end":{"date-parts":[[2025,5,31]]}},"container-title":["2025 IEEE\/ACIS 23rd International Conference on Software Engineering Research, Management and Applications (SERA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11154458\/11154479\/11154515.pdf?arnumber=11154515","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,17]],"date-time":"2025-09-17T05:09:25Z","timestamp":1758085765000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11154515\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,29]]},"references-count":43,"URL":"https:\/\/doi.org\/10.1109\/sera65747.2025.11154515","relation":{},"subject":[],"published":{"date-parts":[[2025,5,29]]}}}