{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T00:47:00Z","timestamp":1774658820476,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":83,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,6,18]],"date-time":"2024-06-18T00:00:00Z","timestamp":1718668800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,6,18]]},"DOI":"10.1145\/3661167.3661210","type":"proceedings-article","created":{"date-parts":[[2024,6,14]],"date-time":"2024-06-14T12:24:25Z","timestamp":1718367865000},"page":"100-109","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":20,"title":["Code Summarization without Direct Access to Code - Towards Exploring Federated LLMs for Software Engineering"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0007-6772-9452","authenticated-orcid":false,"given":"Jahnavi","family":"Kumar","sequence":"first","affiliation":[{"name":"Research in Intelligent Software &amp; Human Analytics Lab, Department of Computer Science and Engineering, Indian Institute of Technology Tirupati, India"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0818-8178","authenticated-orcid":false,"given":"Sridhar","family":"Chimalakonda","sequence":"additional","affiliation":[{"name":"Research in Intelligent Software &amp; Human Analytics Lab, Department of Computer Science and Engineering, Indian Institute of Technology Tirupati, India"}]}],"member":"320","published-online":{"date-parts":[[2024,6,18]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/3593434.3593468"},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3551349.3559555"},{"key":"e_1_3_2_1_3_1","doi-asserted-by":"publisher","DOI":"10.1145\/1370175.1370223"},{"key":"e_1_3_2_1_4_1","volume-title":"Proceedings of the acl workshop on intrinsic and extrinsic evaluation measures for machine translation and\/or summarization. 65\u201372","author":"Banerjee Satanjeev","year":"2005","unstructured":"Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An automatic metric for MT evaluation with improved correlation with human judgments. In Proceedings of the acl workshop on intrinsic and extrinsic evaluation measures for machine translation and\/or summarization. 65\u201372."},{"key":"e_1_3_2_1_5_1","volume-title":"A parallel corpus of python functions and documentation strings for automated code documentation and code generation. arXiv preprint arXiv:1707.02275","author":"Valerio\u00a0Miceli Barone Antonio","year":"2017","unstructured":"Antonio Valerio\u00a0Miceli Barone and Rico Sennrich. 2017. A parallel corpus of python functions and documentation strings for automated code documentation and code generation. arXiv preprint arXiv:1707.02275 (2017)."},{"key":"e_1_3_2_1_6_1","first-page":"374","article-title":"Towards federated learning at scale: System design","volume":"1","author":"Bonawitz Keith","year":"2019","unstructured":"Keith Bonawitz, Hubert Eichner, Wolfgang Grieskamp, Dzmitry Huba, Alex Ingerman, Vladimir Ivanov, Chloe Kiddon, Jakub Kone\u010dn\u1ef3, Stefano Mazzocchi, Brendan McMahan, 2019. Towards federated learning at scale: System design. 
Proceedings of Machine Learning and Systems 1 (2019), 374\u2013388.","journal-title":"Proceedings of Machine Learning and Systems"},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3133982"},{"key":"e_1_3_2_1_8_1","volume-title":"Petals: Collaborative inference and fine-tuning of large models. arXiv preprint arXiv:2209.01188","author":"Borzunov Alexander","year":"2022","unstructured":"Alexander Borzunov, Dmitry Baranchuk, Tim Dettmers, Max Ryabinin, Younes Belkada, Artem Chumachenko, Pavel Samygin, and Colin Raffel. 2022. Petals: Collaborative inference and fine-tuning of large models. arXiv preprint arXiv:2209.01188 (2022)."},{"key":"e_1_3_2_1_9_1","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared\u00a0D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, 2020. Language models are few-shot learners. Advances in Neural Information Processing Systems 33 (2020), 1877\u20131901.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_10_1","unstructured":"Mark Chen Jerry Tworek Heewoo Jun and Qiming Yuan. 2021. Evaluating Large Language Models Trained on Code. arxiv:2107.03374\u00a0[cs.LG]"},{"key":"e_1_3_2_1_11_1","first-page":"1","article-title":"Palm: Scaling language modeling with pathways","volume":"24","author":"Chowdhery Aakanksha","year":"2023","unstructured":"Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung\u00a0Won Chung, Charles Sutton, Sebastian Gehrmann, 2023. Palm: Scaling language modeling with pathways. Journal of Machine Learning Research 24, 240 (2023), 1\u2013113.","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_1_12_1","first-page":"5765","article-title":"Source code analysis extractive approach to generate textual summary","volume":"95","author":"M\u00a0ABBAS DAWOOD","year":"2017","unstructured":"KAREEM\u00a0ABBAS DAWOOD, KHAIRONI\u00a0YATIM SHARIF, and KT Wei. 2017. Source code analysis extractive approach to generate textual summary. Journal of Theoretical and Applied Information Technology 95, 21 (2017), 5765\u20135777.","journal-title":"Journal of Theoretical and Applied Information Technology"},{"key":"e_1_3_2_1_13_1","volume-title":"Training a massively multimodal transformer on YouTube data: pre-training and parameter efficient fine-tuning on HPC infrastructure. Ph.\u00a0D. Dissertation","author":"Day Kastan\u00a0Vrabel","unstructured":"Kastan\u00a0Vrabel Day. 2023. Training a massively multimodal transformer on YouTube data: pre-training and parameter efficient fine-tuning on HPC infrastructure. Ph.\u00a0D. Dissertation. University of Illinois at Urbana-Champaign."},{"key":"e_1_3_2_1_14_1","volume-title":"Qlora: Efficient finetuning of quantized llms. Advances in Neural Information Processing Systems 36","author":"Dettmers Tim","year":"2024","unstructured":"Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. 2024. Qlora: Efficient finetuning of quantized llms. Advances in Neural Information Processing Systems 36 (2024)."},{"key":"e_1_3_2_1_15_1","volume-title":"Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805","author":"Devlin Jacob","year":"2018","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. 
Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)."},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-023-00626-4"},{"key":"e_1_3_2_1_17_1","volume-title":"Parameter-Efficient Fine-Tuning for Medical Image Analysis: The Missed Opportunity. arXiv preprint arXiv:2305.08252","author":"Dutt Raman","year":"2023","unstructured":"Raman Dutt, Linus Ericsson, Pedro Sanchez, Sotirios\u00a0A Tsaftaris, and Timothy Hospedales. 2023. Parameter-Efficient Fine-Tuning for Medical Image Analysis: The Missed Opportunity. arXiv preprint arXiv:2305.08252 (2023)."},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/3540250.3558916"},{"key":"e_1_3_2_1_19_1","volume-title":"FATE-LLM: A Industrial Grade Federated Learning Framework for Large Language Models. arXiv preprint arXiv:2310.10049","author":"Fan Tao","year":"2023","unstructured":"Tao Fan, Yan Kang, Guoqiang Ma, Weijing Chen, Wenbin Wei, Lixin Fan, and Qiang Yang. 2023. FATE-LLM: A Industrial Grade Federated Learning Framework for Large Language Models. arXiv preprint arXiv:2310.10049 (2023)."},{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE48619.2023.00128"},{"key":"e_1_3_2_1_21_1","volume-title":"Codebert: A pre-trained model for programming and natural languages. arXiv preprint arXiv:2002.08155","author":"Feng Zhangyin","year":"2020","unstructured":"Zhangyin Feng, Daya Guo, Duyu Tang, Nan Duan, Xiaocheng Feng, Ming Gong, Linjun Shou, Bing Qin, Ting Liu, Daxin Jiang, 2020. Codebert: A pre-trained model for programming and natural languages. arXiv preprint arXiv:2002.08155 (2020)."},{"key":"e_1_3_2_1_22_1","volume-title":"Incoder: A generative model for code infilling and synthesis. arXiv preprint arXiv:2204.05999","author":"Fried Daniel","year":"2022","unstructured":"Daniel Fried, Armen Aghajanyan, Jessy Lin, Sida Wang, Eric Wallace, Freda Shi, Ruiqi Zhong, Wen-tau Yih, Luke Zettlemoyer, and Mike Lewis. 2022. Incoder: A generative model for code infilling and synthesis. arXiv preprint arXiv:2204.05999 (2022)."},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1145\/3540250.3549098"},{"key":"e_1_3_2_1_24_1","doi-asserted-by":"publisher","DOI":"10.1145\/3522674"},{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1145\/3524610.3527907"},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1109\/SANER53432.2022.00013"},{"key":"e_1_3_2_1_27_1","volume-title":"FedMLSecurity: A Benchmark for Attacks and Defenses in Federated Learning and LLMs. arXiv preprint arXiv:2306.04959","author":"Han Shanshan","year":"2023","unstructured":"Shanshan Han, Baturalp Buyukates, Zijian Hu, Han Jin, Weizhao Jin, Lichao Sun, Xiaoyang Wang, Chulin Xie, Kai Zhang, Qifan Zhang, 2023. FedMLSecurity: A Benchmark for Attacks and Defenses in Federated Learning and LLMs. arXiv preprint arXiv:2306.04959 (2023)."},{"key":"e_1_3_2_1_28_1","volume-title":"Large language models for software engineering: A systematic literature review. arXiv preprint arXiv:2308.10620","author":"Hou Xinyi","year":"2023","unstructured":"Xinyi Hou, Yanjie Zhao, Yue Liu, Zhou Yang, Kailong Wang, Li Li, Xiapu Luo, David Lo, John Grundy, and Haoyu Wang. 2023. Large language models for software engineering: A systematic literature review. arXiv preprint arXiv:2308.10620 (2023)."},{"key":"e_1_3_2_1_29_1","volume-title":"International Conference on Machine Learning. 
PMLR, 2790\u20132799","author":"Houlsby Neil","year":"2019","unstructured":"Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De\u00a0Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In International Conference on Machine Learning. PMLR, 2790\u20132799."},{"key":"e_1_3_2_1_30_1","volume-title":"Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685","author":"Hu J","year":"2021","unstructured":"Edward\u00a0J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSTW58534.2023.00078"},{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSME.2019.00085"},{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSM.2015.7332508"},{"key":"e_1_3_2_1_34_1","volume-title":"ChatGPT for good? On opportunities and challenges of large language models for education. Learning and individual differences 103","author":"Kasneci Enkelejda","year":"2023","unstructured":"Enkelejda Kasneci, Kathrin Se\u00dfler, Stefan K\u00fcchemann, Maria Bannert, Daryna Dementieva, Frank Fischer, Urs Gasser, Georg Groh, Stephan G\u00fcnnemann, Eyke H\u00fcllermeier, 2023. ChatGPT for good? On opportunities and challenges of large language models for education. Learning and individual differences 103 (2023), 102274."},{"key":"e_1_3_2_1_35_1","volume-title":"Propile: Probing privacy leakage in large language models. Advances in Neural Information Processing Systems 36","author":"Kim Siwon","year":"2024","unstructured":"Siwon Kim, Sangdoo Yun, Hwaran Lee, Martin Gubri, Sungroh Yoon, and Seong\u00a0Joon Oh. 2024. Propile: Probing privacy leakage in large language models. Advances in Neural Information Processing Systems 36 (2024)."},{"key":"e_1_3_2_1_36_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.cie.2020.106854"},{"key":"e_1_3_2_1_37_1","volume-title":"Federated learning: Challenges, methods, and future directions","author":"Li Tian","year":"2020","unstructured":"Tian Li, Anit\u00a0Kumar Sahu, Ameet Talwalkar, and Virginia Smith. 2020. Federated learning: Challenges, methods, and future directions. IEEE Signal Processing magazine 37, 3 (2020), 50\u201360."},{"key":"e_1_3_2_1_38_1","first-page":"429","article-title":"Federated optimization in heterogeneous networks","volume":"2","author":"Li Tian","year":"2020","unstructured":"Tian Li, Anit\u00a0Kumar Sahu, Manzil Zaheer, Maziar Sanjabi, Ameet Talwalkar, and Virginia Smith. 2020. Federated optimization in heterogeneous networks. Proceedings of Machine Learning and Systems 2 (2020), 429\u2013450.","journal-title":"Proceedings of Machine Learning and Systems"},{"key":"e_1_3_2_1_39_1","volume-title":"On the convergence of fedavg on non-iid data. arXiv preprint arXiv:1907.02189","author":"Li Xiang","year":"2019","unstructured":"Xiang Li, Kaixuan Huang, Wenhao Yang, Shusen Wang, and Zhihua Zhang. 2019. On the convergence of fedavg on non-iid data. arXiv preprint arXiv:1907.02189 (2019)."},{"key":"e_1_3_2_1_40_1","volume-title":"Competition-level code generation with alphacode. Science 378, 6624","author":"Li Yujia","year":"2022","unstructured":"Yujia Li, David Choi, Junyoung Chung, Nate Kushman, Julian Schrittwieser, R\u00e9mi Leblond, Tom Eccles, James Keeling, Felix Gimeno, Agustin Dal\u00a0Lago, 2022. 
Competition-level code generation with alphacode. Science 378, 6624 (2022), 1092\u20131097."},{"key":"e_1_3_2_1_41_1","volume-title":"Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74\u201381.","author":"Lin Chin-Yew","year":"2004","unstructured":"Chin-Yew Lin. 2004. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out. 74\u201381."},{"key":"e_1_3_2_1_42_1","volume-title":"Federated Prompting and Chain-of-Thought Reasoning for Improving LLMs Answering. In International Conference on Knowledge Science, Engineering and Management. Springer, 3\u201311","author":"Liu Xiangyang","year":"2023","unstructured":"Xiangyang Liu, Tianqi Pang, and Chenyou Fan. 2023. Federated Prompting and Chain-of-Thought Reasoning for Improving LLMs Answering. In International Conference on Knowledge Science, Engineering and Management. Springer, 3\u201311."},{"key":"e_1_3_2_1_43_1","volume-title":"Differentially Private Low-Rank Adaptation of Large Language Model Using Federated Learning. arXiv preprint arXiv:2312.17493","author":"Liu Xiao-Yang","year":"2023","unstructured":"Xiao-Yang Liu, Rongyi Zhu, Daochen Zha, Jiechao Gao, Shan Zhong, and Meikang Qiu. 2023. Differentially Private Low-Rank Adaptation of Large Language Model Using Federated Learning. arXiv preprint arXiv:2312.17493 (2023)."},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.5555\/3546258.3546484"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"publisher","DOI":"10.1145\/3450288"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1109\/ISSRE59848.2023.00026"},{"key":"e_1_3_2_1_47_1","doi-asserted-by":"publisher","DOI":"10.1109\/ISSRE.2018.00013"},{"key":"e_1_3_2_1_48_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICSE43902.2021.00041"},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"publisher","DOI":"10.1145\/2635868.2635870"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1145\/3524842.3528470"},{"key":"e_1_3_2_1_51_1","volume-title":"Codegen: An open large language model for code with multi-turn program synthesis. arXiv preprint arXiv:2203.13474","author":"Nijkamp Erik","year":"2022","unstructured":"Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, and Caiming Xiong. 2022. Codegen: An open large language model for code with multi-turn program synthesis. arXiv preprint arXiv:2203.13474 (2022)."},{"key":"e_1_3_2_1_52_1","doi-asserted-by":"publisher","DOI":"10.1145\/3510003.3510096"},{"key":"e_1_3_2_1_53_1","doi-asserted-by":"publisher","DOI":"10.1145\/1644001.1644009"},{"key":"e_1_3_2_1_54_1","volume-title":"Application of Large Language Models to Software Engineering Tasks: Opportunities, Risks, and Implications","author":"Ozkaya Ipek","year":"2023","unstructured":"Ipek Ozkaya. 2023. Application of Large Language Models to Software Engineering Tasks: Opportunities, Risks, and Implications. IEEE Software 40, 3 (2023)."},{"key":"e_1_3_2_1_55_1","volume-title":"Proceedings of the 40th annual meeting of the Association for Computational Linguistics. 311\u2013318","author":"Papineni Kishore","year":"2002","unstructured":"Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. Bleu: a method for automatic evaluation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computational Linguistics. 311\u2013318."},{"key":"e_1_3_2_1_56_1","volume-title":"Federated learning for emoji prediction in a mobile keyboard. 
arXiv preprint arXiv:1906.04329","author":"Ramaswamy Swaroop","year":"2019","unstructured":"Swaroop Ramaswamy, Rajiv Mathews, Kanishka Rao, and Fran\u00e7oise Beaufays. 2019. Federated learning for emoji prediction in a mobile keyboard. arXiv preprint arXiv:1906.04329 (2019)."},{"key":"e_1_3_2_1_57_1","doi-asserted-by":"publisher","DOI":"10.1145\/3540250.3560883"},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.1145\/3510003.3510060"},{"key":"e_1_3_2_1_59_1","doi-asserted-by":"publisher","DOI":"10.1145\/3540250.3549145"},{"key":"e_1_3_2_1_60_1","doi-asserted-by":"publisher","DOI":"10.1145\/3510003.3510224"},{"key":"e_1_3_2_1_61_1","unstructured":"Rohan Taori Ishaan Gulrajani Tianyi Zhang Yann Dubois Xuechen Li Carlos Guestrin Percy Liang and Tatsunori\u00a0B Hashimoto. 2023. Stanford alpaca: An instruction-following llama model."},{"key":"e_1_3_2_1_62_1","volume-title":"Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_2_1_63_1","volume-title":"Attention is all you need. Advances in Neural Information Processing Systems 30","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan\u00a0N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. Advances in Neural Information Processing Systems 30 (2017)."},{"key":"e_1_3_2_1_64_1","volume-title":"Can Public Large Language Models Help Private Cross-device Federated Learning? arXiv preprint arXiv:2305.12132","author":"Wang Boxin","year":"2023","unstructured":"Boxin Wang, Yibo\u00a0Jacky Zhang, Yuan Cao, Bo Li, H\u00a0Brendan McMahan, Sewoong Oh, Zheng Xu, and Manzil Zaheer. 2023. Can Public Large Language Models Help Private Cross-device Federated Learning? arXiv preprint arXiv:2305.12132 (2023)."},{"key":"e_1_3_2_1_65_1","doi-asserted-by":"publisher","DOI":"10.1145\/3540250.3549113"},{"key":"e_1_3_2_1_66_1","volume-title":"Prompt Tuning in Code Intelligence: An Experimental Evaluation","author":"Wang Chaozheng","year":"2023","unstructured":"Chaozheng Wang, Yuanhang Yang, Cuiyun Gao, Yun Peng, Hongyu Zhang, and Michael\u00a0R Lyu. 2023. Prompt Tuning in Code Intelligence: An Experimental Evaluation. IEEE Transactions on Software Engineering (2023)."},{"key":"e_1_3_2_1_67_1","doi-asserted-by":"publisher","DOI":"10.1109\/TSE.2020.2979701"},{"key":"e_1_3_2_1_68_1","volume-title":"Adamix: Mixture-of-adapter for parameter-efficient tuning of large language models. arXiv preprint arXiv:2205.12410 1, 2","author":"Wang Yaqing","year":"2022","unstructured":"Yaqing Wang, Subhabrata Mukherjee, Xiaodong Liu, Jing Gao, Ahmed\u00a0Hassan Awadallah, and Jianfeng Gao. 2022. Adamix: Mixture-of-adapter for parameter-efficient tuning of large language models. arXiv preprint arXiv:2205.12410 1, 2 (2022), 4."},{"key":"e_1_3_2_1_69_1","volume-title":"Finetuned language models are zero-shot learners. arXiv preprint arXiv:2109.01652","author":"Wei Jason","year":"2021","unstructured":"Jason Wei, Maarten Bosma, Vincent\u00a0Y Zhao, Kelvin Guu, Adams\u00a0Wei Yu, Brian Lester, Nan Du, Andrew\u00a0M Dai, and Quoc\u00a0V Le. 2021. Finetuned language models are zero-shot learners. 
arXiv preprint arXiv:2109.01652 (2021)."},{"key":"e_1_3_2_1_70_1","volume-title":"Federated Fine-Tuning of LLMs on the Very Edge: The Good, the Bad, the Ugly. arXiv preprint arXiv:2310.03150","author":"Woisetschl\u00e4ger Herbert","year":"2023","unstructured":"Herbert Woisetschl\u00e4ger, Alexander Isenko, Shiqiang Wang, Ruben Mayer, and Hans-Arno Jacobsen. 2023. Federated Fine-Tuning of LLMs on the Very Edge: The Good, the Bad, the Ugly. arXiv preprint arXiv:2310.03150 (2023)."},{"key":"e_1_3_2_1_71_1","volume-title":"Wilcoxon signed-rank test","author":"Woolson F","year":"2007","unstructured":"Robert\u00a0F Woolson. 2007. Wilcoxon signed-rank test. Wiley encyclopedia of clinical trials (2007), 1\u20133."},{"key":"e_1_3_2_1_72_1","volume-title":"Unveiling security, privacy, and ethical concerns of ChatGPT. Journal of Information and Intelligence","author":"Wu Xiaodong","year":"2023","unstructured":"Xiaodong Wu, Ran Duan, and Jianbing Ni. 2023. Unveiling security, privacy, and ethical concerns of ChatGPT. Journal of Information and Intelligence (2023)."},{"key":"e_1_3_2_1_73_1","volume-title":"International Conference on Machine Learning. PMLR, 38087\u201338099","author":"Xiao Guangxuan","year":"2023","unstructured":"Guangxuan Xiao, Ji Lin, Mickael Seznec, Hao Wu, Julien Demouth, and Song Han. 2023. Smoothquant: Accurate and efficient post-training quantization for large language models. In International Conference on Machine Learning. PMLR, 38087\u201338099."},{"key":"e_1_3_2_1_74_1","volume-title":"Federated fine-tuning of billion-sized language models across mobile devices. arXiv preprint arXiv:2308.13894","author":"Xu Mengwei","year":"2023","unstructured":"Mengwei Xu, Yaozong Wu, Dongqi Cai, Xiang Li, and Shangguang Wang. 2023. Federated fine-tuning of billion-sized language models across mobile devices. arXiv preprint arXiv:2308.13894 (2023)."},{"key":"e_1_3_2_1_75_1","volume-title":"Exploring the Potential of Large Language Models in Personalized Diabetes Treatment Strategies. medRxiv","author":"Yang Hao","year":"2023","unstructured":"Hao Yang, Jiaxi Li, Siru Liu, Lei Du, Xiali Liu, Yong Huang, Qingke Shi, and Jialin Liu. 2023. Exploring the Potential of Large Language Models in Personalized Diabetes Treatment Strategies. medRxiv (2023), 2023\u201306."},{"key":"e_1_3_2_1_76_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP.2019.8803001"},{"key":"e_1_3_2_1_77_1","doi-asserted-by":"publisher","DOI":"10.3390\/sym14030471"},{"key":"e_1_3_2_1_78_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2021.106775"},{"key":"e_1_3_2_1_79_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-67292-8_17"},{"key":"e_1_3_2_1_80_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447454"},{"key":"e_1_3_2_1_81_1","volume-title":"Llamaadapter: Efficient fine-tuning of language models with zero-init attention. arXiv preprint arXiv:2303.16199","author":"Zhang R","year":"2023","unstructured":"R Zhang, J Han, A Zhou, 2023. Llamaadapter: Efficient fine-tuning of language models with zero-init attention. arXiv preprint arXiv:2303.16199 (2023)."},{"key":"e_1_3_2_1_82_1","volume-title":"Siren\u2019s Song in the AI Ocean: A Survey on Hallucination in Large Language Models. arXiv preprint arXiv:2309.01219","author":"Zhang Yue","year":"2023","unstructured":"Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, 2023. Siren\u2019s Song in the AI Ocean: A Survey on Hallucination in Large Language Models. 
arXiv preprint arXiv:2309.01219 (2023)."},{"key":"e_1_3_2_1_83_1","volume-title":"Universal and transferable adversarial attacks on aligned language models. arXiv preprint arXiv:2307.15043","author":"Zou Andy","year":"2023","unstructured":"Andy Zou, Zifan Wang, J\u00a0Zico Kolter, and Matt Fredrikson. 2023. Universal and transferable adversarial attacks on aligned language models. arXiv preprint arXiv:2307.15043 (2023)."}],"event":{"name":"EASE 2024: 28th International Conference on Evaluation and Assessment in Software Engineering","location":"Salerno Italy","acronym":"EASE 2024"},"container-title":["Proceedings of the 28th International Conference on Evaluation and Assessment in Software Engineering"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3661167.3661210","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3661167.3661210","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,22]],"date-time":"2025-08-22T11:14:49Z","timestamp":1755861289000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3661167.3661210"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,18]]},"references-count":83,"alternative-id":["10.1145\/3661167.3661210","10.1145\/3661167"],"URL":"https:\/\/doi.org\/10.1145\/3661167.3661210","relation":{},"subject":[],"published":{"date-parts":[[2024,6,18]]},"assertion":[{"value":"2024-06-18","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}