{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,2]],"date-time":"2025-10-02T20:10:29Z","timestamp":1759435829500,"version":"build-2065373602"},"publisher-location":"New York, NY, USA","reference-count":60,"publisher":"ACM","funder":[{"name":"National Natural Science Foundation of China","award":["62402267","62341201"],"award-info":[{"award-number":["62402267","62341201"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,6,23]]},"DOI":"10.1145\/3711875.3729128","type":"proceedings-article","created":{"date-parts":[[2025,10,2]],"date-time":"2025-10-02T19:30:22Z","timestamp":1759433422000},"page":"124-137","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["CrossLM: A Data-Free Collaborative Fine-Tuning Framework for Large and Small Language Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3010-3812","authenticated-orcid":false,"given":"Yongheng","family":"Deng","sequence":"first","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-2811-0515","authenticated-orcid":false,"given":"Ziqing","family":"Qiao","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3038-9259","authenticated-orcid":false,"given":"Ye","family":"Zhang","sequence":"additional","affiliation":[{"name":"Beijing Information Science and Technology University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-4788-7633","authenticated-orcid":false,"given":"Zhenya","family":"Ma","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3800-3533","authenticated-orcid":false,"given":"Yang","family":"Liu","sequence":"additional","affiliation":[{"name":"Institute for AI Industry Research, Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2782-183X","authenticated-orcid":false,"given":"Ju","family":"Ren","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,9,25]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Yahya H Ezzeldin, Qingfeng Liu, Kee-Bong Song, Mostafa El-Khamy, and Salman Avestimehr.","author":"Babakniya Sara","year":"2023","unstructured":"Sara Babakniya, Ahmed Roushdy Elkordy, Yahya H Ezzeldin, Qingfeng Liu, Kee-Bong Song, Mostafa El-Khamy, and Salman Avestimehr. 2023. SLoRA: Federated parameter efficient fine-tuning of language models. arXiv preprint arXiv:2308.06522 (2023)."},{"key":"e_1_3_2_1_2_1","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown Tom","year":"2020","unstructured":"Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. NeurIPS 33 (2020), 1877\u20131901.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_3_1","volume-title":"FedGEMS: Federated learning of larger server models via selective knowledge fusion. arXiv preprint arXiv:2110.11027","author":"Cheng Sijie","year":"2021","unstructured":"Sijie Cheng, Jingwen Wu, Yanghua Xiao, and Yang Liu. 2021. 
FedGEMS: Federated learning of larger server models via selective knowledge fusion. arXiv preprint arXiv:2110.11027 (2021)."},{"key":"e_1_3_2_1_4_1","volume-title":"Heterogeneous Ensemble Knowledge Transfer for Training Large Models in Federated Learning. arXiv preprint arXiv:2204.12703","author":"Cho Yae Jee","year":"2022","unstructured":"Yae Jee Cho, Andre Manoel, Gauri Joshi, Robert Sim, and Dimitrios Dimitriadis. 2022. Heterogeneous Ensemble Knowledge Transfer for Training Large Models in Federated Learning. arXiv preprint arXiv:2204.12703 (2022)."},{"key":"e_1_3_2_1_5_1","volume-title":"Personalized Federated Learning for Heterogeneous Clients with Clustered Knowledge Transfer. arXiv preprint arXiv:2109.08119","author":"Cho Yae Jee","year":"2021","unstructured":"Yae Jee Cho, Jianyu Wang, Tarun Chiruvolu, and Gauri Joshi. 2021. Personalized Federated Learning for Heterogeneous Clients with Clustered Knowledge Transfer. arXiv preprint arXiv:2109.08119 (2021)."},{"key":"e_1_3_2_1_6_1","volume-title":"BoolQ: Exploring the surprising difficulty of natural yes\/no questions. arXiv preprint arXiv:1905.10044","author":"Clark Christopher","year":"2019","unstructured":"Christopher Clark, Kenton Lee, Ming-Wei Chang, Tom Kwiatkowski, Michael Collins, and Kristina Toutanova. 2019. BoolQ: Exploring the surprising difficulty of natural yes\/no questions. arXiv preprint arXiv:1905.10044 (2019)."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"crossref","unstructured":"Yongheng Deng Weining Chen Ju Ren Feng Lyu Yang Liu Yunxin Liu and Yaoxue Zhang. 2022. TailorFL: Dual-Personalized Federated Learning under System and Data Heterogeneity. In ACM SenSys. 592\u2013606.","DOI":"10.1145\/3560905.3568503"},{"key":"e_1_3_2_1_8_1","volume-title":"A hierarchical knowledge transfer framework for heterogeneous federated learning","author":"Deng Yongheng","unstructured":"Yongheng Deng, Ju Ren, Cheng Tang, Feng Lyu, Yang Liu, and Yaoxue Zhang. 2023. A hierarchical knowledge transfer framework for heterogeneous federated learning. In IEEE INFOCOM. 1\u201310."},{"key":"e_1_3_2_1_9_1","volume-title":"Third international workshop on paraphrasing (IWP2005)","author":"Dolan Bill","year":"2005","unstructured":"Bill Dolan and Chris Brockett. 2005. Automatically constructing a corpus of sentential paraphrases. In Third international workshop on paraphrasing (IWP2005)."},{"key":"e_1_3_2_1_10_1","unstructured":"Angela Fan Mike Lewis and Yann Dauphin. 2018. Hierarchical Neural Story Generation. In ACL. 889\u2013898."},{"key":"e_1_3_2_1_11_1","volume-title":"FedMKT: Federated Mutual Knowledge Transfer for Large and Small Language Models. arXiv preprint arXiv:2406.02224","author":"Fan Tao","year":"2024","unstructured":"Tao Fan, Guoqiang Ma, Yan Kang, Hanlin Gu, Lixin Fan, and Qiang Yang. 2024. FedMKT: Federated Mutual Knowledge Transfer for Large and Small Language Models. arXiv preprint arXiv:2406.02224 (2024)."},{"key":"e_1_3_2_1_12_1","volume-title":"GPTQ: Accurate post-training quantization for generative pre-trained transformers. arXiv preprint arXiv:2210.17323","author":"Frantar Elias","year":"2022","unstructured":"Elias Frantar, Saleh Ashkboos, Torsten Hoefler, and Dan Alistarh. 2022. GPTQ: Accurate post-training quantization for generative pre-trained transformers. arXiv preprint arXiv:2210.17323 (2022)."},{"key":"e_1_3_2_1_13_1","doi-asserted-by":"crossref","unstructured":"Suchin Gururangan Ana Marasovi\u0107 Swabha Swayamdipta Kyle Lo Iz Beltagy Doug Downey and Noah A Smith. 2020. 
Don't Stop Pretraining: Adapt Language Models to Domains and Tasks. In ACL. 8342\u20138360.","DOI":"10.18653\/v1\/2020.acl-main.740"},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.23919\/cje.2021.00.370"},{"key":"e_1_3_2_1_15_1","first-page":"14068","article-title":"Group knowledge transfer: Federated learning of large cnns at the edge","volume":"33","author":"He Chaoyang","year":"2020","unstructured":"Chaoyang He, Murali Annavaram, and Salman Avestimehr. 2020. Group knowledge transfer: Federated learning of large cnns at the edge. NeurIPS 33 (2020), 14068\u201314080.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_16_1","volume-title":"The curious case of neural text degeneration. arXiv preprint arXiv:1904.09751","author":"Holtzman Ari","year":"2019","unstructured":"Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2019. The curious case of neural text degeneration. arXiv preprint arXiv:1904.09751 (2019)."},{"key":"e_1_3_2_1_17_1","volume-title":"Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly.","author":"Houlsby Neil","year":"2019","unstructured":"Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In ICML. 2790\u20132799."},{"key":"e_1_3_2_1_18_1","volume-title":"Measuring the effects of non-identical data distribution for federated visual classification. arXiv preprint arXiv:1909.06335","author":"Harry Hsu Tzu-Ming","year":"2019","unstructured":"Tzu-Ming Harry Hsu, Hang Qi, and Matthew Brown. 2019. Measuring the effects of non-identical data distribution for federated visual classification. arXiv preprint arXiv:1909.06335 (2019)."},{"key":"e_1_3_2_1_19_1","volume-title":"LoRA: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685","author":"Hu Edward J","year":"2021","unstructured":"Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2021. LoRA: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)."},{"key":"e_1_3_2_1_20_1","volume-title":"Proceedings of naacL-HLT","volume":"1","author":"Ming-Wei Chang Jacob Devlin","year":"2019","unstructured":"Jacob Devlin Ming-Wei Chang Kenton and Lee Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of naacL-HLT, Vol. 1. 2."},{"key":"e_1_3_2_1_21_1","doi-asserted-by":"crossref","unstructured":"Brian Lester Rami Al-Rfou and Noah Constant. 2021. The Power of Scale for Parameter-Efficient Prompt Tuning. In EMNLP. 3045\u20133059.","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"crossref","unstructured":"Ang Li Jingwei Sun Pengcheng Li Yu Pu Hai Li and Yiran Chen. 2021. Hermes: an efficient federated learning framework for heterogeneous mobile clients. In ACM MobiCom. 420\u2013437.","DOI":"10.1145\/3447993.3483278"},{"key":"e_1_3_2_1_23_1","volume-title":"Federated Domain-Specific Knowledge Transfer on Large Language Models Using Synthetic Data. arXiv preprint arXiv:2405.14212","author":"Li Haoran","year":"2024","unstructured":"Haoran Li, Xinyuan Zhao, Dadi Guo, Hanlin Gu, Ziqian Zeng, Yuxing Han, Yangqiu Song, Lixin Fan, and Qiang Yang. 2024. Federated Domain-Specific Knowledge Transfer on Large Language Models Using Synthetic Data. 
arXiv preprint arXiv:2405.14212 (2024)."},{"key":"e_1_3_2_1_24_1","volume-title":"Ctrl: Connect tabular and language model for ctr prediction. arXiv preprint arXiv:2306.02841","author":"Li Xiangyang","year":"2023","unstructured":"Xiangyang Li, Bo Chen, Lu Hou, and Ruiming Tang. 2023. Ctrl: Connect tabular and language model for ctr prediction. arXiv preprint arXiv:2306.02841 (2023)."},{"key":"e_1_3_2_1_25_1","unstructured":"Xian Li Ping Yu Chunting Zhou Timo Schick Omer Levy Luke Zettlemoyer Jason E Weston and Mike Lewis. 2024. Self-Alignment with Instruction Backtranslation. In ICLR."},{"key":"e_1_3_2_1_26_1","volume-title":"Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190","author":"Li Xiang Lisa","year":"2021","unstructured":"Xiang Lisa Li and Percy Liang. 2021. Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190 (2021)."},{"key":"e_1_3_2_1_27_1","unstructured":"Zhuoyan Li Hangxiao Zhu Zhuoran Lu and Ming Yin. 2023. Synthetic Data Generation with Large Language Models for Text Classification: Potential and Limitations. In EMNLP. 10443\u201310461."},{"key":"e_1_3_2_1_28_1","doi-asserted-by":"crossref","unstructured":"Lin Long Rui Wang Ruixuan Xiao Junbo Zhao Xiao Ding Gang Chen and Haobo Wang. 2024. On LLMs-Driven Synthetic Data Generation Curation and Evaluation: A Survey. In ACL. 11065\u201311082.","DOI":"10.18653\/v1\/2024.findings-acl.658"},{"key":"e_1_3_2_1_29_1","unstructured":"Andrew Maas Raymond E Daly Peter T Pham Dan Huang Andrew Y Ng and Christopher Potts. 2011. Learning word vectors for sentiment analysis. In ACL. 142\u2013150."},{"key":"e_1_3_2_1_30_1","volume-title":"Human Language Technology: Proceedings of a Workshop held at Plainsboro, New Jersey, March 8\u201311","author":"Marcus Mitch","year":"1994","unstructured":"Mitch Marcus, Grace Kim, Mary Ann Marcinkiewicz, Robert MacIntyre, Ann Bies, Mark Ferguson, Karen Katz, and Britta Schasberger. 1994. The Penn tree-bank: Annotating predicate argument structure. In Human Language Technology: Proceedings of a Workshop held at Plainsboro, New Jersey, March 8\u201311, 1994."},{"key":"e_1_3_2_1_31_1","unstructured":"Brendan McMahan Eider Moore Daniel Ramage Seth Hampson and Blaise Aguera y Arcas. 2017. Communication-efficient learning of deep networks from decentralized data. In Artificial intelligence and statistics. 1273\u20131282."},{"key":"e_1_3_2_1_32_1","volume-title":"An emulator for fine-tuning large language models using small language models. arXiv preprint arXiv:2310.12962","author":"Mitchell Eric","year":"2023","unstructured":"Eric Mitchell, Rafael Rafailov, Archit Sharma, Chelsea Finn, and Christopher D Manning. 2023. An emulator for fine-tuning large language models using small language models. arXiv preprint arXiv:2310.12962 (2023)."},{"key":"e_1_3_2_1_33_1","unstructured":"Long Ouyang Jeffrey Wu Xu Jiang Diogo Almeida Carroll Wainwright Pamela Mishkin Chong Zhang Sandhini Agarwal Katarina Slama Alex Gray et al. 2022. Training language models to follow instructions with human feedback. In NeurIPS."},{"key":"e_1_3_2_1_34_1","unstructured":"Alec Radford Jeffrey Wu Rewon Child David Luan Dario Amodei Ilya Sutskever et al. 2019. Language models are unsupervised multitask learners. OpenAI blog 1 8 (2019) 9."},{"key":"e_1_3_2_1_35_1","volume-title":"Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084","author":"Reimers Nils","year":"2019","unstructured":"Nils Reimers and Iryna Gurevych. 2019. 
Sentence-bert: Sentence embeddings using siamese bert-networks. arXiv preprint arXiv:1908.10084 (2019)."},{"key":"e_1_3_2_1_36_1","volume-title":"a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108","author":"Sanh Victor","year":"2019","unstructured":"Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. arXiv preprint arXiv:1910.01108 (2019)."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"crossref","unstructured":"Richard Socher Alex Perelygin Jean Wu Jason Chuang Christopher D Manning Andrew Y Ng and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In ACL. 1631\u20131642.","DOI":"10.18653\/v1\/D13-1170"},{"key":"e_1_3_2_1_38_1","volume-title":"A Simple and Effective Pruning Approach for Large Language Models. arXiv preprint arXiv:2306.11695","author":"Sun Mingjie","year":"2023","unstructured":"Mingjie Sun, Zhuang Liu, Anna Bair, and J Zico Kolter. 2023. A Simple and Effective Pruning Approach for Large Language Models. arXiv preprint arXiv:2306.11695 (2023)."},{"key":"e_1_3_2_1_39_1","volume-title":"Kabilan Elangovan, Laura Gutierrez, Ting Fang Tan, and Daniel Shu Wei Ting.","author":"Thirunavukarasu Arun James","year":"2023","unstructured":"Arun James Thirunavukarasu, Darren Shu Jeng Ting, Kabilan Elangovan, Laura Gutierrez, Ting Fang Tan, and Daniel Shu Wei Ting. 2023. Large language models in medicine. Nature medicine 29, 8 (2023), 1930\u20131940."},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.1145\/3510033"},{"key":"e_1_3_2_1_41_1","volume-title":"Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_1_42_1","volume-title":"SuperGLUE: A stickier benchmark for general-purpose language understanding systems. NeurIPS 32","author":"Pruksachatkun Yada","year":"2019","unstructured":"AlexWang, Yada Pruksachatkun, Nikita Nangia, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2019. SuperGLUE: A stickier benchmark for general-purpose language understanding systems. NeurIPS 32 (2019)."},{"key":"e_1_3_2_1_43_1","volume-title":"GLUE: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461","author":"Wang Alex","year":"2018","unstructured":"Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. 2018. GLUE: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461 (2018)."},{"key":"e_1_3_2_1_44_1","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2020.3023905"},{"key":"e_1_3_2_1_45_1","doi-asserted-by":"crossref","unstructured":"Yizhong Wang Yeganeh Kordi Swaroop Mishra Alisa Liu Noah A Smith Daniel Khashabi and Hannaneh Hajishirzi. 2023. Self-Instruct: Aligning Language Models with Self-Generated Instructions. In ACL.","DOI":"10.18653\/v1\/2023.acl-long.754"},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00290"},{"key":"e_1_3_2_1_47_1","volume-title":"Offsite-tuning: Transfer learning without full model. 
arXiv preprint arXiv:2302.04870","author":"Xiao Guangxuan","year":"2023","unstructured":"Guangxuan Xiao, Ji Lin, and Song Han. 2023. Offsite-tuning: Transfer learning without full model. arXiv preprint arXiv:2302.04870 (2023)."},{"key":"e_1_3_2_1_48_1","volume-title":"Federated fine-tuning of billion-sized language models across mobile devices. arXiv preprint arXiv:2308.13894","author":"Xu Mengwei","year":"2023","unstructured":"Mengwei Xu, Yaozong Wu, Dongqi Cai, Xiang Li, and Shangguang Wang. 2023. Federated fine-tuning of billion-sized language models across mobile devices. arXiv preprint arXiv:2308.13894 (2023)."},{"key":"e_1_3_2_1_49_1","first-page":"55734","article-title":"Large language model as attributed training data generator: A tale of diversity and bias","volume":"36","author":"Yu Yue","year":"2023","unstructured":"Yue Yu, Yuchen Zhuang, Jieyu Zhang, Yu Meng, Alexander J Ratner, Ranjay Krishna, Jiaming Shen, and Chao Zhang. 2023. Large language model as attributed training data generator: A tale of diversity and bias. Advances in Neural Information Processing Systems 36 (2023), 55734\u201355784.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.23919\/cje.2023.00.288"},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"crossref","unstructured":"Elad Ben Zaken Yoav Goldberg and Shauli Ravfogel. 2022. BitFit: Simple Parameter-efficient Fine-tuning for Transformer-based Masked Language-models. In ACL. 1\u20139.","DOI":"10.18653\/v1\/2022.acl-short.1"},{"key":"e_1_3_2_1_52_1","first-page":"21414","article-title":"Dense: Data-free one-shot federated learning","volume":"35","author":"Zhang Jie","year":"2022","unstructured":"Jie Zhang, Chen Chen, Bo Li, Lingjuan Lyu, Shuang Wu, Shouhong Ding, Chunhua Shen, and Chao Wu. 2022. Dense: Data-free one-shot federated learning. NeurIPS 35 (2022), 21414\u201321428.","journal-title":"NeurIPS"},{"key":"e_1_3_2_1_53_1","volume-title":"Finetuning global model via data-free knowledge distillation for non-iid federated learning","author":"Zhang Lin","unstructured":"Lin Zhang, Li Shen, Liang Ding, Dacheng Tao, and Ling-Yu Duan. 2022. Finetuning global model via data-free knowledge distillation for non-iid federated learning. In IEEE CVPR. 10174\u201310183."},{"key":"e_1_3_2_1_54_1","volume-title":"Text revealer: Private text reconstruction via model inversion attacks against transformers. arXiv preprint arXiv:2209.10505","author":"Zhang Ruisi","year":"2022","unstructured":"Ruisi Zhang, Seira Hidano, and Farinaz Koushanfar. 2022. Text revealer: Private text reconstruction via model inversion attacks against transformers. arXiv preprint arXiv:2209.10505 (2022)."},{"key":"e_1_3_2_1_55_1","volume-title":"Character-level convolutional networks for text classification. NeurIPS 28","author":"Zhang Xiang","year":"2015","unstructured":"Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. NeurIPS 28 (2015)."},{"key":"e_1_3_2_1_56_1","unstructured":"Zhuo Zhang Yuanhang Yang Yong Dai Lizhen Qu and Zenglin Xu. 2022. When Federated Learning Meets Pre-trained Language Models' Parameter-Efficient Tuning Methods. In ACL."},{"key":"e_1_3_2_1_57_1","volume-title":"Fedpetuning: When federated learning meets the parameter-efficient tuning methods of pre-trained language models. In ACL. 9963\u20139977.","author":"Zhang Zhuo","year":"2023","unstructured":"Zhuo Zhang, Yuanhang Yang, Yong Dai, Qifan Wang, Yue Yu, Lizhen Qu, and Zenglin Xu. 
2023. Fedpetuning: When federated learning meets the parameter-efficient tuning methods of pre-trained language models. In ACL. 9963\u20139977."},{"key":"e_1_3_2_1_58_1","volume-title":"Reduce communication costs and preserve privacy: Prompt tuning method in federated learning. arXiv preprint arXiv:2208.12268","author":"Zhao Haodong","year":"2022","unstructured":"Haodong Zhao, Wei Du, Fangqi Li, Peixuan Li, and Gongshen Liu. 2022. Reduce communication costs and preserve privacy: Prompt tuning method in federated learning. arXiv preprint arXiv:2208.12268 (2022)."},{"key":"e_1_3_2_1_59_1","volume-title":"Fed-prompt: Communication-efficient and privacy-preserving prompt tuning in federated learning","author":"Zhao Haodong","year":"2023","unstructured":"Haodong Zhao, Wei Du, Fangqi Li, Peixuan Li, and Gongshen Liu. 2023. Fed-prompt: Communication-efficient and privacy-preserving prompt tuning in federated learning. In ICASSP. IEEE, 1\u20135."},{"key":"e_1_3_2_1_60_1","volume-title":"Model Inversion Attacks: A Survey of Approaches and Counter-measures. arXiv preprint arXiv:2411.10023","author":"Zhou Zhanke","year":"2024","unstructured":"Zhanke Zhou, Jianing Zhu, Fengfei Yu, Xuan Li, Xiong Peng, Tongliang Liu, and Bo Han. 2024. Model Inversion Attacks: A Survey of Approaches and Counter-measures. arXiv preprint arXiv:2411.10023 (2024)."}],"event":{"name":"MobiSys '25: 23rd Annual International Conference on Mobile Systems, Applications and Services","location":"Hilton Anaheim Anaheim CA USA","acronym":"MobiSys '25","sponsor":["SIGMOBILE ACM Special Interest Group on Mobility of Systems, Users, Data and Computing","SIGOPS ACM Special Interest Group on Operating Systems"]},"container-title":["Proceedings of the 23rd Annual International Conference on Mobile Systems, Applications and Services"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3711875.3729128","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,2]],"date-time":"2025-10-02T19:30:53Z","timestamp":1759433453000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3711875.3729128"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,23]]},"references-count":60,"alternative-id":["10.1145\/3711875.3729128","10.1145\/3711875"],"URL":"https:\/\/doi.org\/10.1145\/3711875.3729128","relation":{},"subject":[],"published":{"date-parts":[[2025,6,23]]},"assertion":[{"value":"2025-09-25","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}
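
The record above is a Crossref REST API "work" object (its "source" field reads "Crossref"). Below is a minimal sketch of how such a record can be re-fetched and inspected; it assumes the public api.crossref.org/works/{DOI} endpoint and the third-party Python requests package, neither of which is stated in the record itself. The field accesses mirror the payload above.

```python
# Minimal sketch (not part of the record): re-fetch this Crossref work
# record and print a few of its fields. Assumes the public Crossref REST
# API and the `requests` package are available.
import requests

DOI = "10.1145/3711875.3729128"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the same "message" object shown above

print(work["title"][0])                # paper title
for author in work.get("author", []):  # entries carry given/family/ORCID
    print(" -", author.get("given", ""), author.get("family", ""))
print("references:", work.get("references-count"))
```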