{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T22:47:52Z","timestamp":1776206872668,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":51,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819681822","type":"print"},{"value":"9789819681839","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-96-8183-9_25","type":"book-chapter","created":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T17:38:09Z","timestamp":1750354689000},"page":"326-344","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":15,"title":["Privacy in\u00a0Fine-Tuning Large Language Models: Attacks, Defenses, and\u00a0Future Directions"],"prefix":"10.1007","author":[{"given":"Hao","family":"Du","sequence":"first","affiliation":[]},{"given":"Shang","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Lele","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Yang","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Atsuyoshi","family":"Nakamura","sequence":"additional","affiliation":[]},{"given":"Lei","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,6,20]]},"reference":[{"key":"25_CR1","doi-asserted-by":"crossref","unstructured":"Behnia, R., Ebrahimi, M.R., Pacheco, J., Padmanabhan, B.: Ew-tune: A framework for privately fine-tuning large language models with differential privacy. In: 2022 IEEE International Conference on Data Mining Workshops. pp. 560\u2013566 (2022)","DOI":"10.1109\/ICDMW58026.2022.00078"},{"key":"25_CR2","doi-asserted-by":"crossref","unstructured":"Ben\u00a0Zaken, E., Goldberg, Y., Ravfogel, S.: BitFit: Simple parameter-efficient fine-tuning for transformer-based masked language-models. In: Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics. pp.\u00a01\u20139 (2022)","DOI":"10.18653\/v1\/2022.acl-short.1"},{"key":"25_CR3","unstructured":"Bu, Z., Wang, Y.X., Zha, S., Karypis, G.: Differentially private optimization on large model at small cost. In: Proceedings of the 40th International Conference on Machine Learning (2023)"},{"key":"25_CR4","doi-asserted-by":"crossref","unstructured":"Chen, J., Yang, D.: Unlearn what you want to forget: Efficient unlearning for llms. In: Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. pp. 12041\u201312052 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.738"},{"key":"25_CR5","unstructured":"Chen, Y., Li, T., Liu, H., Yu, Y.: Hide and seek (has): A lightweight framework for prompt privacy protection. arXiv preprint arXiv:2309.03057 (2023)"},{"key":"25_CR6","unstructured":"Devlin, J.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"25_CR7","unstructured":"Ding, Y., Wu, X., Meng, Y., Luo, Y., Wang, H., Pan, W.: Delving into differentially private transformer. In: Proceedings of the 41st International Conference on Machine Learning. vol.\u00a0235, p.\u00a0TBD (2024)"},{"key":"25_CR8","doi-asserted-by":"crossref","unstructured":"Dwork, C.: Differential privacy. In: International colloquium on automata, languages, and programming. pp. 1\u201312 (2006)","DOI":"10.1007\/11787006_1"},{"key":"25_CR9","unstructured":"Eldan, R., Russinovich, M.: Who\u2019s harry potter? approximate unlearning in llms. arXiv preprint arXiv:2310.02238 (2023)"},{"key":"25_CR10","doi-asserted-by":"crossref","unstructured":"Feyisetan, O., Diethe, T., Drake, T.: Leveraging Hierarchical Representations for Preserving Privacy and Utility in Text . In: 2019 IEEE International Conference on Data Mining. pp. 210\u2013219 (2019)","DOI":"10.1109\/ICDM.2019.00031"},{"key":"25_CR11","unstructured":"Fu, W., Wang, H., Gao, C., Liu, G., Li, Y., Jiang, T.: Practical membership inference attacks against fine-tuned large language models via self-prompt calibration. arXiv preprint arXiv:2311.06062 (2023)"},{"key":"25_CR12","first-page":"8130","volume":"35","author":"S Gupta","year":"2022","unstructured":"Gupta, S., Huang, Y., Zhong, Z., Gao, T., Li, K., Chen, D.: Recovering private text in federated learning of language models. Adv. Neural. Inf. Process. Syst. 35, 8130\u20138143 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"25_CR13","unstructured":"Han, Z., Gao, C., Liu, J., Zhang, J., Zhang, S.Q.: Parameter-efficient fine-tuning for large models: A comprehensive survey. arXiv preprint arXiv:2403.14608 (2024)"},{"key":"25_CR14","unstructured":"Houlsby, N., Giurgiu, A., Jastrzebski, S., Morrone, B., De\u00a0Laroussilhe, Q., Gesmundo, A., Attariyan, M., Gelly, S.: Parameter-efficient transfer learning for nlp. In: International conference on machine learning. pp. 2790\u20132799 (2019)"},{"key":"25_CR15","unstructured":"Hu, E.J., yelong shen, Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., Chen, W.: LoRA: Low-rank adaptation of large language models. In: International Conference on Learning Representations (2022)"},{"key":"25_CR16","unstructured":"Huang, T., Hu, S., Ilhan, F., Tekin, S.F., Liu, L.: Harmful fine-tuning attacks and defenses for large language models: A survey. arXiv preprint arXiv:2409.18169 (2024)"},{"key":"25_CR17","doi-asserted-by":"crossref","unstructured":"Igamberdiev, T., Habernal, I.: Dp-bart for privatized text rewriting under local differential privacy. In: Findings of the Association for Computational Linguistics: ACL 2023. pp. 13914\u201313934 (2023)","DOI":"10.18653\/v1\/2023.findings-acl.874"},{"key":"25_CR18","unstructured":"Jagannatha, A., Rawat, B.P.S., Yu, H.: Membership inference attack susceptibility of clinical language models. arXiv preprint arXiv:2104.08305 (2021)"},{"key":"25_CR19","doi-asserted-by":"crossref","unstructured":"Jang, J., Yoon, D., Yang, S., Cha, S., Lee, M., Logeswaran, L., Seo, M.: Knowledge unlearning for mitigating privacy risks in language models. In: Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics. pp. 14389\u201314408 (2023)","DOI":"10.18653\/v1\/2023.acl-long.805"},{"key":"25_CR20","doi-asserted-by":"crossref","unstructured":"Lester, B., Al-Rfou, R., Constant, N.: The power of scale for parameter-efficient prompt tuning. In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing. pp. 3045\u20133059 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"25_CR21","unstructured":"Li, H., Chen, Y., Luo, J., Wang, J., Peng, H., Kang, Y., Zhang, X., Hu, Q., Chan, C., Xu, Z., et\u00a0al.: Privacy in large language models: Attacks, defenses and future directions. arXiv preprint arXiv:2310.10383 (2023)"},{"key":"25_CR22","doi-asserted-by":"crossref","unstructured":"Li, X.L., Liang, P.: Prefix-tuning: Optimizing continuous prompts for generation. In: Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing. pp. 4582\u20134597 (2021)","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"25_CR23","unstructured":"Li, X., Tramer, F., Liang, P., Hashimoto, T.: Large language models can be strong differentially private learners. In: International Conference on Learning Representations (2022)"},{"key":"25_CR24","unstructured":"Li, Y., Tan, Z., Liu, Y.: Privacy-preserving prompt tuning for large language model services. arXiv preprint arXiv:2305.06212 (2023)"},{"key":"25_CR25","doi-asserted-by":"crossref","unstructured":"Liu, R., Wang, T., Cao, Y., Xiong, L.: Precurious: How innocent pre-trained language models turn into privacy traps. arXiv preprint arXiv:2403.09562 (2024)","DOI":"10.1145\/3658644.3690279"},{"key":"25_CR26","doi-asserted-by":"crossref","unstructured":"Liu, X.Y., Zhu, R., Zha, D., Gao, J., Zhong, S., White, M., Qiu, M.: Differentially private low-rank adaptation of large language model using federated learning (2024)","DOI":"10.1145\/3682068"},{"key":"25_CR27","doi-asserted-by":"crossref","unstructured":"Lukas, N., Salem, A., Sim, R., Tople, S., Wutschitz, L., Zanella-B\u00e9guelin, S.: Analyzing leakage of personally identifiable information in language models. In: 2023 IEEE Symposium on Security and Privacy. pp. 346\u2013363 (2023)","DOI":"10.1109\/SP46215.2023.10179300"},{"key":"25_CR28","doi-asserted-by":"crossref","unstructured":"Mamede, N., Baptista, J., Dias, F.: Automated anonymization of text documents. In: 2016 IEEE Congress on Evolutionary Computation. pp. 1287\u20131294 (2016)","DOI":"10.1109\/CEC.2016.7743936"},{"key":"25_CR29","first-page":"17359","volume":"35","author":"K Meng","year":"2022","unstructured":"Meng, K., Bau, D., Andonian, A., Belinkov, Y.: Locating and editing factual associations in gpt. Adv. Neural. Inf. Process. Syst. 35, 17359\u201317372 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"25_CR30","unstructured":"Miranda, M., Ruzzetti, E.S., Santilli, A., Zanzotto, F.M., Brati\u00e8res, S., Rodol\u00e0, E.: Preserving privacy in large language models: A survey on current threats and solutions. arXiv preprint arXiv:2408.05212 (2024)"},{"key":"25_CR31","doi-asserted-by":"crossref","unstructured":"Mireshghallah, F., Uniyal, A., Wang, T., Evans, D.K., Berg-Kirkpatrick, T.: An empirical analysis of memorization in fine-tuned autoregressive language models. In: EMNLP. pp. 1816\u20131826 (2022)","DOI":"10.18653\/v1\/2022.emnlp-main.119"},{"key":"25_CR32","doi-asserted-by":"crossref","unstructured":"Ozdayi, M.S., Peris, C., FitzGerald, J., Dupuy, C., Majmudar, J., Khan, H., Parikh, R., Gupta, R.: Controlling the extraction of memorized data from large language models via prompt-tuning. arXiv preprint arXiv:2305.11759 (2023)","DOI":"10.18653\/v1\/2023.acl-short.129"},{"key":"25_CR33","doi-asserted-by":"crossref","unstructured":"Shi, W., Shea, R., Chen, S., Zhang, C., Jia, R., Yu, Z.: Just fine-tune twice: Selective differential privacy for large language models. In: Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing. pp. 6327\u20136340 (2022)","DOI":"10.18653\/v1\/2022.emnlp-main.425"},{"key":"25_CR34","unstructured":"Sun, J., Xu, Z., Yin, H., Yang, D., Xu, D., Chen, Y., Roth, H.R.: FedBPT: Efficient federated black-box prompt tuning for large language models (2024)"},{"key":"25_CR35","unstructured":"Sweeney, L.: Guaranteeing anonymity when sharing medical data, the datafly system. In: Proceedings of the AMIA Annual Fall Symposium. p.\u00a051 (1997)"},{"key":"25_CR36","doi-asserted-by":"crossref","unstructured":"Tong, M., Chen, K., Zhang, J., Qi, Y., Zhang, W., Yu, N., Zhang, T., Zhang, Z.: Inferdpt: Privacy-preserving inference for black-box large language model (2024)","DOI":"10.1109\/TDSC.2025.3550389"},{"key":"25_CR37","doi-asserted-by":"crossref","unstructured":"Vats, A., Liu, Z., Su, P., Paul, D., Ma, Y., Pang, Y., Ahmed, Z., Kalinli, O.: Recovering from privacy-preserving masking with large language models. In: ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing. pp. 10771\u201310775 (2024)","DOI":"10.1109\/ICASSP48485.2024.10448234"},{"key":"25_CR38","unstructured":"Wan, A., Wallace, E., Shen, S., Klein, D.: Poisoning language models during instruction tuning. In: International Conference on Machine Learning. pp. 35413\u201335425 (2023)"},{"key":"25_CR39","unstructured":"Wen, Y., Marchyok, L., Hong, S., Geiping, J., Goldstein, T., Carlini, N.: Privacy backdoors: Enhancing membership inference through poisoning pre-trained models. arXiv preprint arXiv:2404.01231 (2024)"},{"key":"25_CR40","doi-asserted-by":"crossref","unstructured":"Wu, X., Gong, L., Xiong, D.: Adaptive differential privacy for language model training. In: Proceedings of the First Workshop on Federated Learning for Natural Language Processing. pp. 21\u201326 (2022)","DOI":"10.18653\/v1\/2022.fl4nlp-1.3"},{"key":"25_CR41","unstructured":"Xiao, G., Lin, J., Han, S.: Offsite-tuning: Transfer learning without full model. arXiv (2023)"},{"key":"25_CR42","doi-asserted-by":"crossref","unstructured":"Xu, J., Ma, M.D., Wang, F., Xiao, C., Chen, M.: Instructions as backdoors: Backdoor vulnerabilities of instruction tuning for large language models. arXiv preprint arXiv:2305.14710 (2023)","DOI":"10.18653\/v1\/2024.naacl-long.171"},{"key":"25_CR43","unstructured":"Xu, L., Xie, H., Qin, S.Z.J., Tao, X., Wang, F.L.: Parameter-efficient fine-tuning methods for pretrained language models: A critical review and assessment. arXiv preprint arXiv:2312.12148 (2023)"},{"key":"25_CR44","unstructured":"Xu, M., Cai, D., Wu, Y., Li, X., Wang, S.: FwdLLM: Efficient federated finetuning of large language models with perturbed inferences. In: 2024 USENIX Annual Technical Conference. pp. 579\u2013596 (2024)"},{"key":"25_CR45","doi-asserted-by":"crossref","unstructured":"Yan, B., Li, K., Xu, M., Dong, Y., Zhang, Y., Ren, Z., Cheng, X.: On protecting the data privacy of large language models (llms): A survey. arXiv preprint arXiv:2403.05156 (2024)","DOI":"10.1016\/j.hcc.2025.100300"},{"key":"25_CR46","doi-asserted-by":"crossref","unstructured":"Yan, J., Yadav, V., Li, S., Chen, L., Tang, Z., Wang, H., Srinivasan, V., Ren, X., Jin, H.: Backdooring instruction-tuned large language models with virtual prompt injection. In: Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies. pp. 6065\u20136086 (2024)","DOI":"10.18653\/v1\/2024.naacl-long.337"},{"key":"25_CR47","unstructured":"Yu, D., Naik, S., Backurs, A., Gopi, S., Inan, H.A., Kamath, G., Kulkarni, J., Lee, Y.T., Manoel, A., Wutschitz, L., Yekhanin, S., Zhang, H.: Differentially private fine-tuning of language models. In: International Conference on Learning Representations (2022)"},{"key":"25_CR48","doi-asserted-by":"crossref","unstructured":"Yue, X., Du, M., Wang, T., Li, Y., Sun, H., Chow, S.S.M.: Differential privacy for text analytics via natural text sanitization. In: Findings, ACL-IJCNLP 2021 (2021)","DOI":"10.18653\/v1\/2021.findings-acl.337"},{"key":"25_CR49","doi-asserted-by":"crossref","unstructured":"Zanella-B\u00e9guelin, S., Wutschitz, L., Tople, S., R\u00fchle, V., Paverd, A., Ohrimenko, O., K\u00f6pf, B., Brockschmidt, M.: Analyzing information leakage of updates to natural language models. In: Proceedings of the 2020 ACM SIGSAC conference on computer and communications security. pp. 363\u2013375 (2020)","DOI":"10.1145\/3372297.3417880"},{"key":"25_CR50","doi-asserted-by":"crossref","unstructured":"Zhang, J., Vahidian, S., Kuo, M., Li, C., Zhang, R., Yu, T., Wang, G., Chen, Y.: Towards building the federatedgpt: Federated instruction tuning. In: ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing. pp. 6915\u20136919 (2024)","DOI":"10.1109\/ICASSP48485.2024.10447454"},{"key":"25_CR51","doi-asserted-by":"crossref","unstructured":"Zheng, J.Y., Zhang, H.N., Wang, L.X., Qiu, W.J., Zheng, H.W., Zheng, Z.M.: Safely learning with private data: A federated learning framework for large language model. In: Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing. pp. 5293\u20135306 (2024)","DOI":"10.18653\/v1\/2024.emnlp-main.303"}],"container-title":["Lecture Notes in Computer Science","Advances in Knowledge Discovery and Data Mining"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-8183-9_25","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T17:38:27Z","timestamp":1750354707000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-8183-9_25"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9789819681822","9789819681839"],"references-count":51,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-8183-9_25","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"20 June 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PAKDD","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Pacific-Asia Conference on Knowledge Discovery and Data Mining","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Sydney, NSW","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Australia","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"10 June 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"13 June 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"pakdd2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/pakdd2025.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}