{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,21]],"date-time":"2025-08-21T16:49:43Z","timestamp":1755794983509,"version":"3.44.0"},"publisher-location":"New York, NY, USA","reference-count":40,"publisher":"ACM","funder":[{"DOI":"10.13039\/100006785","name":"Google","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006785","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000006","name":"Office of Naval Research","doi-asserted-by":"publisher","award":["N00014-21-1-2841"],"award-info":[{"award-number":["N00014-21-1-2841"]}],"id":[{"id":"10.13039\/100000006","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,8,3]]},"DOI":"10.1145\/3711896.3736842","type":"proceedings-article","created":{"date-parts":[[2025,8,3]],"date-time":"2025-08-03T21:05:41Z","timestamp":1754255141000},"page":"2090-2101","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":0,"title":["SPARTA: An Optimization Framework for Differentially Private Sparse Fine-Tuning"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-7573-8775","authenticated-orcid":false,"given":"Mehdi","family":"Makni","sequence":"first","affiliation":[{"name":"Operations Research Center, Massachusetts Institute of Technology, Cambridge, Massachusetts, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3482-0421","authenticated-orcid":false,"given":"Kayhan","family":"Behdin","sequence":"additional","affiliation":[{"name":"LinkedIn, Cambridge, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9730-5403","authenticated-orcid":false,"given":"Gabriel","family":"Afriat","sequence":"additional","affiliation":[{"name":"Operations 
Research Center, Massachusetts Institute of Technology, Cambridge, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-6747-3953","authenticated-orcid":false,"given":"Zheng","family":"Xu","sequence":"additional","affiliation":[{"name":"Google Research, Mountain View, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0235-1624","authenticated-orcid":false,"given":"Sergei","family":"Vassilvitskii","sequence":"additional","affiliation":[{"name":"Google Research, New York, NY, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-6761-1468","authenticated-orcid":false,"given":"Natalia","family":"Ponomareva","sequence":"additional","affiliation":[{"name":"Google Research, New York, NY, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1384-9743","authenticated-orcid":false,"given":"Rahul","family":"Mazumder","sequence":"additional","affiliation":[{"name":"Sloan School of Management, Massachusetts Institute of Technology, Cambridge, MA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4501-0678","authenticated-orcid":false,"given":"Hussein","family":"Hazimeh","sequence":"additional","affiliation":[{"name":"Google Research, Mountain View, CA, USA"}]}],"member":"320","published-online":{"date-parts":[[2025,8,3]]},"reference":[{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978318"},{"key":"e_1_3_2_1_2_1","unstructured":"Kamil Adamczewski Yingchen He and Mijung Park. 2023. Pre-Pruning and Gradient-Dropping Improve Differentially Private Image Classification. arXiv preprint arXiv:2306.11754(2023)."},{"key":"e_1_3_2_1_3_1","first-page":"464","article-title":"Private empirical risk minimization: Efficient algorithms and tight error bounds. In 2014 IEEE 55th annual symposium on foundations of computer science","author":"Bassily Raef","year":"2014","unstructured":"Raef Bassily, Adam Smith, and Abhradeep Thakurta. 2014. Private empirical risk minimization: Efficient algorithms and tight error bounds. 
In 2014 IEEE 55th annual symposium on foundations of computer science. IEEE, 464-473.","journal-title":"IEEE"},{"key":"e_1_3_2_1_4_1","volume-title":"International Conference on Machine Learning. PMLR","author":"Benbaki Riade","year":"2023","unstructured":"Riade Benbaki, Wenyu Chen, Xiang Meng, Hussein Hazimeh, Natalia Ponomareva, Zhe Zhao, and Rahul Mazumder. 2023. Fast as chita: Neural network pruning with combinatorial optimization. In International Conference on Machine Learning. PMLR, 2031-2049."},{"key":"e_1_3_2_1_5_1","unstructured":"Zhiqi Bu Yu-Xiang Wang Sheng Zha and George Karypis. 2023. Differentially private Bias-Term Only Fine-tuning of Foundation Models. https:\/\/openreview.net\/forum?id=zoTUH3Fjup"},{"key":"e_1_3_2_1_6_1","unstructured":"Yannis Cattan Christopher A Choquette-Choo Nicolas Papernot and Abhradeep Thakurta. 2022. Fine-tuning with differential privacy necessitates an additional hyperparameter search. arXiv preprint arXiv:2210.02156(2022)."},{"key":"e_1_3_2_1_7_1","volume-title":"Abhradeep Guha Thakurta, and Zheng Xu","author":"Choquette-Choo Christopher A","year":"2024","unstructured":"Christopher A Choquette-Choo, Arun Ganesh, Ryan McKenna, H Brendan McMahan, John Rush, Abhradeep Guha Thakurta, and Zheng Xu. 2024. (Amplified) Banded Matrix Factorization: A unified approach to private training. Advances in Neural Information Processing Systems, Vol. 36 (2024)."},{"key":"e_1_3_2_1_8_1","unstructured":"Soham De Leonard Berrada Jamie Hayes Samuel L Smith and Borja Balle. 2022. Unlocking high-accuracy differentially private image classification through scale. arXiv preprint arXiv:2204.13650(2022)."},{"key":"e_1_3_2_1_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"volume-title":"International colloquium on automata, languages, and programming","author":"Dwork Cynthia","key":"e_1_3_2_1_10_1","unstructured":"Cynthia Dwork. 2006. Differential privacy. In International colloquium on automata, languages, and programming. 
Springer, 1-12."},{"key":"e_1_3_2_1_11_1","first-page":"211","volume-title":"Foundations and Trends\u00ae in Theoretical Computer Science","volume":"9","author":"Dwork Cynthia","year":"2014","unstructured":"Cynthia Dwork, Aaron Roth, et al., 2014. The algorithmic foundations of differential privacy. Foundations and Trends\u00ae in Theoretical Computer Science, Vol. 9, 3-4 (2014), 211-407."},{"key":"e_1_3_2_1_12_1","first-page":"4475","article-title":"Optimal brain compression: A framework for accurate post-training quantization and pruning","volume":"35","author":"Frantar Elias","year":"2022","unstructured":"Elias Frantar and Dan Alistarh. 2022. Optimal brain compression: A framework for accurate post-training quantization and pruning. Advances in Neural Information Processing Systems, Vol. 35 (2022), 4475-4488.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_13_1","volume-title":"International Conference on Machine Learning. PMLR, 10611-10627","author":"Ganesh Arun","year":"2023","unstructured":"Arun Ganesh, Mahdi Haghifam, Milad Nasr, Sewoong Oh, Thomas Steinke, Om Thakkar, Abhradeep Guha Thakurta, and Lun Wang. 2023. Why is public pretraining necessary for private model training?. In International Conference on Machine Learning. PMLR, 10611-10627."},{"key":"e_1_3_2_1_14_1","first-page":"11631","article-title":"Numerical composition of differential privacy","volume":"34","author":"Gopi Sivakanth","year":"2021","unstructured":"Sivakanth Gopi, Yin Tat Lee, and Lukas Wutschitz. 2021. Numerical composition of differential privacy. Advances in Neural Information Processing Systems, Vol. 
34 (2021), 11631-11642.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICNN.1993.298572"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"e_1_3_2_1_17_1","unstructured":"Ruidan He Linlin Liu Hai Ye Qingyu Tan Bosheng Ding Liying Cheng Jia-Wei Low Lidong Bing and Luo Si. 2021. On the effectiveness of adapter-based tuning for pretrained language model adaptation. arXiv preprint arXiv:2106.03164(2021)."},{"key":"e_1_3_2_1_18_1","volume-title":"LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=nZeVKeeFYf9","author":"Hu Edward J","year":"2022","unstructured":"Edward J Hu, yelong shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. 2022. LoRA: Low-Rank Adaptation of Large Language Models. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=nZeVKeeFYf9"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19827-4_41"},{"key":"e_1_3_2_1_20_1","unstructured":"Alex Krizhevsky Geoffrey Hinton et al. 2009. Learning multiple layers of features from tiny images. (2009)."},{"key":"e_1_3_2_1_21_1","unstructured":"Alexey Kurakin Shuang Song Steve Chien Roxana Geambasu Andreas Terzis and Abhradeep Thakurta. 2022. Toward training at imagenet scale with differential privacy. arXiv preprint arXiv:2201.12328(2022)."},{"key":"e_1_3_2_1_22_1","volume-title":"Optimal brain damage. Advances in neural information processing systems","author":"LeCun Yann","year":"1989","unstructured":"Yann LeCun, John Denker, and Sara Solla. 1989. Optimal brain damage. Advances in neural information processing systems, Vol. 
2 (1989)."},{"key":"e_1_3_2_1_23_1","first-page":"28616","article-title":"When does differentially private learning not suffer in high dimensions","volume":"35","author":"Li Xuechen","year":"2022","unstructured":"Xuechen Li, Daogao Liu, Tatsunori B Hashimoto, Huseyin A Inan, Janardhan Kulkarni, Yin-Tat Lee, and Abhradeep Guha Thakurta. 2022a. When does differentially private learning not suffer in high dimensions? Advances in Neural Information Processing Systems, Vol. 35 (2022), 28616-28630.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_24_1","volume-title":"Large Language Models Can Be Strong Differentially Private Learners. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=bVuP3ltATMz","author":"Li Xuechen","year":"2022","unstructured":"Xuechen Li, Florian Tramer, Percy Liang, and Tatsunori Hashimoto. 2022b. Large Language Models Can Be Strong Differentially Private Learners. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=bVuP3ltATMz"},{"key":"e_1_3_2_1_25_1","unstructured":"Xiao-Yang Liu Rongyi Zhu Daochen Zha Jiechao Gao Shan Zhong Matt White and Meikang Qiu. 2023. Differentially private low-rank adaptation of large language model using federated learning. ACM Transactions on Management Information Systems(2023)."},{"key":"e_1_3_2_1_26_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00502"},{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1145\/1217299.1217302"},{"key":"e_1_3_2_1_28_1","unstructured":"Harsh Mehta Abhradeep Thakurta Alexey Kurakin and Ashok Cutkosky. 2022. Large scale transfer learning for differentially private image classification. arXiv preprint arXiv:2205.02973(2022)."},{"key":"e_1_3_2_1_29_1","unstructured":"Ilya Mironov Kunal Talwar and Li Zhang. 2019. Renyi differential privacy of the sampled gaussian mechanism. 
arXiv preprint arXiv:1908.10530(2019)."},{"key":"e_1_3_2_1_30_1","unstructured":"Nicolas Papernot Steve Chien Shuang Song Abhradeep Thakurta and Ulfar Erlingsson. 2019. Making the shoe fit: Architectures initializations and tuning for learning with privacy. (2019)."},{"key":"e_1_3_2_1_31_1","doi-asserted-by":"publisher","DOI":"10.1613\/JAIR.1.14649"},{"key":"e_1_3_2_1_32_1","first-page":"1","article-title":"Interactive supercomputing on 40,000 cores for machine learning and data analysis. In 2018 IEEE High Performance extreme Computing Conference (HPEC)","author":"Reuther Albert","year":"2018","unstructured":"Albert Reuther, Jeremy Kepner, Chansup Byun, Siddharth Samsi, William Arcand, David Bestor, Bill Bergeron, Vijay Gadepally, Michael Houle, Matthew Hubbell, Michael Jones, Anna Klein, Lauren Milechin, Julia Mullen, Andrew Prout, Antonio Rosa, Charles Yee, and Peter Michaleas. 2018. Interactive supercomputing on 40,000 cores for machine learning and data analysis. In 2018 IEEE High Performance extreme Computing Conference (HPEC). IEEE, 1-6.","journal-title":"IEEE"},{"key":"e_1_3_2_1_33_1","unstructured":"Yinchen Shen Zhiguo Wang Ruoyu Sun and Xiaojing Shen. 2021. Towards understanding the impact of model size on differential private classification. arXiv preprint arXiv:2111.13895(2021)."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.1142\/S0218488502001648"},{"key":"e_1_3_2_1_35_1","volume-title":"International conference on machine learning. PMLR, 10347-10357","author":"Touvron Hugo","year":"2021","unstructured":"Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Herv\u00e9 J\u00e9gou. 2021. Training data-efficient image transformers & distillation through attention. In International conference on machine learning. PMLR, 10347-10357."},{"key":"e_1_3_2_1_36_1","volume-title":"International Conference on Learning Representations. 
https:\/\/openreview.net\/forum?id=YTWGvpFOQD-","author":"Tramer Florian","year":"2021","unstructured":"Florian Tramer and Dan Boneh. 2021. Differentially Private Learning Needs Better Features (or Much More Data). In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=YTWGvpFOQD-"},{"key":"e_1_3_2_1_37_1","volume-title":"Opacus: User-Friendly Differential Privacy Library in PyTorch. arXiv preprint arXiv:2109.12298(2021).","author":"Yousefpour Ashkan","year":"2021","unstructured":"Ashkan Yousefpour, Igor Shilov, Alexandre Sablayrolles, Davide Testuggine, Karthik Prasad, Mani Malek, John Nguyen, Sayan Ghosh, Akash Bharadwaj, Jessica Zhao, Graham Cormode, and Ilya Mironov. 2021. Opacus: User-Friendly Differential Privacy Library in PyTorch. arXiv preprint arXiv:2109.12298(2021)."},{"key":"e_1_3_2_1_38_1","volume-title":"Differentially Private Fine-tuning of Language Models. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=Q42f0dfjECO","author":"Yu Da","year":"2022","unstructured":"Da Yu, Saurabh Naik, Arturs Backurs, Sivakanth Gopi, Huseyin A Inan, Gautam Kamath, Janardhan Kulkarni, Yin Tat Lee, Andre Manoel, Lukas Wutschitz, Sergey Yekhanin, and Huishuai Zhang. 2022. Differentially Private Fine-tuning of Language Models. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=Q42f0dfjECO"},{"key":"e_1_3_2_1_39_1","volume-title":"International Conference on Learning Representations.","author":"Yu Da","year":"2020","unstructured":"Da Yu, Huishuai Zhang, Wei Chen, and Tie-Yan Liu. 2020. Do not Let Privacy Overbill Utility: Gradient Embedding Perturbation for Private Learning. In International Conference on Learning Representations."},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"crossref","unstructured":"Sergey Zagoruyko and Nikos Komodakis. 2016. Wide residual networks. 
arXiv preprint arXiv:1605.07146(2016).","DOI":"10.5244\/C.30.87"}],"event":{"name":"KDD '25: The 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining","sponsor":["SIGMOD ACM Special Interest Group on Management of Data","SIGKDD ACM Special Interest Group on Knowledge Discovery in Data"],"location":"Toronto ON Canada","acronym":"KDD '25"},"container-title":["Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.2"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3711896.3736842","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,8,16]],"date-time":"2025-08-16T14:35:53Z","timestamp":1755354953000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3711896.3736842"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,3]]},"references-count":40,"alternative-id":["10.1145\/3711896.3736842","10.1145\/3711896"],"URL":"https:\/\/doi.org\/10.1145\/3711896.3736842","relation":{},"subject":[],"published":{"date-parts":[[2025,8,3]]},"assertion":[{"value":"2025-08-03","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}