{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,14]],"date-time":"2026-02-14T10:22:32Z","timestamp":1771064552201,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":69,"publisher":"ACM","license":[{"start":{"date-parts":[[2021,11,12]],"date-time":"2021-11-12T00:00:00Z","timestamp":1636675200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/501100009318","name":"Helmholtz Association","doi-asserted-by":"publisher","award":["ZT-I-OO1 4"],"award-info":[{"award-number":["ZT-I-OO1 4"]}],"id":[{"id":"10.13039\/501100009318","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2021,11,12]]},"DOI":"10.1145\/3460120.3484571","type":"proceedings-article","created":{"date-parts":[[2021,11,13]],"date-time":"2021-11-13T12:05:27Z","timestamp":1636805127000},"page":"845-863","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":28,"title":["Quantifying and Mitigating Privacy Risks of Contrastive Learning"],"prefix":"10.1145","author":[{"given":"Xinlei","family":"He","sequence":"first","affiliation":[{"name":"CISPA Helmholtz Center for Information Security, Saarbr\u00fccken, Germany"}]},{"given":"Yang","family":"Zhang","sequence":"additional","affiliation":[{"name":"CISPA Helmholtz Center for Information Security, Saarbr\u00fccken, Germany"}]}],"member":"320","published-online":{"date-parts":[[2021,11,13]]},"reference":[{"key":"e_1_3_2_1_1_1","unstructured":"https:\/\/www.cs.toronto.edu\/~kriz\/cifar.html."},{"key":"e_1_3_2_1_2_1","unstructured":"https:\/\/github.com\/Trusted-AI\/adversarial-robustness-toolbox."},{"key":"e_1_3_2_1_3_1","first-page":"363","volume-title":"ACM SIGSAC Conference on Computer and Communications Security (CCS)","author":"Santiago~Zanella B\u00e9","year":"2020","unstructured":"Santiago~Zanella B\u00e9guelin, Lukas Wutschitz, Shruti Tople, Victor R\u00fchle, Andrew Paverd, Olga Ohrimenko, Boris K\u00f6pf, and Marc Brockschmidt. Analyzing Information Leakage of Updates to Natural Language Models. In ACM SIGSAC Conference on Computer and Communications Security (CCS), pages 363--375. ACM, 2020."},{"key":"e_1_3_2_1_4_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-40994-3_25"},{"key":"e_1_3_2_1_5_1","unstructured":"Nicholas Carlini Florian Tram\u00e8r Eric Wallace Matthew Jagielski Ariel Herbert-Voss Katherine Lee Adam Roberts Tom~B. Brown Dawn Song \u00dalfar Erlingsson Alina Oprea and Colin Raffel. Extracting Training Data from Large Language Models. CoRR abs\/2012.07805 2020."},{"key":"e_1_3_2_1_6_1","first-page":"39","volume-title":"Carlini and David Wagner. Towards Evaluating the Robustness of Neural Networks. In IEEE Symposium on Security and Privacy (S&P)","author":"Nicholas","year":"2017","unstructured":"Nicholas Carlini and David Wagner. Towards Evaluating the Robustness of Neural Networks. In IEEE Symposium on Security and Privacy (S&P), pages 39--57. IEEE, 2017."},{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/3372297.3417238"},{"key":"e_1_3_2_1_8_1","volume-title":"When Machine Unlearning Jeopardizes Privacy. CoRR abs\/2005.02205","author":"Chen Min","year":"2020","unstructured":"Min Chen, Zhikun Zhang, Tianhao Wang, Michael Backes, Mathias Humbert, and Yang Zhang. When Machine Unlearning Jeopardizes Privacy. CoRR abs\/2005.02205, 2020."},{"key":"e_1_3_2_1_9_1","first-page":"1597","volume-title":"Geoffrey~E. Hinton. A Simple Framework for Contrastive Learning of Visual Representations. In International Conference on Machine Learning (ICML)","author":"Chen Ting","year":"2020","unstructured":"Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey~E. Hinton. A Simple Framework for Contrastive Learning of Visual Representations. In International Conference on Machine Learning (ICML), pages 1597--1607. PMLR, 2020."},{"key":"e_1_3_2_1_10_1","volume-title":"Label-Only Membership Inference Attacks. CoRR abs\/2007.14321","author":"Choquette Choo Christopher A.","year":"2020","unstructured":"Christopher A. Choquette Choo, Florian Tram\u00e8r, Nicholas Carlini, and Nicolas Papernot. Label-Only Membership Inference Attacks. CoRR abs\/2007.14321, 2020."},{"key":"e_1_3_2_1_11_1","first-page":"215","volume-title":"Honglak Lee. An Analysis of Single-Layer Networks in Unsupervised Feature Learning. In International Conference on Artificial Intelligence and Statistics (AISTATS)","author":"Coates Adam","year":"2011","unstructured":"Adam Coates, Andrew~Y. Ng, and Honglak Lee. An Analysis of Single-Layer Networks in Unsupervised Feature Learning. In International Conference on Artificial Intelligence and Statistics (AISTATS), pages 215--223. JMLR, 2011."},{"key":"e_1_3_2_1_12_1","first-page":"1","volume-title":"Shay~B. Cohen. Privacy-preserving Neural Representations of Text. In Conference on Empirical Methods in Natural Language Processing (EMNLP)","author":"Coavoux Maximin","year":"2018","unstructured":"Maximin Coavoux, Shashi Narayan, and Shay~B. Cohen. Privacy-preserving Neural Representations of Text. In Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1--10. ACL, 2018."},{"key":"e_1_3_2_1_13_1","volume-title":"International Conference on Learning Representations (ICLR)","author":"Edwards Harrison","year":"2016","unstructured":"Harrison Edwards and Amos~J. Storkey. Censoring Representations with an Adversary. In International Conference on Learning Representations (ICLR), 2016."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1002"},{"key":"e_1_3_2_1_15_1","first-page":"1180","volume-title":"International Conference on Machine Learning (ICML)","author":"Ganin Yaroslav","year":"2015","unstructured":"Yaroslav Ganin and Victor~S. Lempitsky. Unsupervised Domain Adaptation by Backpropagation. In International Conference on Machine Learning (ICML), pages 1180--1189. JMLR, 2015."},{"key":"e_1_3_2_1_16_1","first-page":"879","volume-title":"Gary~D. Bader. DeCLUTR: Deep Contrastive Learning for Unsupervised Textual Representations. In Annual Meeting of the Association for Computational Linguistics (ACL)","author":"Giorgi M.","year":"2021","unstructured":"John~M. Giorgi, Osvald Nitski, Bo~Wang, and Gary~D. Bader. DeCLUTR: Deep Contrastive Learning for Unsupervised Textual Representations. In Annual Meeting of the Association for Computational Linguistics (ACL), pages 879--895. ACL, 2021."},{"key":"e_1_3_2_1_17_1","first-page":"2672","volume-title":"Yoshua Bengio. Generative Adversarial Nets. In Annual Conference on Neural Information Processing Systems (NIPS)","author":"Goodfellow Ian","year":"2014","unstructured":"Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative Adversarial Nets. In Annual Conference on Neural Information Processing Systems (NIPS), pages 2672--2680. NIPS, 2014."},{"key":"e_1_3_2_1_18_1","first-page":"297","volume-title":"Noise-Contrastive Estimation: A New Estimation Principle for Unnormalized Statistical Models. In International Conference on Artificial Intelligence and Statistics (AISTATS)","author":"Gutmann Michael","year":"2010","unstructured":"Michael Gutmann and Aapo Hyv\u00e4rinen. Noise-Contrastive Estimation: A New Estimation Principle for Unnormalized Statistical Models. In International Conference on Artificial Intelligence and Statistics (AISTATS), pages 297--304. JMLR, 2010."},{"key":"e_1_3_2_1_19_1","volume-title":"Emiliano~De Cristofaro. LOGAN: Evaluating Privacy Leakage of Generative Models Using Generative Adversarial Networks. Symposium on Privacy Enhancing Technologies Symposium","author":"Hayes Jamie","year":"2019","unstructured":"Jamie Hayes, Luca Melis, George Danezis, and Emiliano~De Cristofaro. LOGAN: Evaluating Privacy Leakage of Generative Models Using Generative Adversarial Networks. Symposium on Privacy Enhancing Technologies Symposium, 2019."},{"key":"e_1_3_2_1_20_1","first-page":"9726","volume-title":"Ross~B. Girshick. Momentum Contrast for Unsupervised Visual Representation Learning. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","author":"He Kaiming","year":"2020","unstructured":"Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross~B. Girshick. Momentum Contrast for Unsupervised Visual Representation Learning. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 9726--9735. IEEE, 2020."},{"key":"e_1_3_2_1_21_1","first-page":"770","volume-title":"Jian Sun. Deep Residual Learning for Image Recognition. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","author":"He Kaiming","year":"2016","unstructured":"Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep Residual Learning for Image Recognition. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770--778. IEEE, 2016."},{"key":"e_1_3_2_1_22_1","volume-title":"USENIX Security Symposium (USENIX Security). USENIX","author":"He Xinlei","year":"2021","unstructured":"Xinlei He, Jinyuan Jia, Michael Backes, Neil~Zhenqiang Gong, and Yang Zhang. Stealing Links from Graph Neural Networks. In USENIX Security Symposium (USENIX Security). USENIX, 2021."},{"key":"e_1_3_2_1_23_1","volume-title":"Node-Level Membership Inference Attacks Against Graph Neural Networks. CoRR abs\/2102.05429","author":"He Xinlei","year":"2021","unstructured":"Xinlei He, Rui Wen, Yixin Wu, Michael Backes, Yun Shen, and Yang Zhang. Node-Level Membership Inference Attacks Against Graph Neural Networks. CoRR abs\/2102.05429, 2021."},{"key":"e_1_3_2_1_24_1","volume-title":"International Conference on Learning Representations (ICLR)","author":"Hjelm R. Devon","year":"2019","unstructured":"R. Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Philip Bachman, Adam Trischler, and Yoshua Bengio. Learning Deep Representations by Mutual Information Estimation and Maximization. In International Conference on Learning Representations (ICLR), 2019."},{"key":"e_1_3_2_1_25_1","first-page":"1345","volume-title":"Nicolas Papernot. High Accuracy and High Fidelity Extraction of Neural Networks. In USENIX Security Symposium (USENIX Security)","author":"Jagielski Matthew","year":"2020","unstructured":"Matthew Jagielski, Nicholas Carlini, David Berthelot, Alex Kurakin, and Nicolas Papernot. High Accuracy and High Fidelity Extraction of Neural Networks. In USENIX Security Symposium (USENIX Security), pages 1345--1362. USENIX, 2020."},{"key":"e_1_3_2_1_26_1","first-page":"19","volume-title":"Bo~Li. Manipulating Machine Learning: Poisoning Attacks and Countermeasures for Regression Learning. In IEEE Symposium on Security and Privacy (S&P)","author":"Jagielski Matthew","year":"2018","unstructured":"Matthew Jagielski, Alina Oprea, Battista Biggio, Chang Liu, Cristina Nita-Rotaru, and Bo~Li. Manipulating Machine Learning: Poisoning Attacks and Countermeasures for Regression Learning. In IEEE Symposium on Security and Privacy (S&P), pages 19--35. IEEE, 2018."},{"key":"e_1_3_2_1_27_1","first-page":"513","volume-title":"USENIX Security Symposium (USENIX Security)","author":"Jia Jinyuan","year":"2018","unstructured":"Jinyuan Jia and Neil~Zhenqiang Gong. AttriGuard: A Practical Defense Against Attribute Inference Attacks via Adversarial Machine Learning. In USENIX Security Symposium (USENIX Security), pages 513--529. USENIX, 2018."},{"key":"e_1_3_2_1_28_1","first-page":"259","volume-title":"ACM SIGSAC Conference on Computer and Communications Security (CCS)","author":"Jia Jinyuan","year":"2019","unstructured":"Jinyuan Jia, Ahmed Salem, Michael Backes, Yang Zhang, and Neil~Zhenqiang Gong. MemGuard: Defending against Black-Box Membership Inference Attacks via Adversarial Examples. In ACM SIGSAC Conference on Computer and Communications Security (CCS), pages 259--274. ACM, 2019."},{"key":"e_1_3_2_1_29_1","volume-title":"Sub-graph Contrast for Scalable Self-Supervised Graph Representation Learning. CoRR abs\/2009.10273","author":"Jiao Yizhu","year":"2020","unstructured":"Yizhu Jiao, Yun Xiong, Jiawei Zhang, Yao Zhang, Tianqi Zhang, and Yangyong Zhu. Sub-graph Contrast for Scalable Self-Supervised Graph Representation Learning. CoRR abs\/2009.10273, 2020."},{"key":"e_1_3_2_1_30_1","volume-title":"International Conference on Learning Representations (ICLR)","author":"Krishna Kalpesh","year":"2020","unstructured":"Kalpesh Krishna, Gaurav~Singh Tomar, Ankur~P. Parikh, Nicolas Papernot, and Mohit Iyyer. Thieves on Sesame Street! Model Extraction of BERT-based APIs. In International Conference on Learning Representations (ICLR), 2020."},{"key":"e_1_3_2_1_31_1","first-page":"1605","volume-title":"Leino and Matt Fredrikson. Stolen Memories: Leveraging Model Memorization for Calibrated White-Box Membership Inference. In USENIX Security Symposium (USENIX Security)","author":"Klas","year":"2020","unstructured":"Klas Leino and Matt Fredrikson. Stolen Memories: Leveraging Model Memorization for Calibrated White-Box Membership Inference. In USENIX Security Symposium (USENIX Security), pages 1605--1622. USENIX, 2020."},{"key":"e_1_3_2_1_32_1","volume-title":"Deep Learning Backdoors. CoRR abs\/2007.08273","author":"Li Shaofeng","year":"2020","unstructured":"Shaofeng Li, Shiqing Ma, Minhui Xue, and Benjamin Zi~Hao Zhao. Deep Learning Backdoors. CoRR abs\/2007.08273, 2020."},{"key":"e_1_3_2_1_33_1","volume-title":"Li and Yang Zhang. Membership Leakage in Label-Only Exposures. In ACM SIGSAC Conference on Computer and Communications Security (CCS). ACM","author":"Zheng","year":"2021","unstructured":"Zheng Li and Yang Zhang. Membership Leakage in Label-Only Exposures. In ACM SIGSAC Conference on Computer and Communications Security (CCS). ACM, 2021."},{"key":"e_1_3_2_1_34_1","volume-title":"Self-supervised Learning: Generative or Contrastive. CoRR abs\/2006.08218","author":"Liu Xiao","year":"2020","unstructured":"Xiao Liu, Fanjin Zhang, Zhenyu Hou, Zhaoyu Wang, Li~Mian, Jing Zhang, and Jie Tang. Self-supervised Learning: Generative or Contrastive. CoRR abs\/2006.08218, 2020."},{"key":"e_1_3_2_1_35_1","first-page":"3730","volume-title":"Xiaoou Tang. Deep Learning Face Attributes in the Wild. In IEEE International Conference on Computer Vision (ICCV)","author":"Liu Ziwei","year":"2015","unstructured":"Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep Learning Face Attributes in the Wild. In IEEE International Conference on Computer Vision (ICCV), pages 3730--3738. IEEE, 2015."},{"key":"e_1_3_2_1_36_1","first-page":"497","volume-title":"Vitaly Shmatikov. Exploiting Unintended Feature Leakage in Collaborative Learning. In IEEE Symposium on Security and Privacy (S&P)","author":"Melis Luca","year":"2019","unstructured":"Luca Melis, Congzheng Song, Emiliano~De Cristofaro, and Vitaly Shmatikov. Exploiting Unintended Feature Leakage in Collaborative Learning. In IEEE Symposium on Security and Privacy (S&P), pages 497--512. IEEE, 2019."},{"key":"e_1_3_2_1_37_1","doi-asserted-by":"publisher","DOI":"10.1145\/3243734.3243855"},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00065"},{"key":"e_1_3_2_1_39_1","volume-title":"Nicholas Carlini. Adversary Instantiation: Lower Bounds for Differentially Private Machine Learning. In IEEE Symposium on Security and Privacy (S&P). IEEE","author":"Nasr Milad","year":"2021","unstructured":"Milad Nasr, Shuang Song, Abhradeep Thakurta, Nicolas Papernot, and Nicholas Carlini. Adversary Instantiation: Lower Bounds for Differentially Private Machine Learning. In IEEE Symposium on Security and Privacy (S&P). IEEE, 2021."},{"key":"e_1_3_2_1_40_1","volume-title":"Mario Fritz. Towards Reverse-Engineering Black-Box Neural Networks. In International Conference on Learning Representations (ICLR)","author":"Oh Seong~Joon","year":"2018","unstructured":"Seong~Joon Oh, Max Augustin, Bernt Schiele, and Mario Fritz. Towards Reverse-Engineering Black-Box Neural Networks. In International Conference on Learning Representations (ICLR), 2018."},{"key":"e_1_3_2_1_41_1","first-page":"4954","volume-title":"Mario Fritz. Knockoff Nets: Stealing Functionality of Black-Box Models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","author":"Orekondy Tribhuvanesh","year":"2019","unstructured":"Tribhuvanesh Orekondy, Bernt Schiele, and Mario Fritz. Knockoff Nets: Stealing Functionality of Black-Box Models. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4954--4963. IEEE, 2019."},{"key":"e_1_3_2_1_42_1","first-page":"1471","volume-title":"Min Yang. Privacy Risks of General-Purpose Language Models. In IEEE Symposium on Security and Privacy (S&P)","author":"Pan Xudong","year":"2020","unstructured":"Xudong Pan, Mi~Zhang, Shouling Ji, and Min Yang. Privacy Risks of General-Purpose Language Models. In IEEE Symposium on Security and Privacy (S&P), pages 1471--1488. IEEE, 2020."},{"key":"e_1_3_2_1_43_1","first-page":"399","volume-title":"Michael Wellman. SoK: Towards the Science of Security and Privacy in Machine Learning. In IEEE European Symposium on Security and Privacy (Euro S&P)","author":"Papernot Nicolas","year":"2018","unstructured":"Nicolas Papernot, Patrick McDaniel, Arunesh Sinha, and Michael Wellman. SoK: Towards the Science of Security and Privacy in Machine Learning. In IEEE European Symposium on Security and Privacy (Euro S&P), pages 399--414. IEEE, 2018."},{"key":"e_1_3_2_1_44_1","first-page":"372","volume-title":"Ananthram Swami. The Limitations of Deep Learning in Adversarial Settings. In IEEE European Symposium on Security and Privacy (Euro S&P)","author":"Papernot Nicolas","year":"2016","unstructured":"Nicolas Papernot, Patrick~D. McDaniel, Somesh Jha, Matt Fredrikson, Z. Berkay Celik, and Ananthram Swami. The Limitations of Deep Learning in Adversarial Settings. In IEEE European Symposium on Security and Privacy (Euro S&P), pages 372--387. IEEE, 2016."},{"key":"e_1_3_2_1_45_1","volume-title":"Erlingsson. Scalable Private Learning with PATE. In International Conference on Learning Representations (ICLR)","author":"Papernot Nicolas","year":"2018","unstructured":"Nicolas Papernot, Shuang Song, Ilya Mironov, Ananth Raghunathan, Kunal Talwar, and \u00dalfar Erlingsson. Scalable Private Learning with PATE. In International Conference on Learning Representations (ICLR), 2018."},{"key":"e_1_3_2_1_46_1","doi-asserted-by":"publisher","DOI":"10.2478\/popets-2019-0002"},{"key":"e_1_3_2_1_47_1","volume-title":"ImageNet Large Scale Visual Recognition Challenge. CoRR abs\/1409.0575","author":"Russakovsky Olga","year":"2015","unstructured":"Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander~C. Berg, and Li~Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. CoRR abs\/1409.0575, 2015."},{"key":"e_1_3_2_1_48_1","first-page":"1291","volume-title":"Yang Zhang. Updates-Leak: Data Set Inference and Reconstruction Attacks in Online Learning. In USENIX Security Symposium (USENIX Security)","author":"Salem Ahmed","year":"2020","unstructured":"Ahmed Salem, Apratim Bhattacharya, Michael Backes, Mario Fritz, and Yang Zhang. Updates-Leak: Data Set Inference and Reconstruction Attacks in Online Learning. In USENIX Security Symposium (USENIX Security), pages 1291--1308. USENIX, 2020."},{"key":"e_1_3_2_1_49_1","volume-title":"Michael Backes. ML-Leaks: Model and Data Independent Membership Inference Attacks and Defenses on Machine Learning Models. In Network and Distributed System Security Symposium (NDSS). Internet Society","author":"Salem Ahmed","year":"2019","unstructured":"Ahmed Salem, Yang Zhang, Mathias Humbert, Pascal Berrang, Mario Fritz, and Michael Backes. ML-Leaks: Model and Data Independent Membership Inference Attacks and Defenses on Machine Learning Models. In Network and Distributed System Security Symposium (NDSS). Internet Society, 2019."},{"key":"e_1_3_2_1_50_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"e_1_3_2_1_51_1","volume-title":"You Autocomplete Me: Poisoning Vulnerabilities in Neural Code Completion. CoRR abs\/2007.02220","author":"Schuster Roei","year":"2020","unstructured":"Roei Schuster, Congzheng Song, Eran Tromer, and Vitaly Shmatikov. You Autocomplete Me: Poisoning Vulnerabilities in Neural Code Completion. CoRR abs\/2007.02220, 2020."},{"key":"e_1_3_2_1_52_1","first-page":"3","volume-title":"Vitaly Shmatikov. Membership Inference Attacks Against Machine Learning Models. In IEEE Symposium on Security and Privacy (S&P)","author":"Shokri Reza","year":"2017","unstructured":"Reza Shokri, Marco Stronati, Congzheng Song, and Vitaly Shmatikov. Membership Inference Attacks Against Machine Learning Models. In IEEE Symposium on Security and Privacy (S&P), pages 3--18. IEEE, 2017."},{"key":"e_1_3_2_1_53_1","first-page":"377","volume-title":"Song and Ananth Raghunathan. Information Leakage in Embedding Models. In ACM SIGSAC Conference on Computer and Communications Security (CCS)","author":"Congzheng","year":"2020","unstructured":"Congzheng Song and Ananth Raghunathan. Information Leakage in Embedding Models. In ACM SIGSAC Conference on Computer and Communications Security (CCS), pages 377--390. ACM, 2020."},{"key":"e_1_3_2_1_54_1","doi-asserted-by":"publisher","DOI":"10.1145\/3133956.3134077"},{"key":"e_1_3_2_1_55_1","first-page":"196","volume-title":"Song and Vitaly Shmatikov. Auditing Data Provenance in Text-Generation Models. In ACM Conference on Knowledge Discovery and Data Mining (KDD)","author":"Congzheng","year":"2019","unstructured":"Congzheng Song and Vitaly Shmatikov. Auditing Data Provenance in Text-Generation Models. In ACM Conference on Knowledge Discovery and Data Mining (KDD), pages 196--206. ACM, 2019."},{"key":"e_1_3_2_1_56_1","volume-title":"Song and Vitaly Shmatikov. Overlearning Reveals Sensitive Attributes. In International Conference on Learning Representations (ICLR)","author":"Congzheng","year":"2020","unstructured":"Congzheng Song and Vitaly Shmatikov. Overlearning Reveals Sensitive Attributes. In International Conference on Learning Representations (ICLR), 2020."},{"key":"e_1_3_2_1_57_1","volume-title":"Song and Prateek Mittal. Systematic Evaluation of Privacy Risks of Machine Learning Models. In USENIX Security Symposium (USENIX Security). USENIX","author":"Liwei","year":"2021","unstructured":"Liwei Song and Prateek Mittal. Systematic Evaluation of Privacy Risks of Machine Learning Models. In USENIX Security Symposium (USENIX Security). USENIX, 2021."},{"key":"e_1_3_2_1_58_1","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354211"},{"key":"e_1_3_2_1_59_1","volume-title":"Patrick McDaniel. Ensemble Adversarial Training: Attacks and Defenses. In International Conference on Learning Representations (ICLR)","author":"Tram\u00e8r Florian","year":"2017","unstructured":"Florian Tram\u00e8r, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. Ensemble Adversarial Training: Attacks and Defenses. In International Conference on Learning Representations (ICLR), 2017."},{"key":"e_1_3_2_1_60_1","first-page":"601","volume-title":"USENIX Security Symposium (USENIX Security)","author":"Tram\u00e8r Florian","year":"2016","unstructured":"Florian Tram\u00e8r, Fan Zhang, Ari Juels, Michael~K. Reiter, and Thomas Ristenpart. Stealing Machine Learning Models via Prediction APIs. In USENIX Security Symposium (USENIX Security), pages 601--618. USENIX, 2016."},{"key":"e_1_3_2_1_61_1","volume-title":"Representation Learning with Contrastive Predictive Coding. CoRR abs\/1807.03748","author":"Oord ~den","year":"2018","unstructured":"A\u00e4ron van~den Oord, Yazhe Li, and Oriol Vinyals. Representation Learning with Contrastive Predictive Coding. CoRR abs\/1807.03748, 2018."},{"key":"e_1_3_2_1_62_1","volume-title":"Journal of Machine Learning Research","author":"van~der Maaten Laurens","year":"2008","unstructured":"Laurens van~der Maaten and Geoffrey Hinton. Visualizing Data using t-SNE. Journal of Machine Learning Research, 2008."},{"key":"e_1_3_2_1_63_1","first-page":"36","volume-title":"Wang and Neil~Zhenqiang Gong. Stealing Hyperparameters in Machine Learning. In IEEE Symposium on Security and Privacy (S&P)","author":"Binghui","year":"2018","unstructured":"Binghui Wang and Neil~Zhenqiang Gong. Stealing Hyperparameters in Machine Learning. In IEEE Symposium on Security and Privacy (S&P), pages 36--52. IEEE, 2018."},{"key":"e_1_3_2_1_64_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00393"},{"key":"e_1_3_2_1_65_1","first-page":"585","volume-title":"Annual Conference on Neural Information Processing Systems (NIPS)","author":"Xie Qizhe","year":"2017","unstructured":"Qizhe Xie, Zihang Dai, Yulun Du, Eduard~H. Hovy, and Graham Neubig. Controllable Invariance through Adversarial Feature Learning. In Annual Conference on Neural Information Processing Systems (NIPS), pages 585--596. NIPS, 2017."},{"key":"e_1_3_2_1_66_1","doi-asserted-by":"publisher","DOI":"10.1109\/CSF.2018.00027"},{"key":"e_1_3_2_1_67_1","volume-title":"Yang Shen. Graph Contrastive Learning with Augmentations. In Annual Conference on Neural Information Processing Systems (NeurIPS). NeurIPS","author":"You Yuning","year":"2020","unstructured":"Yuning You, Tianlong Chen, Yongduo Sui, Ting Chen, Zhangyang Wang, and Yang Shen. Graph Contrastive Learning with Augmentations. In Annual Conference on Neural Information Processing Systems (NeurIPS). NeurIPS, 2020."},{"key":"e_1_3_2_1_68_1","first-page":"4352","volume-title":"Conditional Adversarial Autoencoder. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","author":"Zhang Zhifei","year":"2017","unstructured":"Zhifei Zhang, Yang Song, and Hairong Qi. Age Progression\/Regression by Conditional Adversarial Autoencoder. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 4352--4360. IEEE, 2017."},{"key":"e_1_3_2_1_69_1","volume-title":"Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 Million Image Database for Scene Recognition","author":"Zhou Bolei","year":"2018","unstructured":"Bolei Zhou, \u00c0gata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 Million Image Database for Scene Recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2018."}],"event":{"name":"CCS '21: 2021 ACM SIGSAC Conference on Computer and Communications Security","location":"Virtual Event Republic of Korea","acronym":"CCS '21","sponsor":["SIGSAC ACM Special Interest Group on Security, Audit, and Control"]},"container-title":["Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3460120.3484571","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3460120.3484571","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,18]],"date-time":"2025-11-18T20:44:18Z","timestamp":1763498658000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3460120.3484571"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,11,12]]},"references-count":69,"alternative-id":["10.1145\/3460120.3484571","10.1145\/3460120"],"URL":"https:\/\/doi.org\/10.1145\/3460120.3484571","relation":{},"subject":[],"published":{"date-parts":[[2021,11,12]]},"assertion":[{"value":"2021-11-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}