{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:11:18Z","timestamp":1777655478496,"version":"3.51.4"},"publisher-location":"New York, NY, USA","reference-count":81,"publisher":"ACM","license":[{"start":{"date-parts":[[2021,11,12]],"date-time":"2021-11-12T00:00:00Z","timestamp":1636675200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"arl","award":["W911NF-13-2-0045"],"award-info":[{"award-number":["W911NF-13-2-0045"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2021,11,12]]},"DOI":"10.1145\/3460120.3485368","type":"proceedings-article","created":{"date-parts":[[2021,11,13]],"date-time":"2021-11-13T12:05:33Z","timestamp":1636805133000},"page":"3104-3122","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":59,"title":["Subpopulation Data Poisoning Attacks"],"prefix":"10.1145","author":[{"given":"Matthew","family":"Jagielski","sequence":"first","affiliation":[{"name":"Northeastern University, Boston, MA, USA"}]},{"given":"Giorgio","family":"Severi","sequence":"additional","affiliation":[{"name":"Northeastern University, Boston, MA, USA"}]},{"given":"Niklas","family":"Pousette Harger","sequence":"additional","affiliation":[{"name":"Northeastern University, Boston, MA, USA"}]},{"given":"Alina","family":"Oprea","sequence":"additional","affiliation":[{"name":"Northeastern University, Boston, MA, USA"}]}],"member":"320","published-online":{"date-parts":[[2021,11,13]]},"reference":[{"key":"e_1_3_2_2_1_1","volume-title":"Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics (Proceedings of Machine Learning Research), Silvia Chiappa and Roberto Calandra (Eds.)","volume":"108","author":"Bagdasaryan Eugene","year":"2020","unstructured":"Eugene Bagdasaryan, Andreas Veit, Yiqing Hua, Deborah Estrin, and Vitaly Shmatikov. 2020. How To Backdoor Federated Learning. In Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics (Proceedings of Machine Learning Research), Silvia Chiappa and Roberto Calandra (Eds.), Vol. 108. PMLR, 2938--2948. http:\/\/proceedings.mlr.press\/v108\/bagdasaryan20a.html"},{"key":"e_1_3_2_2_2_1","volume-title":"Influence Functions in Deep Learning Are Fragile. International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=xHKVVHGDOEk","author":"Basu Samyadeep","year":"2021","unstructured":"Samyadeep Basu, Phil Pope, and Soheil Feizi. 2021. Influence Functions in Deep Learning Are Fragile. International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=xHKVVHGDOEk"},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-40994-3_25"},{"key":"e_1_3_2_2_4_1","doi-asserted-by":"publisher","DOI":"10.5555\/3042573.3042761"},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1145\/2666652.2666666"},{"key":"e_1_3_2_2_6_1","volume-title":"Conference on fairness, accountability and transparency. 77--91","author":"Buolamwini Joy","year":"2018","unstructured":"Joy Buolamwini and Timnit Gebru. 2018. Gender shades: Intersectional accuracy disparities in commercial gender classification. In Conference on fairness, accountability and transparency. 
77--91."},{"key":"e_1_3_2_2_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"e_1_3_2_2_8_1","volume-title":"Towards evaluating the robustness of neural networks. In 2017 IEEE Symposium on Security and Privacy (SP)","author":"Carlini Nicholas","unstructured":"Nicholas Carlini and David Wagner. 2017b. Towards evaluating the robustness of neural networks. In 2017 IEEE Symposium on Security and Privacy (SP). IEEE, 39--57."},{"key":"e_1_3_2_2_9_1","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00009"},{"key":"e_1_3_2_2_10_1","volume-title":"Sasi Kumar Murakonda, Ehsan Kazemi, and Reza Shokri.","author":"Chang Hongyan","year":"2020","unstructured":"Hongyan Chang, Ta Duy Nguyen, Sasi Kumar Murakonda, Ehsan Kazemi, and Reza Shokri. 2020. On Adversarial Bias and the Robustness of Fair Machine Learning. arXiv preprint arXiv:2006.08669 (2020)."},{"key":"e_1_3_2_2_11_1","volume-title":"Detecting Backdoor Attacks on Deep Neural Networks by Activation Clustering. arXiv:1811.03728 [cs, stat] (Nov","author":"Chen Bryant","year":"2018","unstructured":"Bryant Chen, Wilka Carvalho, Nathalie Baracaldo, Heiko Ludwig, Benjamin Edwards, Taesung Lee, Ian Molloy, and Biplav Srivastava. 2018. Detecting Backdoor Attacks on Deep Neural Networks by Activation Clustering. arXiv:1811.03728 [cs, stat] (Nov. 2018). http:\/\/arxiv.org\/abs\/1811.03728 arXiv: 1811.03728."},{"key":"e_1_3_2_2_12_1","volume-title":"Targeted backdoor attacks on deep learning systems using data poisoning. arXiv preprint arXiv:1712.05526","author":"Chen Xinyun","year":"2017","unstructured":"Xinyun Chen, Chang Liu, Bo Li, Kimberly Lu, and Dawn Song. 2017. Targeted backdoor attacks on deep learning systems using data poisoning. arXiv preprint arXiv:1712.05526 (2017)."},{"key":"e_1_3_2_2_13_1","volume-title":"International Conference on Machine Learning. PMLR, 1310--1320","author":"Cohen Jeremy","year":"2019","unstructured":"Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. 2019. Certified adversarial robustness via randomized smoothing. In International Conference on Machine Learning. PMLR, 1310--1320."},{"key":"e_1_3_2_2_14_1","volume-title":"Explaining Transferability of Evasion and Poisoning Attacks. In 28th USENIX Security Symposium (USENIX Security 19)","author":"Demontis Ambra","year":"2019","unstructured":"Ambra Demontis, Marco Melis, Maura Pintor, Matthew Jagielski, Battista Biggio, Alina Oprea, Cristina Nita-Rotaru, and Fabio Roli. 2019. Why Do Adversarial Attacks Transfer? Explaining Transferability of Evasion and Poisoning Attacks. In 28th USENIX Security Symposium (USENIX Security 19). USENIX Association, Santa Clara, CA, 321--338. https:\/\/www.usenix.org\/conference\/usenixsecurity19\/presentation\/demontis"},{"key":"e_1_3_2_2_15_1","doi-asserted-by":"crossref","unstructured":"J. Deng W. Dong R. Socher L.-J. Li K. Li and L. Fei-Fei. 2009. ImageNet: A Large-Scale Hierarchical Image Database. In CVPR09.","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N19-1423"},{"key":"e_1_3_2_2_17_1","volume-title":"International Conference on Machine Learning. PMLR, 1596--1606","author":"Diakonikolas Ilias","year":"2019","unstructured":"Ilias Diakonikolas, Gautam Kamath, Daniel Kane, Jerry Li, Jacob Steinhardt, and Alistair Stewart. 2019. Sever: A robust meta-algorithm for stochastic optimization. In International Conference on Machine Learning. PMLR, 1596--1606."},{"key":"e_1_3_2_2_18_1","unstructured":"Dheeru Dua and Casey Graff. 2017. 
UCI Machine Learning Repository. (2017). http:\/\/archive.ics.uci.edu\/ml"},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/2090236.2090255"},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.1145\/3357713.3384290"},{"key":"e_1_3_2_2_21_1","volume-title":"International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=01olnfLIbD","author":"Geiping Jonas","year":"2021","unstructured":"Jonas Geiping, Liam H Fowl, W. Ronny Huang, Wojciech Czaja, Gavin Taylor, Michael Moeller, and Tom Goldstein. 2021. Witches' Brew: Industrial Scale Data Poisoning via Gradient Matching. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=01olnfLIbD"},{"key":"e_1_3_2_2_22_1","volume-title":"Explaining and Harnessing Adversarial Examples. In International Conference on Learning Representations. http:\/\/arxiv.org\/abs\/1412","author":"Goodfellow Ian","year":"2015","unstructured":"Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. 2015. Explaining and Harnessing Adversarial Examples. In International Conference on Learning Representations. http:\/\/arxiv.org\/abs\/1412.6572"},{"key":"e_1_3_2_2_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2909068"},{"key":"e_1_3_2_2_24_1","volume-title":"Sean Augenstein, Hubert Eichner, Chlo\u00e9 Kiddon, and Daniel Ramage.","author":"Hard Andrew","year":"2018","unstructured":"Andrew Hard, Kanishka Rao, Rajiv Mathews, Fran\u00e7oise Beaufays, Sean Augenstein, Hubert Eichner, Chlo\u00e9 Kiddon, and Daniel Ramage. 2018. Federated learning for mobile keyboard prediction. arXiv preprint arXiv:1811.03604 (2018)."},{"key":"e_1_3_2_2_25_1","unstructured":"Moritz Hardt Eric Price Nati Srebro et al. 2016. Equality of opportunity in supervised learning. In Advances in neural information processing systems. 3315--3323."},{"key":"e_1_3_2_2_26_1","volume-title":"Long short-term memory. Neural computation","author":"Hochreiter Sepp","year":"1997","unstructured":"Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, Vol. 9, 8 (1997), 1735--1780."},{"key":"e_1_3_2_2_27_1","volume-title":"Characterising bias in compressed models. arXiv preprint arXiv:2010.03058","author":"Hooker Sara","year":"2020","unstructured":"Sara Hooker, Nyalleng Moorosi, Gregory Clark, Samy Bengio, and Emily Denton. 2020. Characterising bias in compressed models. arXiv preprint arXiv:2010.03058 (2020)."},{"key":"e_1_3_2_2_28_1","volume-title":"Subpopulation Data Poisoning Attacks. In Workshop on Robust AI in Financial Services: Data, Fairness, Explainability, Trustworthiness, and Privacy.","author":"Jagielski Matthew","year":"2019","unstructured":"Matthew Jagielski, Paul Hand, and Alina Oprea. 2019. Subpopulation Data Poisoning Attacks. In Workshop on Robust AI in Financial Services: Data, Fairness, Explainability, Trustworthiness, and Privacy."},{"key":"e_1_3_2_2_29_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2018.00057"},{"key":"e_1_3_2_2_30_1","doi-asserted-by":"publisher","DOI":"10.5555\/3305381.3305576"},{"key":"e_1_3_2_2_31_1","volume-title":"Stronger data poisoning attacks break data sanitization defenses. arXiv preprint arXiv:1811.00741","author":"Koh Pang Wei","year":"2018","unstructured":"Pang Wei Koh, Jacob Steinhardt, and Percy Liang. 2018. Stronger data poisoning attacks break data sanitization defenses. 
arXiv preprint arXiv:1811.00741 (2018)."},{"key":"e_1_3_2_2_32_1","unstructured":"Pang Wei W Koh Kai-Siang Ang Hubert Teo and Percy S Liang. 2019. On the accuracy of influence functions for measuring group effects. In Advances in Neural Information Processing Systems. 5255--5265."},{"key":"e_1_3_2_2_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00277"},{"key":"e_1_3_2_2_34_1","volume-title":"Deceiving end-to-end deep learning malware detectors using adversarial examples. arXiv preprint arXiv:1802.04528","author":"Kreuk Felix","year":"2018","unstructured":"Felix Kreuk, Assi Barak, Shir Aviv-Reuven, Moran Baruch, Benny Pinkas, and Joseph Keshet. 2018. Deceiving end-to-end deep learning malware detectors using adversarial examples. arXiv preprint arXiv:1802.04528 (2018)."},{"key":"e_1_3_2_2_35_1","unstructured":"Alex Krizhevsky. 2009. Learning multiple layers of features from tiny images. Technical Report."},{"key":"e_1_3_2_2_36_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351095.3372853"},{"key":"e_1_3_2_2_37_1","unstructured":"Ram Shankar Siva Kumar David O Brien Kendra Albert Salom\u00e9 Vilj\u00f6en and Jeffrey Snover. 2019. Failure Modes in Machine Learning Systems. (2019). arxiv: cs.LG\/1911.11034"},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/SPW50608.2020.00028"},{"key":"e_1_3_2_2_39_1","unstructured":"Yann LeCun et al. LeNet-5 convolutional neural networks. ( ????)."},{"key":"e_1_3_2_2_40_1","volume-title":"International Conference on Artificial Intelligence and Statistics. PMLR, 4313--4324","author":"Li Mingchen","year":"2020","unstructured":"Mingchen Li, Mahdi Soltanolkotabi, and Samet Oymak. 2020. Gradient descent with early stopping is provably robust to label noise for overparameterized neural networks. In International Conference on Artificial Intelligence and Statistics. PMLR, 4313--4324."},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"publisher","DOI":"10.1145\/3372297.3423362"},{"key":"e_1_3_2_2_42_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-00470-5_13"},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58607-2_11"},{"key":"e_1_3_2_2_44_1","doi-asserted-by":"publisher","DOI":"10.5555\/2002472.2002491"},{"key":"e_1_3_2_2_45_1","doi-asserted-by":"publisher","DOI":"10.5555\/2886521.2886721"},{"key":"e_1_3_2_2_46_1","doi-asserted-by":"publisher","DOI":"10.5555\/2886521.2886721"},{"key":"e_1_3_2_2_47_1","doi-asserted-by":"publisher","DOI":"10.1145\/3442381.3449965"},{"key":"e_1_3_2_2_48_1","doi-asserted-by":"publisher","DOI":"10.1007\/11856214_5"},{"key":"e_1_3_2_2_49_1","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2009.191"},{"key":"e_1_3_2_2_50_1","doi-asserted-by":"publisher","DOI":"10.1145\/3052973.3053009"},{"key":"e_1_3_2_2_51_1","doi-asserted-by":"publisher","DOI":"10.5555\/1953048.2078195"},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2006.26"},{"key":"e_1_3_2_2_53_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-66415-2_4"},{"key":"e_1_3_2_2_54_1","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford Alec","year":"2019","unstructured":"Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI Blog, Vol. 1, 8 (2019), 9.","journal-title":"OpenAI Blog"},{"key":"e_1_3_2_2_55_1","volume-title":"International Conference on Machine Learning. 
PMLR, 8230--8241","author":"Rosenfeld Elan","year":"2020","unstructured":"Elan Rosenfeld, Ezra Winston, Pradeep Ravikumar, and Zico Kolter. 2020. Certified robustness to label-flipping attacks via randomized smoothing. In International Conference on Machine Learning. PMLR, 8230--8241."},{"key":"e_1_3_2_2_56_1","doi-asserted-by":"publisher","DOI":"10.1145\/1644893.1644895"},{"key":"e_1_3_2_2_57_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00474"},{"key":"e_1_3_2_2_58_1","volume-title":"Adversarial attacks against automatic speech recognition systems via psychoacoustic hiding. arXiv preprint arXiv:1808.05665","author":"Schonherr Lea","year":"2018","unstructured":"Lea Schonherr, Katharina Kohls, Steffen Zeiler, Thorsten Holz, and Dorothea Kolossa. 2018. Adversarial attacks against automatic speech recognition systems via psychoacoustic hiding. arXiv preprint arXiv:1808.05665 (2018)."},{"key":"e_1_3_2_2_59_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP40000.2020.00115"},{"key":"e_1_3_2_2_60_1","unstructured":"Ali Shafahi W Ronny Huang Mahyar Najibi Octavian Suciu Christoph Studer Tudor Dumitras and Tom Goldstein. 2018. Poison frogs! targeted clean-label poisoning attacks on neural networks. In Advances in Neural Information Processing Systems. 6103--6113."},{"key":"e_1_3_2_2_61_1","volume-title":"International Conference on Machine Learning. PMLR, 5739--5748","author":"Shen Yanyao","year":"2019","unstructured":"Yanyao Shen and Sujay Sanghavi. 2019. Learning with bad training data via iterative trimmed loss minimization. In International Conference on Machine Learning. PMLR, 5739--5748."},{"key":"e_1_3_2_2_62_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.41"},{"key":"e_1_3_2_2_63_1","volume-title":"3rd International Conference on Learning Representations, ICLR","author":"Simonyan Karen","year":"2015","unstructured":"Karen Simonyan and Andrew Zisserman. 2015. Very Deep Convolutional Networks for Large-Scale Image Recognition. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7--9, 2015, Conference Track Proceedings, Yoshua Bengio and Yann LeCun (Eds.). http:\/\/arxiv.org\/abs\/1409.1556"},{"key":"e_1_3_2_2_64_1","volume-title":"Poisoning Attacks on Algorithmic Fairness. arXiv preprint arXiv:2004.07401","author":"Solans David","year":"2020","unstructured":"David Solans, Battista Biggio, and Carlos Castillo. 2020. Poisoning Attacks on Algorithmic Fairness. arXiv preprint arXiv:2004.07401 (2020)."},{"key":"e_1_3_2_2_65_1","volume-title":"Overlearning Reveals Sensitive Attributes. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=SJeNz04tDS","author":"Song Congzheng","year":"2020","unstructured":"Congzheng Song and Vitaly Shmatikov. 2020. Overlearning Reveals Sensitive Attributes. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=SJeNz04tDS"},{"key":"e_1_3_2_2_66_1","volume-title":"Proc. IEEE Security and Privacy Symposium.","author":"Srndic N.","unstructured":"N. Srndic and P. Laskov. 2014. Practical Evasion of a Learning-Based Classifier: A Case Study. In Proc. IEEE Security and Privacy Symposium."},{"key":"e_1_3_2_2_67_1","volume-title":"Hal Daume III, and Tudor Dumitras","author":"Suciu Octavian","year":"2018","unstructured":"Octavian Suciu, Radu Marginean, Yigitcan Kaya, Hal Daume III, and Tudor Dumitras. 2018. When does machine learning FAIL? Generalized transferability for evasion and poisoning attacks. 
In 27th USENIX Security Symposium (USENIX Security 18). 1299--1316."},{"key":"e_1_3_2_2_68_1","volume-title":"International Conference on Learning Representations. http:\/\/arxiv.org\/abs\/1312","author":"Szegedy Christian","year":"2014","unstructured":"Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. 2014. Intriguing properties of neural networks. In International Conference on Learning Representations. http:\/\/arxiv.org\/abs\/1312.6199"},{"key":"e_1_3_2_2_69_1","volume-title":"International Conference on Machine Learning. PMLR, 6105--6114","author":"Tan Mingxing","year":"2019","unstructured":"Mingxing Tan and Quoc Le. 2019. EfficientNet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning. PMLR, 6105--6114."},{"key":"e_1_3_2_2_70_1","doi-asserted-by":"publisher","DOI":"10.5555\/3327757.3327896"},{"key":"e_1_3_2_2_71_1","unstructured":"Alexander Turner Dimitris Tsipras and Aleksander Madry. 2019. Clean-Label Backdoor Attacks. In ICLR."},{"key":"e_1_3_2_2_72_1","unstructured":"Ashish Vaswani Noam Shazeer Niki Parmar Jakob Uszkoreit Llion Jones Aidan N Gomez \u0141ukasz Kaiser and Illia Polosukhin. 2017. Attention is all you need. In Advances in neural information processing systems. 5998--6008."},{"key":"e_1_3_2_2_73_1","volume-title":"NNoculation: Broad Spectrum and Targeted Treatment of Backdoored DNNs. arXiv:2002.08313 [cs] (Feb","author":"Veldanda Akshaj Kumar","year":"2020","unstructured":"Akshaj Kumar Veldanda, Kang Liu, Benjamin Tan, Prashanth Krishnamurthy, Farshad Khorrami, Ramesh Karri, Brendan Dolan-Gavitt, and Siddharth Garg. 2020. NNoculation: Broad Spectrum and Targeted Treatment of Backdoored DNNs. arXiv:2002.08313 [cs] (Feb. 2020). http:\/\/arxiv.org\/abs\/2002.08313 arXiv: 2002.08313."},{"key":"e_1_3_2_2_74_1","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2019.00031"},{"key":"e_1_3_2_2_75_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"e_1_3_2_2_76_1","volume-title":"International Conference on Machine Learning. 1689--1698","author":"Xiao Huang","year":"2015","unstructured":"Huang Xiao, Battista Biggio, Gavin Brown, Giorgio Fumera, Claudia Eckert, and Fabio Roli. 2015. Is feature selection secure against training data poisoning? In International Conference on Machine Learning. 1689--1698."},{"key":"e_1_3_2_2_77_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.634"},{"key":"e_1_3_2_2_78_1","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2016.23115"},{"key":"e_1_3_2_2_79_1","volume-title":"XLNet: Generalized autoregressive pretraining for language understanding. In Advances in neural information processing systems. 5754--5764.","author":"Yang Zhilin","year":"2019","unstructured":"Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. 2019. XLNet: Generalized autoregressive pretraining for language understanding. In Advances in neural information processing systems. 
5754--5764."},{"key":"e_1_3_2_2_80_1","doi-asserted-by":"publisher","DOI":"10.1109\/CSF.2018.00027"},{"key":"e_1_3_2_2_81_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.463"}],"event":{"name":"CCS '21: 2021 ACM SIGSAC Conference on Computer and Communications Security","location":"Virtual Event Republic of Korea","acronym":"CCS '21","sponsor":["SIGSAC ACM Special Interest Group on Security, Audit, and Control"]},"container-title":["Proceedings of the 2021 ACM SIGSAC Conference on Computer and Communications Security"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3460120.3485368","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3460120.3485368","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,18]],"date-time":"2025-11-18T20:48:18Z","timestamp":1763498898000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3460120.3485368"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,11,12]]},"references-count":81,"alternative-id":["10.1145\/3460120.3485368","10.1145\/3460120"],"URL":"https:\/\/doi.org\/10.1145\/3460120.3485368","relation":{},"subject":[],"published":{"date-parts":[[2021,11,12]]},"assertion":[{"value":"2021-11-13","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}