{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T04:46:52Z","timestamp":1750308412346,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":40,"publisher":"ACM","license":[{"start":{"date-parts":[[2018,10,22]],"date-time":"2018-10-22T00:00:00Z","timestamp":1540166400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2018,10,22]]},"DOI":"10.1145\/3207677.3277988","type":"proceedings-article","created":{"date-parts":[[2018,10,18]],"date-time":"2018-10-18T14:19:29Z","timestamp":1539872369000},"page":"1-7","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":2,"title":["Attacks and Defenses towards Machine Learning Based Systems"],"prefix":"10.1145","author":[{"given":"Yingchao","family":"Yu","sequence":"first","affiliation":[{"name":"Jiangnan Institute of Computing Technology, Wuxi, China"}]},{"given":"Xueyong","family":"Liu","sequence":"additional","affiliation":[{"name":"Jiangnan Institute of Computing Technology, Wuxi, China"}]},{"given":"Zuoning","family":"Chen","sequence":"additional","affiliation":[{"name":"National Research Center of Parallel Computer Engineering and Technology, Wuxi, China"}]}],"member":"320","published-online":{"date-parts":[[2018,10,22]]},"reference":[{"volume-title":"International Conference on Machine Learning (ICML).","author":"Biggio B.","key":"e_1_3_2_1_1_1"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_2_1","DOI":"10.1137\/0222052"},{"unstructured":"https:\/\/www.theguardian.com\/technology\/2016\/mar\/26\/microsoft-deeply-sorry-for-offensive-tweets-by-ai-chatbot  https:\/\/www.theguardian.com\/technology\/2016\/mar\/26\/microsoft-deeply-sorry-for-offensive-tweets-by-ai-chatbot","key":"e_1_3_2_1_3_1"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_4_1","DOI":"10.1145\/2046684.2046692"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_5_1","DOI":"10.1145\/1644893.1644895"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_6_1","DOI":"10.1145\/1128817.1128824"},{"doi-asserted-by":"crossref","unstructured":"Biggio B Fumera G and Roli F. 2010. Multiple classifier systems for robust classifier design in adversarial environments. International Journal of Machine Learning and Cybernetics.  Biggio B Fumera G and Roli F. 2010. Multiple classifier systems for robust classifier design in adversarial environments. International Journal of Machine Learning and Cybernetics.","key":"e_1_3_2_1_7_1","DOI":"10.1109\/ICSMC.2011.6083796"},{"doi-asserted-by":"crossref","unstructured":"Biggio B Corona I and Fumera G et al. 2011. Bagging classifiers for fighting poisoning attacks in adversarial classification tasks {C}\/\/LNCS 6713: Proceeding of the 10th International Workshop on Multiple Classifier Systems 350--359.   Biggio B Corona I and Fumera G et al. 2011. Bagging classifiers for fighting poisoning attacks in adversarial classification tasks {C}\/\/LNCS 6713: Proceeding of the 10th International Workshop on Multiple Classifier Systems 350--359.","key":"e_1_3_2_1_8_1","DOI":"10.1007\/978-3-642-21557-5_37"},{"unstructured":"Google Cloud Platform: CLOUD AI. https:\/\/cloud. google.com\/products\/machine-learning  Google Cloud Platform: CLOUD AI. https:\/\/cloud. google.com\/products\/machine-learning","key":"e_1_3_2_1_9_1"},{"unstructured":"Amazon machine learning. https:\/\/aws.amazon.com\/ machine-learning\/  Amazon machine learning. https:\/\/aws.amazon.com\/ machine-learning\/","key":"e_1_3_2_1_10_1"},{"unstructured":"BigML. https:\/\/bigml.com.  BigML. https:\/\/bigml.com.","key":"e_1_3_2_1_11_1"},{"volume-title":"International Conference on Learning Representations.","year":"2017","author":"Papernot Nicolas","key":"e_1_3_2_1_12_1"},{"unstructured":"T. Hunt C. Song R. Shokri V. Shmatikov and E. Witchel. 2018. \"Chiron: Privacy-preserving Machine Learning as a Service \" arXiv:1803.05961.  T. Hunt C. Song R. Shokri V. Shmatikov and E. Witchel. 2018. \"Chiron: Privacy-preserving Machine Learning as a Service \" arXiv:1803.05961.","key":"e_1_3_2_1_13_1"},{"unstructured":"Olga Ohrimenko Felix Schuster Cedric Fournet Aastha Mehta Sebastian Nowozin Kapil Vaswani and Manuel Costa. 2016. Oblivious multi-party machine learning on trusted processors.  Olga Ohrimenko Felix Schuster Cedric Fournet Aastha Mehta Sebastian Nowozin Kapil Vaswani and Manuel Costa. 2016. Oblivious multi-party machine learning on trusted processors.","key":"e_1_3_2_1_14_1"},{"unstructured":"Florian Tram\u00e8r Alexey Kurakin Nicolas Papernot Dan Boneh and Patrick McDaniel. 2017. Ensemble adversarial training: Attacks and defenses. arXiv preprint arXiv:1705.07204.  Florian Tram\u00e8r Alexey Kurakin Nicolas Papernot Dan Boneh and Patrick McDaniel. 2017. Ensemble adversarial training: Attacks and defenses. arXiv preprint arXiv:1705.07204.","key":"e_1_3_2_1_15_1"},{"unstructured":"C. Szegedy W. Zaremba I. Sutskever J. Bruna D. Erhan I. Goodfellow and R. Fergus. 2016. \"Intriguing Properties of Neural Networks\". https:\/\/arxiv.org\/pdf\/1312.6199v4.pdf  C. Szegedy W. Zaremba I. Sutskever J. Bruna D. Erhan I. Goodfellow and R. Fergus. 2016. \"Intriguing Properties of Neural Networks\". https:\/\/arxiv.org\/pdf\/1312.6199v4.pdf","key":"e_1_3_2_1_16_1"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_17_1","DOI":"10.1145\/2976749.2978392"},{"doi-asserted-by":"crossref","unstructured":"Grosse K Papernot N and Manoharan P et al. 2016. Adversarial Perturbations Against Deep Neural Networks for Malware Classification.  Grosse K Papernot N and Manoharan P et al. 2016. Adversarial Perturbations Against Deep Neural Networks for Malware Classification.","key":"e_1_3_2_1_18_1","DOI":"10.1109\/SP.2016.41"},{"doi-asserted-by":"crossref","unstructured":"Pranav Rajpurkar Jian Zhang Konstantin Lopyrev and Percy Liang. 2016. Squad: 100 000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250.  Pranav Rajpurkar Jian Zhang Konstantin Lopyrev and Percy Liang. 2016. Squad: 100 000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250.","key":"e_1_3_2_1_19_1","DOI":"10.18653\/v1\/D16-1264"},{"unstructured":"I. Evtimov et al. 2017. \"Robust Physical-World Attacks on Deep Learning Models \" arXiv:1707.08945 {cs}.  I. Evtimov et al. 2017. \"Robust Physical-World Attacks on Deep Learning Models \" arXiv:1707.08945 {cs}.","key":"e_1_3_2_1_20_1"},{"unstructured":"C. Szegedy W. Zaremba I. Sutskever J. Bruna D. Erhan I. Goodfellow and R. Fergus. 2013. \"Intriguing properties of neural networks \" arXiv \u2028preprint arXiv:1312.6199.  C. Szegedy W. Zaremba I. Sutskever J. Bruna D. Erhan I. Goodfellow and R. Fergus. 2013. \"Intriguing properties of neural networks \" arXiv \u2028preprint arXiv:1312.6199.","key":"e_1_3_2_1_21_1"},{"unstructured":"I.J.Goodfellow J.Shlens andC.Szegedy. 2014. \"Explaining and harnessing adversarial examples \" arXiv preprint arXiv:1412.6572.  I.J.Goodfellow J.Shlens andC.Szegedy. 2014. \"Explaining and harnessing adversarial examples \" arXiv preprint arXiv:1412.6572.","key":"e_1_3_2_1_22_1"},{"volume-title":"Proceedings of 5th International Conference on Learning Representations (ICLR).","author":"Metzen J. H.","key":"e_1_3_2_1_23_1"},{"doi-asserted-by":"crossref","unstructured":"J. Lu T. Issaranon and D. Forsyth. 2017. \"Safetynet: Detecting and rejecting \u2028adversarial examples robustly \" ICCV.  J. Lu T. Issaranon and D. Forsyth. 2017. \"Safetynet: Detecting and rejecting \u2028adversarial examples robustly \" ICCV.","key":"e_1_3_2_1_24_1","DOI":"10.1109\/ICCV.2017.56"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_25_1","DOI":"10.1109\/SP.2016.41"},{"unstructured":"Ruitong Huang Bing Xu Dale Schuurmans and Csaba Szepesv\u00e1ri. 2015. Learning with a strong adversary. arXiv preprint arXiv:1511.03034.  Ruitong Huang Bing Xu Dale Schuurmans and Csaba Szepesv\u00e1ri. 2015. Learning with a strong adversary. arXiv preprint arXiv:1511.03034.","key":"e_1_3_2_1_26_1"},{"unstructured":"Tramer et al. 2016. \"Stealing ML models via prediction APIs\" UsenixSEC'16. https:\/\/arxiv.org\/abs\/1609.02943   Tramer et al. 2016. \"Stealing ML models via prediction APIs\" UsenixSEC'16. https:\/\/arxiv.org\/abs\/1609.02943","key":"e_1_3_2_1_27_1"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_28_1","DOI":"10.1145\/3052973.3053009"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_29_1","DOI":"10.1109\/SP.2017.41"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_30_1","DOI":"10.1145\/2976749.2978318"},{"volume-title":"2016 IEEE European Symposium on. IEEE, 372--387","author":"Papernot N.","key":"e_1_3_2_1_31_1"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_32_1","DOI":"10.1145\/2810103.2813677"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_33_1","DOI":"10.1109\/TIFS.2012.2190726"},{"volume-title":"Proceedings of the 33rd International Conference on Machine Learning","author":"Gilad-Bachrach","key":"e_1_3_2_1_34_1"},{"unstructured":"Xie P Bilenko M and Finley T et al. 2014. Crypto-nets: Neural networks over encrypted data{DB\/OL}. {2017-09-01}. https:\/\/arxiv.org\/pdf\/1412.6181  Xie P Bilenko M and Finley T et al. 2014. Crypto-nets: Neural networks over encrypted data{DB\/OL}. {2017-09-01}. https:\/\/arxiv.org\/pdf\/1412.6181","key":"e_1_3_2_1_35_1"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_36_1","DOI":"10.1145\/2660267.2660348"},{"unstructured":"https:\/\/www.anquanke.com\/post\/id\/98300  https:\/\/www.anquanke.com\/post\/id\/98300","key":"e_1_3_2_1_37_1"},{"unstructured":"T. Gu B. Dolan-Gavitt and S. Garg Badnets: Identifying vulnerabilities in the machine learning model supply chain  T. Gu B. Dolan-Gavitt and S. Garg Badnets: Identifying vulnerabilities in the machine learning model supply chain","key":"e_1_3_2_1_38_1"},{"unstructured":"X Chen C Liu B Li K Lu and D Song. 2017. \"Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning\". In: arXiv preprint:1712.05526.  X Chen C Liu B Li K Lu and D Song. 2017. \"Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning\". In: arXiv preprint:1712.05526.","key":"e_1_3_2_1_39_1"},{"doi-asserted-by":"publisher","key":"e_1_3_2_1_40_1","DOI":"10.1145\/3128572.3140444"}],"event":{"acronym":"CSAE '18","name":"CSAE '18: The 2nd International Conference on Computer Science and Application Engineering","location":"Hohhot China"},"container-title":["Proceedings of the 2nd International Conference on Computer Science and Application Engineering"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3207677.3277988","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3207677.3277988","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,18]],"date-time":"2025-06-18T17:49:10Z","timestamp":1750268950000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3207677.3277988"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,10,22]]},"references-count":40,"alternative-id":["10.1145\/3207677.3277988","10.1145\/3207677"],"URL":"https:\/\/doi.org\/10.1145\/3207677.3277988","relation":{},"subject":[],"published":{"date-parts":[[2018,10,22]]},"assertion":[{"value":"2018-10-22","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}