{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,3]],"date-time":"2025-09-03T11:00:27Z","timestamp":1756897227070,"version":"3.28.0"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,9,7]],"date-time":"2021-09-07T00:00:00Z","timestamp":1630972800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,9,7]],"date-time":"2021-09-07T00:00:00Z","timestamp":1630972800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,9,7]],"date-time":"2021-09-07T00:00:00Z","timestamp":1630972800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,9,7]]},"DOI":"10.1109\/isc253183.2021.9562816","type":"proceedings-article","created":{"date-parts":[[2021,10,16]],"date-time":"2021-10-16T10:05:05Z","timestamp":1634378705000},"page":"1-7","source":"Crossref","is-referenced-by-count":5,"title":["Jangseung: A Guardian for Machine Learning Algorithms to Protect Against Poisoning Attacks"],"prefix":"10.1109","author":[{"given":"Shaya","family":"Wolf","sequence":"first","affiliation":[]},{"given":"Woodrow","family":"Gamboa","sequence":"additional","affiliation":[]},{"given":"Mike","family":"Borowczak","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"journal-title":"UCI Machine Learning Repository","year":"2017","author":"dua","key":"ref39"},{"journal-title":"MNIST Handwritten Digit 
Database","year":"2010","author":"lecun","key":"ref38"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1145\/3316781.3323470"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/IVS.2018.8500421"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2020.2975048"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TDSC.2020.3037500"},{"key":"ref37","first-page":"118","article-title":"Machine learning with adversaries: Byzantine tolerant gradient descent","author":"blanchard","year":"0","journal-title":"Proceedings of the 31st International Conference on Neural Information Processing Systems"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3377454"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TIT.2017.2736066"},{"key":"ref34","first-page":"2","article-title":"Mlbase: A distributed machine-learning system","volume":"1","author":"kraska","year":"2013","journal-title":"CIDR"},{"key":"ref10","article-title":"Generative adversarial trainer: Defense to adversarial perturbations with gan","author":"lee","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2018.04.027"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2016.41"},{"key":"ref13","article-title":"Towards robust deep neural networks with bang","author":"rozsa","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref14","article-title":"Ensemble adversarial training: Attacks and defenses","author":"tram\u00e8r","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00465"},{"key":"ref16","article-title":"Universal adversarial perturbations","author":"moosavi-dezfooli","year":"0","journal-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)"},{"key":"ref17","article-title":"Analysis of universal adversarial 
perturbations","author":"moosavi-dezfooli","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00357"},{"key":"ref19","article-title":"Adversarial perturbations against deep neural networks for malware classification","author":"grosse","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref28","article-title":"Understanding and enhancing the transferability of adversarial examples","author":"wu","year":"2018","journal-title":"ArXiv Preprint"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/PerComWorkshops48775.2020.9156127"},{"key":"ref27","article-title":"Blocking transferability of adversarial examples in black-box learning systems","author":"hosseini","year":"2017","journal-title":"ArXiv Preprint"},{"key":"ref3","article-title":"Toward trustworthy ai development: mechanisms for supporting verifiable claims","author":"brundage","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref6","article-title":"Simple black-box adversarial perturbations for deep networks","author":"narodytska","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/j.jisa.2020.102717"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/2046684.2046692"},{"key":"ref8","first-page":"27","article-title":"Towards poisoning of deep learning algorithms with back-gradient optimization","author":"mu\u00f1oz-gonz\u00e1lez","year":"0","journal-title":"Proceedings of the 10th ACM Workshop on Artificial Intelligence and Security"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3219617.3219655"},{"key":"ref2","first-page":"2017","article-title":"Challenges for transparency","volume":"29","author":"weller","year":"2017","journal-title":"ArXiv 
Preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3134600.3134635"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s13347-017-0293-z"},{"key":"ref20","first-page":"97","article-title":"Support vector machines under adversarial label noise","author":"biggio","year":"0","journal-title":"Asian Conference on Machine Learning"},{"key":"ref22","article-title":"Data poisoning attacks against autoregressive models","volume":"30","author":"alfeld","year":"0","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"journal-title":"arXiv preprint","article-title":"Poisoning attacks against support vector machines","year":"2012","key":"ref21"},{"key":"ref24","article-title":"Transferability in machine learning: from phenomena to black-box attacks using adversarial samples","author":"papernot","year":"2016","journal-title":"arXiv preprint"},{"key":"ref23","first-page":"1689","article-title":"Is feature selection secure against training data poisoning?","author":"xiao","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref26","article-title":"Adversarial machine learning at scale","author":"kurakin","year":"2016","journal-title":"ArXiv Preprint"},{"key":"ref25","article-title":"The space of transferable adversarial examples","author":"tram\u00e8r","year":"2017","journal-title":"ArXiv Preprint"}],"event":{"name":"2021 IEEE International Smart Cities Conference (ISC2)","start":{"date-parts":[[2021,9,7]]},"location":"Manchester, United Kingdom","end":{"date-parts":[[2021,9,10]]}},"container-title":["2021 IEEE International Smart Cities Conference 
(ISC2)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9562741\/9562768\/09562816.pdf?arnumber=9562816","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,10]],"date-time":"2022-05-10T15:47:23Z","timestamp":1652197643000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9562816\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,9,7]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/isc253183.2021.9562816","relation":{},"subject":[],"published":{"date-parts":[[2021,9,7]]}}}