{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T05:25:18Z","timestamp":1730265918771,"version":"3.28.0"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,7,1]],"date-time":"2019-07-01T00:00:00Z","timestamp":1561939200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,7,1]],"date-time":"2019-07-01T00:00:00Z","timestamp":1561939200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,7,1]],"date-time":"2019-07-01T00:00:00Z","timestamp":1561939200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,7]]},"DOI":"10.1109\/ijcnn.2019.8852393","type":"proceedings-article","created":{"date-parts":[[2019,10,1]],"date-time":"2019-10-01T03:44:32Z","timestamp":1569901472000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Using Local Convolutional Units to Defend Against Adversarial Examples"],"prefix":"10.1109","author":[{"given":"Matej","family":"Kocian","sequence":"first","affiliation":[]},{"given":"Martin","family":"Pilat","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"The limitations of deep learning in adversarial settings","volume":"abs 1511 7528","author":"papernot","year":"2015","journal-title":"CoRR"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/2955129.2955178"},{"key":"ref12","article-title":"One pixel attack for fooling deep neural networks","author":"su","year":"2017","journal-title":"ArXiv e-prints"},{"key":"ref13","article-title":"Practical black-box attacks against deep learning systems using adversarial examples","volume":"abs 1602 2697","author":"papernot","year":"2016","journal-title":"CoRR"},{"key":"ref14","article-title":"The limitations of deep learning in adversarial settings","volume":"abs 1511 7528","author":"papernot","year":"2015","journal-title":"CoRR"},{"article-title":"Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples","year":"2018","author":"athalye","key":"ref15"},{"article-title":"Towards deep learning models resistant to adversarial attacks","year":"2017","author":"madry","key":"ref16"},{"key":"ref17","article-title":"Towards deep neural network architectures robust to adversarial examples","volume":"abs 1412 5068","author":"gu","year":"2014","journal-title":"CoRR"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/5.726791"},{"key":"ref19","article-title":"Deep-rbf networks revisited: Robust classification with rejection","volume":"abs 1812 3190","author":"zadeh","year":"2018","journal-title":"CoRR"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"ref3","first-page":"1097","article-title":"Imagenet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Advances in Neural Information Processing Systems 25"},{"key":"ref6","article-title":"Imagenet large scale visual recognition challenge","volume":"abs 1409 575","author":"russakovsky","year":"2014","journal-title":"CoRR"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978392"},{"key":"ref7","article-title":"Intriguing properties of neural networks","volume":"abs 1312 6199","author":"szegedy","year":"2013","journal-title":"CoRR"},{"key":"ref2","article-title":"Explaining and harnessing adversarial examples","volume":"abs 1412 6572","author":"goodfellow","year":"2014","journal-title":"CoRR"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref9","article-title":"Adversarial examples in the physical world","volume":"abs 1607 2533","author":"kurakin","year":"2016","journal-title":"CoRR"},{"key":"ref20","article-title":"Adversarial examples in machine learning","author":"koci\u00e1n","year":"2018","journal-title":"Master\u2019s thesis"},{"article-title":"Learning multiple layers of features from tiny images","year":"2009","author":"krizhevsky","key":"ref22"},{"key":"ref21","article-title":"Mnist handwritten digit database","volume":"2","author":"lecun","year":"2010","journal-title":"AT&T Labs"},{"article-title":"Lets keep it simple, using simple architectures to outperform deeper and more complex architectures","year":"2016","author":"hasanpour","key":"ref23"}],"event":{"name":"2019 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2019,7,14]]},"location":"Budapest, Hungary","end":{"date-parts":[[2019,7,19]]}},"container-title":["2019 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8840768\/8851681\/08852393.pdf?arnumber=8852393","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,7,17]],"date-time":"2022-07-17T21:48:46Z","timestamp":1658094526000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8852393\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,7]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/ijcnn.2019.8852393","relation":{},"subject":[],"published":{"date-parts":[[2019,7]]}}}