{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T02:09:45Z","timestamp":1725588585158},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,10,1]],"date-time":"2020-10-01T00:00:00Z","timestamp":1601510400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,10,1]],"date-time":"2020-10-01T00:00:00Z","timestamp":1601510400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,10,1]],"date-time":"2020-10-01T00:00:00Z","timestamp":1601510400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,10]]},"DOI":"10.1109\/sips50750.2020.9195260","type":"proceedings-article","created":{"date-parts":[[2020,9,23]],"date-time":"2020-09-23T16:55:00Z","timestamp":1600880100000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["Fast and Efficient Decision-Based Attack for Deep Neural Network on Edge"],"prefix":"10.1109","author":[{"given":"Himanshu","family":"Jain","sequence":"first","affiliation":[]},{"given":"Sakshi","family":"Rathore","sequence":"additional","affiliation":[]},
{"given":"T. P. Abdul","family":"Rahoof","sequence":"additional","affiliation":[]},{"given":"Vivek","family":"Chaturvedi","sequence":"additional","affiliation":[]},{"given":"Satyajit","family":"Das","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00526"},{"article-title":"Prior convictions: Black-box adversarial attacks with bandits and priors","year":"2018","author":"ilyas","key":"ref11"},{"article-title":"Red-attack: Resource efficient decision based attack for machine learning","year":"2019","author":"khalid","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IOLTS.2019.8854425"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.23919\/DATE.2019.8715141"},{"article-title":"Adversarial examples in the physical world","year":"2016","author":"kurakin","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ISVLSI.2018.00093"},{"article-title":"Towards deep learning models resistant to adversarial attacks","year":"2017","author":"madry","key":"ref17"},{"key":"ref18","first-page":"2574","article-title":"Deepfool: a simple and accurate method to fool deep neural networks","author":"moosavi-dezfooli","year":"2016","journal-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition"},{"article-title":"Transferability in machine learning: from phenomena to black-box attacks using adversarial samples","year":"2016","author":"papernot","key":"ref19"},{"article-title":"Decision-based adversarial attacks: Reliable attacks against black-box machine learning models","year":"2017","author":"brendel","key":"ref4"},
{"article-title":"Potrojan: powerful neural-level trojan designs in deep learning models","year":"2018","author":"zou","key":"ref27"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2018.07.023"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3128572.3140448"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"article-title":"Query-efficient meta attack to deep neural networks","year":"2019","author":"du","key":"ref8"},{"key":"ref7","first-page":"10932","article-title":"Improving black-box adversarial attacks with a transfer-based prior","author":"cheng","year":"2019","journal-title":"Advances in neural information processing systems"},{"article-title":"Adversarial transformation networks: Learning to generate adversarial examples","year":"2017","author":"baluja","key":"ref2"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2807385"},{"article-title":"Explaining and harnessing adversarial examples","year":"2014","author":"goodfellow","key":"ref9"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP.2016.36"},{"key":"ref22","first-page":"8312","article-title":"Constructing unrestricted adversarial examples with generative models","author":"song","year":"2018","journal-title":"Advances in neural information processing systems"},{"key":"ref21","article-title":"Bayesopt adversarial attack","author":"ru","year":"2020","journal-title":"International Conference on Learning Representations"},{"article-title":"Intriguing properties of neural networks","year":"2013","author":"szegedy","key":"ref24"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2012.02.016"},{"article-title":"Skip connections matter: On the transferability of adversarial examples generated with resnets","year":"2020","author":"wu","key":"ref26"},{"article-title":"Adversarial attack type i: Cheat classifiers by significant changes","year":"2018","author":"tang","key":"ref25"}],
"event":{"name":"2020 IEEE Workshop on Signal Processing Systems (SiPS)","start":{"date-parts":[[2020,10,20]]},"location":"Coimbra, Portugal","end":{"date-parts":[[2020,10,22]]}},"container-title":["2020 IEEE Workshop on Signal Processing Systems (SiPS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9187068\/9195186\/09195260.pdf?arnumber=9195260","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,30]],"date-time":"2022-06-30T11:17:28Z","timestamp":1656587848000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9195260\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,10]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/sips50750.2020.9195260","relation":{},"subject":[],"published":{"date-parts":[[2020,10]]}}}