{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T10:26:38Z","timestamp":1763202398051,"version":"3.28.0"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,7,1]],"date-time":"2020-07-01T00:00:00Z","timestamp":1593561600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,7]]},"DOI":"10.1109\/icme46284.2020.9102886","type":"proceedings-article","created":{"date-parts":[[2020,6,9]],"date-time":"2020-06-09T21:40:07Z","timestamp":1591738807000},"page":"1-6","source":"Crossref","is-referenced-by-count":43,"title":["Universal Adversarial Perturbations Generative Network For Speaker Recognition"],"prefix":"10.1109","author":[{"given":"Jiguo","family":"Li","sequence":"first","affiliation":[]},{"given":"Xinfeng","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Chuanmin","family":"Jia","sequence":"additional","affiliation":[]},{"given":"Jizheng","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Li","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yue","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Siwei","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Wen","family":"Gao","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Crafting adversarial examples for speech paralinguistics applications","author":"gong","year":"2018","journal-title":"DYNAMICS Workshop"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462693"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1990.115550"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1995.479543"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639585"},{"key":"ref15","article-title":"Darpa timit acoustic-phonetic continous speech corpus cd-rom. nist speech disc 1-1.1","volume":"93","author":"garofolo","year":"1993","journal-title":"NASA STI\/Recon Technical Report N"},{"key":"ref16","first-page":"5206","article-title":"Librispeech: an asr corpus based on public domain audio books","author":"panayotov","year":"2015","journal-title":"ICASSP"},{"key":"ref17","first-page":"1204","article-title":"Guap: Generic universal adversarial perturbation that fools rpn-based detectors","volume":"101","author":"wu","year":"2019","journal-title":"Machine Learning Research"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1353"},{"key":"ref19","first-page":"448","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"ioffe","year":"2015","journal-title":"ICML"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.17"},{"key":"ref3","article-title":"Explaining and harnessing adversarial examples","author":"goodfellow","year":"2015","journal-title":"ICLR"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.49"},{"key":"ref5","article-title":"Intriguing properties of neural networks","author":"szegedy","year":"2014","journal-title":"ICLR"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00015"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TEVC.2019.2890858"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref1","first-page":"1097","article-title":"Imagenet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/BTAS.2016.7791200"},{"key":"ref20","first-page":"807","article-title":"Rectified linear units improve restricted boltzmann machines","author":"nair","year":"2010","journal-title":"ICML"},{"journal-title":"ITU-T Draft Recommendation P 862","article-title":"Perceptual evaluation of speech quality (pesq), an objective method for end-to-end speech quality assessment of narrowband telephone networks and speech codecs","year":"2000","key":"ref22"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2001.941023"}],"event":{"name":"2020 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2020,7,6]]},"location":"London, United Kingdom","end":{"date-parts":[[2020,7,10]]}},"container-title":["2020 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9099125\/9102711\/09102886.pdf?arnumber=9102886","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:26:49Z","timestamp":1656376009000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9102886\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,7]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/icme46284.2020.9102886","relation":{},"subject":[],"published":{"date-parts":[[2020,7]]}}}