{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T18:43:09Z","timestamp":1762368189456,"version":"build-2065373602"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,8,31]],"date-time":"2025-08-31T00:00:00Z","timestamp":1756598400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,8,31]],"date-time":"2025-08-31T00:00:00Z","timestamp":1756598400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,8,31]]},"DOI":"10.1109\/mlsp62443.2025.11204331","type":"proceedings-article","created":{"date-parts":[[2025,10,24]],"date-time":"2025-10-24T17:15:52Z","timestamp":1761326152000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["Demem: Privacy-Enhanced Robust Adversarial Learning via De-Memorization"],"prefix":"10.1109","author":[{"given":"Xiaoyu","family":"Luo","sequence":"first","affiliation":[{"name":"Aalborg University,Department of Electronic Systems,Copenhagen,Denmark"}]},{"given":"Qiongxiu","family":"Li","sequence":"additional","affiliation":[{"name":"Aalborg University,Department of Electronic Systems,Copenhagen,Denmark"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3218715"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/EuroSP51992.2021.00028"},{"journal-title":"Adbm: Adversarial diffusion bridge model for reliable adversarial purification","year":"2024","author":"Li","key":"ref3"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02331"},{"journal-title":"Faster-gcg: Efficient discrete optimization jailbreak attacks against aligned large language models","year":"2024","author":"Li","key":"ref5"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3319535.3354211"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIFS.2024.3381477"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/SP.2017.41"},{"journal-title":"Robustness threats of differential privacy","year":"2020","author":"Tursynbek","key":"ref9"},{"journal-title":"Learning to be adversarially robust and differentially private","year":"2022","author":"Hayes","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/2976749.2978318"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3357713.3384290"},{"key":"ref13","first-page":"2881","article-title":"What neural networks memorize and why: Discovering the long tail via influence estimation","volume":"33","author":"Feldman","year":"2020","journal-title":"Advances in Neural Information Processing Systems"},{"journal-title":"Towards deep learning models resistant to adversarial attacks","year":"2017","author":"Madry","key":"ref14"},{"key":"ref15","first-page":"7472","article-title":"Theoretically principled trade-off between robustness and accuracy","volume-title":"International conference on machine learning","author":"Zhang","year":"2019"},{"journal-title":"Explaining and harnessing adversarial examples","year":"2014","author":"Goodfellow","key":"ref16"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2018.00175"},{"key":"ref18","first-page":"2958","article-title":"Adversarial weight perturbation helps robust generalization","volume":"33","author":"Wu","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/11787006_1"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/icassp.2019.8682523"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/SP46214.2022.9833649"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2019.23119"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CSF.2018.00027"},{"key":"ref24","first-page":"2615","article-title":"Systematic evaluation of privacy risks of machine learning models","volume-title":"30th USENIX Security Symposium (USENIX Security 21)","author":"Song","year":"2021"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00780"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/422"},{"key":"ref27","first-page":"8024","article-title":"Pytorch: An imperative style, high-performance deep learning library","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst. (NeurIPS)"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2016.90"},{"journal-title":"Opacus: User-friendly differential privacy library in pytorch","year":"2021","key":"ref29"}],"event":{"name":"2025 IEEE 35th International Workshop on Machine Learning for Signal Processing (MLSP)","start":{"date-parts":[[2025,8,31]]},"location":"Istanbul, Turkiye","end":{"date-parts":[[2025,9,3]]}},"container-title":["2025 IEEE 35th International Workshop on Machine Learning for Signal Processing (MLSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11204201\/11204202\/11204331.pdf?arnumber=11204331","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T18:37:36Z","timestamp":1762367856000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11204331\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,31]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/mlsp62443.2025.11204331","relation":{},"subject":[],"published":{"date-parts":[[2025,8,31]]}}}