{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T15:38:13Z","timestamp":1730302693377,"version":"3.28.0"},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T00:00:00Z","timestamp":1666396800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,10,22]],"date-time":"2022-10-22T00:00:00Z","timestamp":1666396800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,10,22]]},"DOI":"10.1109\/uv56588.2022.10185524","type":"proceedings-article","created":{"date-parts":[[2023,7,26]],"date-time":"2023-07-26T17:58:12Z","timestamp":1690394292000},"page":"1-5","source":"Crossref","is-referenced-by-count":1,"title":["MSDT: Masked Language Model Scoring Defense in Text Domain"],"prefix":"10.1109","author":[{"given":"Jaechul","family":"Roh","sequence":"first","affiliation":[{"name":"The Hong Kong University of Science and Technology,Department of Electronic and Computer Engineering,Hong Kong,China"}]},{"given":"Minhao","family":"Cheng","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology,Department of Computer Science and Engineering,Hong Kong,China"}]},{"given":"Yajun","family":"Fang","sequence":"additional","affiliation":[{"name":"Universal Village Society,Cambridge,USA"}]}],"member":"263","reference":[{"key":"ref13","article-title":"Onion: A simple and effective defense against textual backdoor attacks","author":"qi","year":"2020","journal-title":"arXiv preprint arXiv 2011 10068"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6311"},{"key":"ref15","article-title":"Masked language model scoring","author":"salazar","year":"2019","journal-title":"arXiv preprint arXiv 1910 14192"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2021.04.105"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.37"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.249"},{"key":"ref2","article-title":"Badnets: Identifying vulnerabilities in the machine learning model supply chain","author":"gu","year":"2017","journal-title":"arXiv preprint arXiv 1708 06578"},{"key":"ref1","article-title":"Explaining and harnessing adversarial examples","author":"goodfellow","year":"2014","journal-title":"arXiv preprint arXiv 1412 6572"},{"key":"ref17","first-page":"1631","article-title":"Recursive deep models for semantic compositionality over a sentiment treebank","author":"socher","year":"2013","journal-title":"Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing"},{"key":"ref16","article-title":"Badnl: Backdoor attacks against nlp models","author":"chen","year":"2021","journal-title":"ICML 2021 Workshop on Adversarial Machine Learning"},{"key":"ref18","article-title":"Character-level convolutional networks for text classification","volume":"28","author":"zhang","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/SPW.2018.00009"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-5446"},{"key":"ref9","article-title":"Deep speech: Scaling up end-to-end speech recognition","author":"hannun","year":"2014","journal-title":"arXiv preprint arXiv 1412 5567"},{"key":"ref4","article-title":"Badpre: Task-agnostic backdoor attacks to pre-trained nlp foundation models","author":"chen","year":"2021","journal-title":"arXiv preprint arXiv 2110 07058"},{"key":"ref3","article-title":"What do deep nets learn? class-wise patterns revealed in the input space","author":"zhao","year":"2021","journal-title":"arXiv preprint arXiv 2101 06286"},{"key":"ref6","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref5","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"2018","journal-title":"arXiv preprint arXiv 1810 04805"}],"event":{"name":"2022 6th International Conference on Universal Village (UV)","start":{"date-parts":[[2022,10,22]]},"location":"Boston, MA, USA","end":{"date-parts":[[2022,10,25]]}},"container-title":["2022 6th International Conference on Universal Village (UV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10185200\/10185442\/10185524.pdf?arnumber=10185524","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,14]],"date-time":"2023-08-14T17:34:46Z","timestamp":1692034486000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10185524\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,10,22]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/uv56588.2022.10185524","relation":{},"subject":[],"published":{"date-parts":[[2022,10,22]]}}}