{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,8]],"date-time":"2026-03-08T01:38:01Z","timestamp":1772933881395,"version":"3.50.1"},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"NSF","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,8]]},"DOI":"10.1109\/bigdata66926.2025.11401655","type":"proceedings-article","created":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T20:57:57Z","timestamp":1772830677000},"page":"1514-1521","source":"Crossref","is-referenced-by-count":0,"title":["Leveraging Pre-Trained Language Models for Realistic Adversarial Attacks"],"prefix":"10.1109","author":[{"given":"Nuzaer","family":"Omar","sequence":"first","affiliation":[{"name":"Missouri University of Science and Technology,Department of Computer Science"}]},{"given":"Ademola","family":"Adesokan","sequence":"additional","affiliation":[{"name":"University of Central Arkansas,Department of Computer Science and Engineering"}]},{"given":"Sanjay","family":"Madria","sequence":"additional","affiliation":[{"name":"Missouri University of Science and Technology,Department of Computer Science"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"crossref","DOI":"10.1609\/aaai.v34i05.6311","article-title":"Is BERT Really Robust? A Strong Baseline for Natural Language Attack on Text Classification and Entailment","volume-title":"arXiv preprint","author":"Jin","year":"2020"},{"key":"ref2","first-page":"6193","article-title":"BERT-ATTACK: Adversarial Attack Against BERT Using BERT","author":"Li","year":"2020","journal-title":"In Proc. EMNLP 2020"},{"key":"ref3","first-page":"2953","article-title":"Perturbations in the Wild: Leveraging Human-Written Text Perturbations for Realistic Adversarial Attack and Defense","author":"Le","year":"2022","journal-title":"In Findings of ACL 2022"},{"key":"ref4","first-page":"7664","article-title":"Character-level White-Box Adversarial Attacks against Transformers via Attachable Subwords Substitution","author":"Liu","year":"2022","journal-title":"In Proc. EMNLP 2022"},{"key":"ref5","article-title":"Exploring the Adversarial Capabilities of Large Language Models","volume-title":"arXiv preprint","author":"Struppek","year":"2024"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2022.119170"},{"key":"ref7","article-title":"Exploiting Class Probabilities for Blackbox Sentence-level Attacks","author":"Moraffah","year":"2024"},{"key":"ref8","article-title":"BERTScore: Evaluating Text Generation with BERT","volume-title":"arXiv preprint","author":"Zhang","year":"2020"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-acl.461"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.14722\/ndss.2019.23138"},{"key":"ref11","first-page":"1634","article-title":"Text Processing Like Humans Do: Visually Attacking and Shielding NLP Systems","volume-title":"In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)","author":"Eger","year":"2019"},{"key":"ref12","article-title":"BERT: Pretraining of Deep Bidirectional Transformers for Language Understanding","author":"Devlin","year":"2019"},{"key":"ref13","article-title":"BERTopic: Neural topic modeling with a class-based TF-IDF procedure","volume-title":"arXiv preprint","author":"Grootendorst","year":"2022"},{"key":"ref14","article-title":"Explaining and Harnessing Adversarial Examples","volume-title":"arXiv preprint","author":"Goodfellow","year":"2015"},{"key":"ref15","article-title":"Adversarial examples in the physical world","volume-title":"arXiv preprint","author":"Kurakin","year":"2017"},{"key":"ref16","article-title":"Adversarial Attacks and Defences: A Survey","volume-title":"arXiv preprint","author":"Chakraborty","year":"2018"},{"key":"ref17","first-page":"1875","article-title":"Adversarial Example Generation with Syntactically Controlled Paraphrase Networks","volume-title":"In Proc. NAACL 2018","volume":"1","author":"Iyyer","year":"2018"},{"key":"ref18","first-page":"6174","article-title":"Garg & Ramakrishnan. BAE: BERT-based Adversarial Examples for Text Classification","year":"2020","journal-title":"In Proc. EMNLP"}],"event":{"name":"2025 IEEE International Conference on Big Data (BigData)","location":"Macau, China","start":{"date-parts":[[2025,12,8]]},"end":{"date-parts":[[2025,12,11]]}},"container-title":["2025 IEEE International Conference on Big Data (BigData)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11400704\/11400712\/11401655.pdf?arnumber=11401655","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T06:54:14Z","timestamp":1772866454000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11401655\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,8]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/bigdata66926.2025.11401655","relation":{},"subject":[],"published":{"date-parts":[[2025,12,8]]}}}