{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T00:46:26Z","timestamp":1774399586630,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":17,"publisher":"ACM","license":[{"start":{"date-parts":[[2022,8,23]],"date-time":"2022-08-23T00:00:00Z","timestamp":1661212800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"DOI":"10.13039\/501100001659","name":"Deutsche Forschungsgemeinschaft","doi-asserted-by":"publisher","award":["AN 996\/1-1"],"award-info":[{"award-number":["AN 996\/1-1"]}],"id":[{"id":"10.13039\/501100001659","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2022,8,23]]},"DOI":"10.1145\/3539813.3545122","type":"proceedings-article","created":{"date-parts":[[2022,8,25]],"date-time":"2022-08-25T22:18:32Z","timestamp":1661465912000},"page":"115-120","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":15,"title":["BERT Rankers are Brittle"],"prefix":"10.1145","author":[{"given":"Yumeng","family":"Wang","sequence":"first","affiliation":[{"name":"L3S Research Center, Hannover, Germany"}]},{"given":"Lijun","family":"Lyu","sequence":"additional","affiliation":[{"name":"L3S Research Center, Hannover, Germany"}]},{"given":"Avishek","family":"Anand","sequence":"additional","affiliation":[{"name":"Delft University of Technology, Delft, Holland"}]}],"member":"320","published-online":{"date-parts":[[2022,8,25]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Hotflip: White-box adversarial examples for text classification. arXiv preprint arXiv:1712.06751","author":"Ebrahimi Javid","year":"2017","unstructured":"Javid Ebrahimi, Anyi Rao, Daniel Lowd, and Dejing Dou. 2017. Hotflip: White-box adversarial examples for text classification. arXiv preprint arXiv:1712.06751 (2017)."},{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1145\/3209978.3210012"},{"key":"e_1_3_2_1_3_1","volume-title":"Adversarial Attacks and Defense on Texts: A Survey. arxiv","author":"Huq Aminul","year":"2020","unstructured":"Aminul Huq and Mst. Tasnim Pervin. 2020. Adversarial Attacks and Defense on Texts: A Survey. arxiv: 2005.14108 [cs.CL]"},{"key":"e_1_3_2_1_4_1","volume-title":"Joey Tianyi Zhou, and Peter Szolovits","author":"Jin Di","year":"2019","unstructured":"Di Jin, Zhijing Jin, Joey Tianyi Zhou, and Peter Szolovits. 2019. Is bert really robust? natural language attack on text classification and entailment. arXiv preprint arXiv:1907.11932 , Vol. 2 (2019)."},{"key":"e_1_3_2_1_5_1","volume-title":"Adversarial machine learning at scale. arXiv preprint arXiv:1611.01236","author":"Kurakin Alexey","year":"2016","unstructured":"Alexey Kurakin, Ian Goodfellow, and Samy Bengio. 2016. Adversarial machine learning at scale. arXiv preprint arXiv:1611.01236 (2016)."},{"key":"e_1_3_2_1_6_1","volume-title":"2020 b. Contextualized perturbation for textual adversarial attack. arXiv preprint arXiv:2009.07502","author":"Li Dianqi","year":"2020","unstructured":"Dianqi Li, Yizhe Zhang, Hao Peng, Liqun Chen, Chris Brockett, Ming-Ting Sun, and Bill Dolan. 2020 b. Contextualized perturbation for textual adversarial attack. arXiv preprint arXiv:2009.07502 (2020)."},{"key":"e_1_3_2_1_7_1","volume-title":"2020 a. Bert-attack: Adversarial attack against bert using bert. arXiv preprint arXiv:2004.09984","author":"Li Linyang","year":"2020","unstructured":"Linyang Li, Ruotian Ma, Qipeng Guo, Xiangyang Xue, and Xipeng Qiu. 2020 a. Bert-attack: Adversarial attack against bert using bert. arXiv preprint arXiv:2004.09984 (2020)."},{"key":"e_1_3_2_1_8_1","volume-title":"Adversarial training methods for semi-supervised text classification. arXiv preprint arXiv:1605.07725","author":"Miyato Takeru","year":"2016","unstructured":"Takeru Miyato, Andrew M Dai, and Ian Goodfellow. 2016. Adversarial training methods for semi-supervised text classification. arXiv preprint arXiv:1605.07725 (2016)."},{"key":"e_1_3_2_1_9_1","volume-title":"One word at a time: adversarial attacks on retrieval models. arXiv preprint arXiv:2008.02197","author":"Raval Nisarg","year":"2020","unstructured":"Nisarg Raval and Manisha Verma. 2020. One word at a time: adversarial attacks on retrieval models. arXiv preprint arXiv:2008.02197 (2020)."},{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1103"},{"key":"e_1_3_2_1_11_1","doi-asserted-by":"publisher","DOI":"10.1145\/3351095.3375234"},{"key":"e_1_3_2_1_12_1","volume-title":"Ensemble adversarial training: Attacks and defenses. arXiv preprint arXiv:1705.07204","author":"Tram\u00e8r Florian","year":"2017","unstructured":"Florian Tram\u00e8r, Alexey Kurakin, Nicolas Papernot, Ian Goodfellow, Dan Boneh, and Patrick McDaniel. 2017. Ensemble adversarial training: Attacks and defenses. arXiv preprint arXiv:1705.07204 (2017)."},{"key":"e_1_3_2_1_13_1","volume-title":"2019 a. Universal adversarial triggers for attacking and analyzing NLP. arXiv preprint arXiv:1908.07125","author":"Wallace Eric","year":"2019","unstructured":"Eric Wallace, Shi Feng, Nikhil Kandpal, Matt Gardner, and Sameer Singh. 2019 a. Universal adversarial triggers for attacking and analyzing NLP. arXiv preprint arXiv:1908.07125 (2019)."},{"key":"e_1_3_2_1_14_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00279"},{"key":"e_1_3_2_1_15_1","doi-asserted-by":"crossref","unstructured":"Eric Wallace Tony Z. Zhao Shi Feng and Sameer Singh. 2021. Concealed Data Poisoning Attacks on NLP Models. In North American Chapter of the Association for Computational Linguistics .","DOI":"10.18653\/v1\/2021.naacl-main.13"},{"key":"e_1_3_2_1_16_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2020.103641"},{"key":"e_1_3_2_1_17_1","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3374217","article-title":"Adversarial attacks on deep-learning models in natural language processing: A survey","volume":"11","author":"Zhang Wei Emma","year":"2020","unstructured":"Wei Emma Zhang, Quan Z Sheng, Ahoud Alhazmi, and Chenliang Li. 2020. Adversarial attacks on deep-learning models in natural language processing: A survey. ACM Transactions on Intelligent Systems and Technology (TIST) , Vol. 11, 3 (2020), 1--41.","journal-title":"ACM Transactions on Intelligent Systems and Technology (TIST)"}],"event":{"name":"ICTIR '22: The 2022 ACM SIGIR International Conference on the Theory of Information Retrieval","location":"Madrid Spain","acronym":"ICTIR '22","sponsor":["SIGIR ACM Special Interest Group on Information Retrieval"]},"container-title":["Proceedings of the 2022 ACM SIGIR International Conference on Theory of Information Retrieval"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3539813.3545122","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3539813.3545122","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,17]],"date-time":"2025-06-17T18:10:03Z","timestamp":1750183803000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3539813.3545122"}},"subtitle":["A Study using Adversarial Document Perturbations"],"short-title":[],"issued":{"date-parts":[[2022,8,23]]},"references-count":17,"alternative-id":["10.1145\/3539813.3545122","10.1145\/3539813"],"URL":"https:\/\/doi.org\/10.1145\/3539813.3545122","relation":{},"subject":[],"published":{"date-parts":[[2022,8,23]]},"assertion":[{"value":"2022-08-25","order":2,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}