{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,11]],"date-time":"2026-05-11T14:25:39Z","timestamp":1778509539067,"version":"3.51.4"},"reference-count":40,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,8,1]],"date-time":"2026-08-01T00:00:00Z","timestamp":1785542400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100004608","name":"Jiangsu Province Natural Science Foundation","doi-asserted-by":"publisher","award":["BK20230727"],"award-info":[{"award-number":["BK20230727"]}],"id":[{"id":"10.13039\/501100004608","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62402397"],"award-info":[{"award-number":["62402397"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62302330"],"award-info":[{"award-number":["62302330"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62372236"],"award-info":[{"award-number":["62372236"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100010023","name":"Natural Science Research of Jiangsu Higher Education Institutions of China","doi-asserted-by":"publisher","award":["23KJD520013"],"award-info":[{"award-number":["23KJD520013"]}],"id":[{"id":"10.13039\/501100010023","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neural Networks"],"published-print":{"date-parts":[[2026,8]]},"DOI":"10.1016\/j.neunet.2026.108833","type":"journal-article","created":{"date-parts":[[2026,3,12]],"date-time":"2026-03-12T17:02:58Z","timestamp":1773334978000},"page":"108833","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["SEEK: A simple defense to model hijacking attack"],"prefix":"10.1016","volume":"200","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-0651-5693","authenticated-orcid":false,"given":"Yi","family":"Zhong","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6094-5995","authenticated-orcid":false,"given":"Zhenzhu","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-7821-8372","authenticated-orcid":false,"given":"Rui","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7566-4819","authenticated-orcid":false,"given":"Lei","family":"Zhou","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1632-5737","authenticated-orcid":false,"given":"Anmin","family":"Fu","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neunet.2026.108833_bib0001","series-title":"Proceedings of the second conference on machine translation, volume 2: Shared task papers","first-page":"169","article-title":"Findings of the 2017 conference on machine translation WMT17","author":"Bojar","year":"2017"},{"key":"10.1016\/j.neunet.2026.108833_bib0002","series-title":"Proceedings of the 37th annual computer security applications conference","first-page":"554","article-title":"BADNL: Backdoor attacks against NLP models with semantic-preserving improvements","author":"Chen","year":"2021"},{"key":"10.1016\/j.neunet.2026.108833_bib0003","first-page":"1","article-title":"Backdoor attacks and countermeasures in natural language processing models: A comprehensive security review","author":"Cheng","year":"2025","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"10.1016\/j.neunet.2026.108833_bib0004","series-title":"Proceedings of the 2024 ACM conference on fairness, accountability, and transparency","first-page":"2454","article-title":"I am not a lawyer, but...: Engaging legal experts towards responsible LLM policies for legal advice","author":"Cheong","year":"2024"},{"key":"10.1016\/j.neunet.2026.108833_bib0005","series-title":"2025 IEEE international conference on multimedia and expo (ICME)","first-page":"1","article-title":"Neeko: Model hijacking attacks against generative adversarial networks","author":"Chu","year":"2025"},{"key":"10.1016\/j.neunet.2026.108833_bib0006","doi-asserted-by":"crossref","unstructured":"Dai, J., & Chen, C. (2019). A backdoor attack against LSTM-based text classification systems. 10.48550\/arXiv.1905.12457.","DOI":"10.1109\/ACCESS.2019.2941376"},{"key":"10.1016\/j.neunet.2026.108833_bib0007","series-title":"Proceedings of the IEEE\/CVF international conference on computer vision","first-page":"11966","article-title":"LIRA: Learnable, imperceptible and robust backdoor attacks","author":"Doan","year":"2021"},{"key":"10.1016\/j.neunet.2026.108833_bib0008","series-title":"Proceedings of the association for computational linguistics","first-page":"1","article-title":"Using punctuation as an adversarial attack on deep learning-based NLP systems: An empirical study","author":"Formento","year":"2023"},{"key":"10.1016\/j.neunet.2026.108833_bib0009","doi-asserted-by":"crossref","DOI":"10.1016\/j.neunet.2025.107479","article-title":"Red alarm: Controllable backdoor attack in continual learning","volume":"188","author":"Gao","year":"2025","journal-title":"Neural Networks"},{"key":"10.1016\/j.neunet.2026.108833_bib0010","series-title":"Proceedings of the 35th annual computer security applications conference","first-page":"113","article-title":"Strip: A defence against trojan attacks on deep neural networks","author":"Gao","year":"2019"},{"issue":"4","key":"10.1016\/j.neunet.2026.108833_bib0011","doi-asserted-by":"crossref","first-page":"786","DOI":"10.1002\/cpt.3161","article-title":"Applications of advanced natural language processing for clinical pharmacology","volume":"115","author":"Hsu","year":"2024","journal-title":"Clinical Pharmacology & Therapeutics"},{"key":"10.1016\/j.neunet.2026.108833_bib0012","series-title":"Proceedings of the 2021 ACM SIGSAC conference on computer and communications security","first-page":"3104","article-title":"Subpopulation data poisoning attacks","author":"Jagielski","year":"2021"},{"key":"10.1016\/j.neunet.2026.108833_bib0013","unstructured":"Kaddour, J., Harris, J., Mozes, M., Bradley, H., Raileanu, R., & McHardy, R. (2023). Challenges and applications of large language models. 10.48550\/arXiv.2307.10169."},{"key":"10.1016\/j.neunet.2026.108833_sbref0014","series-title":"Proceedings of the 2024 on ACM SIGSAC conference on computer and communications security","article-title":"Phantom: Untargeted poisoning attacks on semi-supervised learning","author":"Knauer","year":"2024"},{"key":"10.1016\/j.neunet.2026.108833_bib0015","series-title":"Proceedings of the 2024 on ACM SIGSAC conference on computer and communications security","first-page":"615","article-title":"Phantom: Untargeted poisoning attacks on semi-supervised learning","author":"Knauer","year":"2024"},{"key":"10.1016\/j.neunet.2026.108833_bib0016","doi-asserted-by":"crossref","unstructured":"Kurita, K., Michel, P., & Neubig, G. (2020a). Weight poisoning attacks on pre-trained models. 10.48550\/arXiv.2004.06660.","DOI":"10.18653\/v1\/2020.acl-main.249"},{"key":"10.1016\/j.neunet.2026.108833_bib0017","series-title":"Proceedings of the 58th annual meeting of the association for computational linguistics","first-page":"2793","article-title":"Weight poisoning attacks on pretrained models","author":"Kurita","year":"2020"},{"key":"10.1016\/j.neunet.2026.108833_bib0018","first-page":"1","article-title":"Large language models in finance (FinLLMs)","author":"Lee","year":"2025","journal-title":"Neural Computing and Applications"},{"issue":"1","key":"10.1016\/j.neunet.2026.108833_bib0019","doi-asserted-by":"crossref","first-page":"5","DOI":"10.1109\/TNNLS.2022.3182979","article-title":"Backdoor learning: A survey","volume":"35","author":"Li","year":"2022","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"10.1016\/j.neunet.2026.108833_bib0020","series-title":"25th Annual network and distributed system security symposium (NDSS 2018)","article-title":"Trojaning attack on neural networks","author":"Liu","year":"2018"},{"key":"10.1016\/j.neunet.2026.108833_bib0021","series-title":"31st USENIX security symposium","first-page":"3611","article-title":"Hidden trigger backdoor attack on NLP models via linguistic style manipulation","author":"Pan","year":"2022"},{"key":"10.1016\/j.neunet.2026.108833_bib0022","series-title":"Proceedings of the 2021 conference on empirical methods in natural language processing","first-page":"9558","article-title":"Onion: A simple and effective defense against textual backdoor attacks","author":"Qi","year":"2021"},{"key":"10.1016\/j.neunet.2026.108833_bib0023","doi-asserted-by":"crossref","unstructured":"Qi, F., Yao, Y., Xu, S., Liu, Z., & Sun, M. (2021b). Turn the combination lock: Learnable textual backdoor attacks via word substitution. 10.48550\/arXiv.2106.06361.","DOI":"10.18653\/v1\/2021.acl-long.377"},{"key":"10.1016\/j.neunet.2026.108833_bib0024","series-title":"Proceedings of the AAAI conference on artificial intelligence","first-page":"11957","article-title":"Hidden trigger backdoor attacks","volume":"vol. 34","author":"Saha","year":"2020"},{"key":"10.1016\/j.neunet.2026.108833_bib0025","series-title":"31st USENIX security symposium","first-page":"1","article-title":"Get a model! model hijacking attack against machine learning models","author":"Salem","year":"2022"},{"key":"10.1016\/j.neunet.2026.108833_bib0026","series-title":"32nd USENIX security symposium","first-page":"2223","article-title":"Two-in-one: A model hijacking attack against text generation models","author":"Si","year":"2023"},{"key":"10.1016\/j.neunet.2026.108833_bib0027","series-title":"Proceedings of the 2013 conference on empirical methods in natural language processing","first-page":"1631","article-title":"Recursive deep models for semantic compositionality over a sentiment treebank","author":"Socher","year":"2013"},{"issue":"8","key":"10.1016\/j.neunet.2026.108833_bib0028","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3551636","article-title":"A comprehensive survey on poisoning attacks and countermeasures in machine learning","volume":"55","author":"Tian","year":"2023","journal-title":"ACM Computing Surveys"},{"key":"10.1016\/j.neunet.2026.108833_bib0029","unstructured":"Tram\u00e8r, F., Kurakin, A., Papernot, N., Goodfellow, I., Boneh, D., & McDaniel, P. (2017). Ensemble adversarial training: Attacks and defenses. arXiv preprint arXiv: 1705.07204."},{"key":"10.1016\/j.neunet.2026.108833_bib0030","series-title":"Proceedings of the 2021 conference on empirical methods in natural language processing","first-page":"8365","article-title":"Rap: Robustness-aware perturbations for defending against backdoor attacks on NLP models","author":"Yang","year":"2021"},{"key":"10.1016\/j.neunet.2026.108833_bib0031","series-title":"Proceedings of the 59th annual meeting of the association for computational linguistics and the 11th international joint conference on natural language processing","first-page":"5543","article-title":"Rethinking stealthiness of backdoor attack against NLP models","author":"Yang","year":"2021"},{"key":"10.1016\/j.neunet.2026.108833_bib0032","doi-asserted-by":"crossref","first-page":"326","DOI":"10.1016\/j.neunet.2023.09.037","article-title":"How to backdoor split learning","volume":"168","author":"Yu","year":"2023","journal-title":"Neural Networks"},{"key":"10.1016\/j.neunet.2026.108833_bib0033","series-title":"Proceedings of the AAAI conference on artificial intelligence","first-page":"4854","article-title":"Untargeted attack against federated recommendation systems via poisonous item embeddings and the defense","volume":"vol. 37","author":"Yu","year":"2023"},{"key":"10.1016\/j.neunet.2026.108833_bib0034","unstructured":"Zhang, M., Salem, A., Backes, M., & Zhang, Y. (2024). Vera verto: multimodal hijacking attack. arXiv preprint arXiv: 2408.00129."},{"issue":"3","key":"10.1016\/j.neunet.2026.108833_bib0035","first-page":"1","article-title":"Adversarial attacks on deep-learning models in natural language processing: A survey","volume":"11","author":"Zhang","year":"2020","journal-title":"ACM Transactions on Intelligent Systems and Technology"},{"key":"10.1016\/j.neunet.2026.108833_bib0036","series-title":"NIPS","article-title":"Character-level convolutional networks for text classification","author":"Zhang","year":"2015"},{"key":"10.1016\/j.neunet.2026.108833_bib0037","first-page":"1","article-title":"A survey of backdoor attacks and defenses on large language models: Implications for security measures","author":"Zhao","year":"2024","journal-title":"Authorea Preprints"},{"key":"10.1016\/j.neunet.2026.108833_bib0038","unstructured":"Zhao, W. X., Zhou, K., Li, J., Tang, T., Wang, X., Hou, Y., Min, Y., Zhang, B., Zhang, J., Dong, Z. et al. (2023). A survey of large language models. 1(2). arXiv preprint arXiv: 2303.18223."},{"key":"10.1016\/j.neunet.2026.108833_bib0039","doi-asserted-by":"crossref","first-page":"756","DOI":"10.1016\/j.neunet.2023.10.034","article-title":"Learning a robust foundation model against clean-label data poisoning attacks at downstream tasks","volume":"169","author":"Zhou","year":"2024","journal-title":"Neural Networks"},{"key":"10.1016\/j.neunet.2026.108833_bib0040","series-title":"2023 IEEE symposium on security and privacy (SP)","first-page":"701","article-title":"AI-guardian: Defeating adversarial attacks using backdoors","author":"Zhu","year":"2023"}],"container-title":["Neural Networks"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608026002959?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608026002959?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,5,11]],"date-time":"2026-05-11T13:12:50Z","timestamp":1778505170000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0893608026002959"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,8]]},"references-count":40,"alternative-id":["S0893608026002959"],"URL":"https:\/\/doi.org\/10.1016\/j.neunet.2026.108833","relation":{},"ISSN":["0893-6080"],"issn-type":[{"value":"0893-6080","type":"print"}],"subject":[],"published":{"date-parts":[[2026,8]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"SEEK: A simple defense to model hijacking attack","name":"articletitle","label":"Article Title"},{"value":"Neural Networks","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neunet.2026.108833","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier Ltd. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"108833"}}