{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,9]],"date-time":"2026-04-09T22:31:01Z","timestamp":1775773861234,"version":"3.50.1"},"reference-count":41,"publisher":"Springer Science and Business Media LLC","issue":"9","license":[{"start":{"date-parts":[[2025,7,29]],"date-time":"2025-07-29T00:00:00Z","timestamp":1753747200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,7,29]],"date-time":"2025-07-29T00:00:00Z","timestamp":1753747200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Mach Learn"],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1007\/s10994-025-06817-x","type":"journal-article","created":{"date-parts":[[2025,7,29]],"date-time":"2025-07-29T21:13:25Z","timestamp":1753823605000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Improving text processing via adversarial low-rank adaptation"],"prefix":"10.1007","volume":"114","author":[{"given":"Hao","family":"Wu","sequence":"first","affiliation":[]},{"given":"Xiangfeng","family":"Luo","sequence":"additional","affiliation":[]},{"given":"Jianqi","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Dian","family":"Huang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,7,29]]},"reference":[{"key":"6817_CR1","unstructured":"Achiam, J., Adler, S., Agarwal, S., Ahmad, L., Akkaya, I., Aleman, F.L., Almeida, D., Altenschmidt, J., Altman, S., & Anadkat, S., et al.(2023). Gpt-4 technical report. arXiv preprint arXiv:2303.08774"},{"key":"6817_CR2","first-page":"12449","volume":"33","author":"A Baevski","year":"2020","unstructured":"Baevski, A., Zhou, Y., Mohamed, A., & Auli, M. (2020). wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33, 12449\u201312460.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6817_CR3","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. (2020). Language models are few-shot learners. Advances in Neural Information Processing Systems, 33, 1877\u20131901.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6817_CR4","unstructured":"Chen, T., Kornblith, S., Norouzi, M., & Hinton, G.(2020). A simple framework for contrastive learning of visual representations.  International Conference on Machine Learning, pp. 1597\u20131607 . PmLR"},{"key":"6817_CR5","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Jiang, L., & Macherey, W.(2019). Robust neural machine translation with doubly adversarial inputs. arXiv preprint arXiv:1906.02443","DOI":"10.18653\/v1\/P19-1425"},{"key":"6817_CR6","first-page":"10088","volume":"36","author":"T Dettmers","year":"2023","unstructured":"Dettmers, T., Pagnoni, A., Holtzman, A., & Zettlemoyer, L. (2023). Qlora: Efficient finetuning of quantized llms. 
Advances in Neural Information Processing Systems, 36, 10088\u201310115.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"6817_CR7","unstructured":"Devlin, J., Chang, M. W., Lee, K., & Toutanova, K.(2019). Bert: Pre-training of deep bidirectional transformers for language understanding. Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (long and Short Papers), pp. 4171\u20134186"},{"issue":"3","key":"6817_CR8","doi-asserted-by":"publisher","first-page":"220","DOI":"10.1038\/s42256-023-00626-4","volume":"5","author":"N Ding","year":"2023","unstructured":"Ding, N., Qin, Y., Yang, G., Wei, F., Yang, Z., Su, Y., Hu, S., Chen, Y., Chan, C.-M., Chen, W., et al. (2023). Parameter-efficient fine-tuning of large-scale pre-trained language models. Nature Machine Intelligence, 5(3), 220\u2013235.","journal-title":"Nature Machine Intelligence"},{"key":"6817_CR9","unstructured":"Fan, C., Lu, Z., Liu, S., Qu, X., Wei, W., Gu, C., & Cheng, Y.(2025). Make lora great again: Boosting lora with adaptive singular values and mixture-of-experts optimization alignment. arXiv preprint arXiv:2502.16894"},{"key":"6817_CR10","doi-asserted-by":"crossref","unstructured":"Fu, J., Fang, J., Sun, J., Zhuang, S., Geng, L., & Liu, Y.(2024). Loft: Lora-based efficient and robust fine-tuning framework for adversarial training. In: 2024 International Joint Conference on Neural Networks (IJCNN), pp. 1\u20138 . IEEE","DOI":"10.1109\/IJCNN60899.2024.10651480"},{"key":"6817_CR11","doi-asserted-by":"publisher","first-page":"12799","DOI":"10.1609\/aaai.v37i11.26505","volume":"37","author":"Z Fu","year":"2023","unstructured":"Fu, Z., Yang, H., So, A.M.-C., Lam, W., Bing, L., & Collier, N. (2023). On the effectiveness of parameter-efficient fine-tuning. Proceedings of the AAAI Conference on Artificial Intelligence, 37, 12799\u201312807.","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"6817_CR12","doi-asserted-by":"crossref","unstructured":"Gao, J., Wu, H., Cheung, Y.-m., Cao, J., Yu, H., & Zhang, Y.(2025). Mitigating forgetting in adapting pre-trained language models to text processing tasks via consistency alignment. THE WEB CONFERENCE","DOI":"10.1145\/3696410.3714687"},{"key":"6817_CR13","unstructured":"Goodfellow, I. J., Shlens, J., & Szegedy, C.(2014). Explaining and harnessing adversarial examples. arXiv preprint arXiv:1412.6572"},{"key":"6817_CR14","unstructured":"Han, Z., Gao, C., Liu, J., Zhang, J., & Zhang, S.Q.(2024). Parameter-efficient fine-tuning for large models: A comprehensive survey. arXiv preprint arXiv:2403.14608"},{"key":"6817_CR15","unstructured":"He, P., Liu, X., Gao, J., & Chen, W.(2020). Deberta: Decoding-enhanced bert with disentangled attention. arXiv preprint arXiv:2006.03654"},{"key":"6817_CR16","unstructured":"Houlsby, N., Giurgiu, A., Jastrzebski, S., Morrone, B., De\u00a0Laroussilhe, Q., Gesmundo, A., Attariyan, M., & Gelly, S.(2019). Parameter-efficient transfer learning for nlp. International Conference on Machine Learning, pp. 2790\u20132799 . PMLR"},{"issue":"2","key":"6817_CR17","first-page":"3","volume":"1","author":"EJ Hu","year":"2022","unstructured":"Hu, E. J., Shen, Y., Wallis, P., Allen-Zhu, Z., Li, Y., Wang, S., Wang, L., Chen, W., et al. (2022). Lora: Low-rank adaptation of large language models. 
ICLR, 1(2), 3.","journal-title":"ICLR"},{"key":"6817_CR18","unstructured":"Ji, Y., Liu, Y., Zhang, Z., Zhang, Z., Zhao, Y., Zhou, G., Zhang, X., Liu, X., & Zheng, X.(2024). Advlora: Adversarial low-rank adaptation of vision-language models. arXiv preprint arXiv:2404.13425"},{"key":"6817_CR19","doi-asserted-by":"crossref","unstructured":"Jiang, H., He, P., Chen, W., Liu, X., Gao, J., & Zhao, T.(2019). Smart: Robust and efficient fine-tuning for pre-trained natural language models through principled regularized optimization. arXiv preprint arXiv:1911.03437","DOI":"10.18653\/v1\/2020.acl-main.197"},{"issue":"3","key":"6817_CR20","doi-asserted-by":"publisher","first-page":"187","DOI":"10.3390\/info14030187","volume":"14","author":"E Kotei","year":"2023","unstructured":"Kotei, E., & Thirunavukarasu, R. (2023). A systematic review of transformer-based pre-trained language models through self-supervised learning. Information, 14(3), 187.","journal-title":"Information"},{"key":"6817_CR21","unstructured":"Lin, H., Huang, B., Ye, H., Chen, Q., Wang, Z., Li, S., Ma, J., Wan, X., Zou, J., & Liang, Y.(2024). Selecting large language model to fine-tune via rectified scaling law. arXiv preprint arXiv:2402.02314"},{"key":"6817_CR22","doi-asserted-by":"crossref","unstructured":"Liu, Q., Wu, X., Zhao, X., Zhu, Y., Xu, D., Tian, F., & Zheng, Y.(2023). Moelora: An moe-based parameter efficient fine-tuning method for multi-task medical applications. CoRR","DOI":"10.1145\/3626772.3657722"},{"key":"6817_CR23","doi-asserted-by":"crossref","unstructured":"Liu, X., Ji, K., Fu, Y., Tam, W.L., Du, Z., Yang, Z., & Tang, J.(2021). P-tuning v2: Prompt tuning can be comparable to fine-tuning universally across scales and tasks. arXiv preprint arXiv:2110.07602","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"6817_CR24","unstructured":"Liu, Y., Ott, M., Goyal, N., Du, J., Joshi, M., Chen, D., Levy, O., Lewis, M., Zettlemoyer, L., Stoyanov, V.: Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692 (2019)"},{"key":"6817_CR25","unstructured":"Lv, K., Cao, H., Tu, K., Xu, Y., Zhang, Z., Ding, X., & Wang, Y.(2024). Hyper adversarial tuning for boosting adversarial robustness of pretrained large vision models. arXiv preprint arXiv:2410.05951"},{"key":"6817_CR26","unstructured":"Madry, A., Makelov, A., Schmidt, L., Tsipras, D., & Vladu, A.(2017). Towards deep learning models resistant to adversarial attacks. arXiv preprint arXiv:1706.06083"},{"key":"6817_CR27","unstructured":"Mahabadi, R.K., Ruder, S., Dehghani, M., & Henderson, J.(2021). Parameter-efficient multi-task fine-tuning for transformers via shared hypernetworks. arXiv preprint arXiv:2106.04489"},{"key":"6817_CR28","doi-asserted-by":"crossref","unstructured":"Min, S., Zhong, V., Socher, R., & Xiong, C.(2018). Efficient and robust question answering from minimal context over documents. arXiv preprint arXiv:1805.08092","DOI":"10.18653\/v1\/P18-1160"},{"key":"6817_CR29","unstructured":"Peng, S., Wang, M., He, J., Yang, J., & Jia, X.(2025). Cat: Contrastive adversarial training for evaluating the robustness of protective perturbations in latent diffusion models. arXiv preprint arXiv:2502.07225"},{"issue":"140","key":"6817_CR30","first-page":"1","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel, C., Shazeer, N., Roberts, A., Lee, K., Narang, S., Matena, M., Zhou, Y., Li, W., & Liu, P. J. (2020). Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. 
Res., 21(140), 1\u201367.","journal-title":"J. Mach. Learn. Res."},{"key":"6817_CR31","doi-asserted-by":"crossref","unstructured":"Sagduyu, Y. E., & Erpek, T.(2024). Adversarial attack and defense for lora device identification and authentication via deep learning. arXiv preprint arXiv:2412.21164","DOI":"10.1109\/JIOT.2025.3547645"},{"key":"6817_CR32","unstructured":"Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.-A., Lacroix, T., Rozi\u00e8re, B., Goyal, N., Hambro, E., & Azhar, F., et al.(2023). Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971"},{"key":"6817_CR33","doi-asserted-by":"publisher","first-page":"51","DOI":"10.1016\/j.eng.2022.04.024","volume":"25","author":"H Wang","year":"2023","unstructured":"Wang, H., Li, J., Wu, H., Hovy, E., & Sun, Y. (2023). Pre-trained language models and their applications. Engineering, 25, 51\u201365.","journal-title":"Engineering"},{"key":"6817_CR34","unstructured":"Wang, Y., Lin, Y., Zeng, X., & Zhang, G.(2023). Multilora: Democratizing lora for better multi-task learning. arXiv preprint arXiv:2311.11501"},{"key":"6817_CR35","unstructured":"Wang, Z., Panda, R., Karlinsky, L., Feris, R., Sun, H., & Kim, Y.(2023). Multitask prompt tuning enables parameter-efficient transfer learning. arXiv preprint arXiv:2303.02861"},{"key":"6817_CR36","doi-asserted-by":"crossref","unstructured":"Wu, Y., Xiang, Y., Huo, S., Gong, Y., & Liang, P.(2024). Lora-sp: streamlined partial parameter adaptation for resource efficient fine-tuning of large language models. Third International Conference on Algorithms, Microchips, and Network Applications (AMNA 2024), vol. 13171, pp. 488\u2013496 . SPIE","DOI":"10.1117\/12.3032013"},{"key":"6817_CR37","unstructured":"Xia, W., Qin, C., & Hazan, E.(2024). Chain of lora: Efficient fine-tuning of language models via residual learning. arXiv preprint arXiv:2401.04151"},{"key":"6817_CR38","doi-asserted-by":"crossref","unstructured":"Zhan, P., Yang, J., Huang, X., Jing, C., Li, J., & Wang, L.(2023). Contrastive learning with adversarial examples for alleviating pathology of language model. In: Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 6493\u20136508","DOI":"10.18653\/v1\/2023.acl-long.358"},{"key":"6817_CR39","doi-asserted-by":"crossref","unstructured":"Zhang, D., Zhang, K., Chu, S., Wu, L., Li, X., & Si, W.(2025) More: A mixture of low-rank experts for adaptive multi-task learning","DOI":"10.18653\/v1\/2025.findings-acl.68"},{"key":"6817_CR40","doi-asserted-by":"crossref","unstructured":"Zheng, Y., Zhang, R., Zhang, J., Ye, Y., Luo, Z., Feng, Z., & Ma, Y.(2024). Llamafactory: Unified efficient fine-tuning of 100+ language models. arXiv preprint arXiv:2403.13372","DOI":"10.18653\/v1\/2024.acl-demos.38"},{"key":"6817_CR41","unstructured":"Zhu, C., Cheng, Y., Gan, Z., Sun, S., Goldstein, T., & Liu, J.(2019). Freelb: Enhanced adversarial training for natural language understanding. 
arXiv preprint arXiv:1909.11764"}],"container-title":["Machine Learning"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06817-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10994-025-06817-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10994-025-06817-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,8]],"date-time":"2025-09-08T08:44:21Z","timestamp":1757321061000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10994-025-06817-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,29]]},"references-count":41,"journal-issue":{"issue":"9","published-print":{"date-parts":[[2025,9]]}},"alternative-id":["6817"],"URL":"https:\/\/doi.org\/10.1007\/s10994-025-06817-x","relation":{},"ISSN":["0885-6125","1573-0565"],"issn-type":[{"value":"0885-6125","type":"print"},{"value":"1573-0565","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,7,29]]},"assertion":[{"value":"1 March 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 May 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 June 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 July 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"196"}}