{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,4]],"date-time":"2026-03-04T16:34:31Z","timestamp":1772642071883,"version":"3.50.1"},"reference-count":83,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1109\/tnnls.2023.3330926","type":"journal-article","created":{"date-parts":[[2023,11,30]],"date-time":"2023-11-30T14:14:33Z","timestamp":1701353673000},"page":"1898-1910","source":"Crossref","is-referenced-by-count":13,"title":["Improving Pretrained Language Model Fine-Tuning With Noise Stability Regularization"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5441-5776","authenticated-orcid":false,"given":"Hang","family":"Hua","sequence":"first","affiliation":[{"name":"Department of Computer Science, University of Rochester, Rochester, NY, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8073-7552","authenticated-orcid":false,"given":"Xingjian","family":"Li","sequence":"additional","affiliation":[{"name":"Computational Biology Department, Carnegie Mellon University, Pittsburgh, PA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2949-6874","authenticated-orcid":false,"given":"Dejing","family":"Dou","sequence":"additional","affiliation":[{"name":"BCG in Greater China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9480-0356","authenticated-orcid":false,"given":"Cheng-Zhong","family":"Xu","sequence":"additional","affiliation":[{"name":"State Key Laboratory of IOTSC, Faculty of Science and Technology, University of Macau, Macau, SAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4516-9729","authenticated-orcid":false,"given":"Jiebo","family":"Luo","sequence":"additional","affiliation":[{"name":"Department of Computer Science, University of Rochester, Rochester, NY, USA"}]}],"member":"263","reference":[{"key":"ref1","article-title":"REALM: Retrieval-augmented language model pre-training","author":"Guu","year":"2020","journal-title":"arXiv:2002.08909"},{"key":"ref2","article-title":"Fine-tune BERT for extractive summarization","author":"Liu","year":"2019","journal-title":"arXiv:1903.10318"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1585"},{"key":"ref4","article-title":"Incorporating BERT into neural machine translation","author":"Zhu","year":"2020","journal-title":"arXiv:2002.06823"},{"key":"ref5","article-title":"ELECTRA: Pre-training text encoders as discriminators rather than generators","author":"Clark","year":"2020","journal-title":"arXiv:2003.10555"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00300"},{"issue":"140","key":"ref7","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref8","article-title":"BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension","author":"Lewis","year":"2019","journal-title":"arXiv:1910.13461"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W18-5446"},{"key":"ref10","article-title":"SuperGLUE: A stickier benchmark for general-purpose language understanding systems","author":"Wang","year":"2019","journal-title":"arXiv:1905.00537"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1250"},{"key":"ref12","article-title":"How context affects language models\u2019 factual predictions","author":"Petroni","year":"2020","journal-title":"arXiv:2005.04611"},{"key":"ref13","article-title":"Cross-lingual language model pretraining","author":"Lample","year":"2019","journal-title":"arXiv:1901.07291"},{"key":"ref14","article-title":"PromptCap: Prompt-guided task-aware image captioning","author":"Hu","year":"2022","journal-title":"arXiv:2211.09699"},{"key":"ref15","article-title":"VideoXum: Cross-modal visual and textural summarization of videos","author":"Lin","year":"2023","journal-title":"arXiv:2303.12060"},{"key":"ref16","article-title":"GRAPH-BERT: Only attention is needed for learning graph representations","author":"Zhang","year":"2020","journal-title":"arXiv:2001.05140"},{"key":"ref17","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume-title":"Proc. NAACL-HLT","author":"Devlin"},{"key":"ref18","article-title":"Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping","author":"Dodge","year":"2020","journal-title":"arXiv:2002.06305"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1995.7.1.108"},{"key":"ref20","article-title":"Adding noise to the input of a model trained with a regularized objective","author":"Rifai","year":"2011","journal-title":"arXiv:1104.3250"},{"key":"ref21","first-page":"254","article-title":"Stronger generalization bounds for deep nets via a compression approach","volume-title":"Proc. 35th Int. Conf. Mach. Learn.","author":"Arora"},{"key":"ref22","first-page":"4356","article-title":"How should pre-trained language models be fine-tuned towards adversarial robustness?","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Dong"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.258"},{"key":"ref24","volume-title":"Method and apparatus for transfer learning","author":"Li","year":"2022"},{"key":"ref25","first-page":"1","article-title":"Explicit inductive bias for transfer learning with convolutional networks","volume-title":"Proc. ICML","author":"Li"},{"key":"ref26","article-title":"Mixout: Effective regularization to finetune large-scale pretrained language models","author":"Lee","year":"2019","journal-title":"arXiv:1909.11299"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.197"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-62010-2"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1016\/0893-6080(91)90033-2"},{"issue":"30","key":"ref30","first-page":"487","volume-title":"Solutions of Ill-Posed Problems","volume":"1","author":"Tikhonov","year":"1977"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1126\/science.290.5500.2323"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2007.70735"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1142\/9789812779861_0015"},{"issue":"1","key":"ref34","first-page":"1","volume-title":"Algorithms for Manifold Learning","volume":"12","author":"Cayton","year":"2005"},{"issue":"11","key":"ref35","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"van der Maaten","year":"2008","journal-title":"J. Mach. Learn. Res."},{"key":"ref36","article-title":"L2 regularization versus batch and weight normalization","author":"van Laarhoven","year":"2017","journal-title":"arXiv:1706.05350"},{"key":"ref37","first-page":"1","article-title":"Three mechanisms of weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhang"},{"key":"ref38","first-page":"1","article-title":"Intriguing properties of neural networks","volume-title":"Proc. 2nd Int. Conf. Learn. Represent. (ICLR)","author":"Szegedy"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.06083"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00290"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1016\/0005-2795(75)90109-9"},{"key":"ref42","first-page":"9","article-title":"Automatically constructing a corpus of sentential paraphrases","volume-title":"Proc. 3rd Int. Workshop Paraphrasing (IWP)","author":"Dolan"},{"key":"ref43","first-page":"177","article-title":"The PASCAL recognising textual entailment challenge","volume-title":"Proc. Mach. Learn. Challenges Workshop","author":"Dagan"},{"key":"ref44","first-page":"785","article-title":"The second PASCAL recognising textual entailment challenge","volume-title":"Proc. 2nd PASCAL Challenges Workshop Recognising Textual Entailment","author":"Bar-Haim"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.3115\/1654536.1654538"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/S17-2001"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1264"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-5801"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1907.11692"},{"issue":"56","key":"ref50","first-page":"1929","article-title":"Dropout: A simple way to prevent neural networks from overfitting","volume":"15","author":"Srivastava","year":"2014","journal-title":"J. Mach. Learn. Res."},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.5555\/3042817.3043055"},{"key":"ref52","article-title":"FreeLB: Enhanced adversarial training for natural language understanding","author":"Zhu","year":"2019","journal-title":"arXiv:1909.11764"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.322"},{"key":"ref54","article-title":"HuggingFace\u2019s Transformers: State-of-the-art natural language processing","author":"Wolf","year":"2019","journal-title":"arXiv:1910.03771"},{"key":"ref55","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/tbdata.2019.2921572"},{"key":"ref57","article-title":"Better fine-tuning by reducing representational collapse","author":"Aghajanyan","year":"2020","journal-title":"arXiv:2008.03156"},{"key":"ref58","first-page":"1","article-title":"Robustness may be at odds with accuracy","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Tsipras"},{"key":"ref59","first-page":"1","article-title":"Isotropy in the contextual embedding space: Clusters and manifolds","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Cai"},{"key":"ref60","first-page":"153","article-title":"The difficulty of training deep architectures and the effect of unsupervised pre-training","volume-title":"Proc. 12th Int. Conf. Artif. Intell. Statist.","author":"Erhan"},{"issue":"19","key":"ref61","first-page":"625","article-title":"Why does unsupervised pre-training help deep learning?","volume":"11","author":"Erhan","year":"2010","journal-title":"J. Mach. Learn. Res."},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1162"},{"key":"ref63","article-title":"Distributed representations of words and phrases and their compositionality","author":"Mikolov","year":"2013","journal-title":"arXiv:1310.4546"},{"key":"ref64","first-page":"1","article-title":"Semi-supervised sequence learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Dai"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/N18-1202"},{"key":"ref66","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018"},{"issue":"8","key":"ref67","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref68","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Brown"},{"key":"ref69","first-page":"1","article-title":"XLNet: Generalized autoregressive pretraining for language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Yang"},{"key":"ref70","volume-title":"NeurIPS 2019 Reproducibility Challenge: Controllable Unsupervised Text Attribute Transfer via Editing Entangled Latent Representation","author":"Wen","year":"2019"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3094987"},{"key":"ref72","article-title":"Revisiting few-sample BERT fine-tuning","author":"Zhang","year":"2020","journal-title":"arXiv:2006.05987"},{"key":"ref73","article-title":"On the stability of fine-tuning BERT: Misconceptions, explanations, and strong baselines","author":"Mosbach","year":"2020","journal-title":"arXiv:2006.04884"},{"key":"ref74","first-page":"1","article-title":"Decoupled weight decay regularization","volume-title":"Proc. ICLR","author":"Loshchilov"},{"key":"ref75","article-title":"Spectral norm regularization for improving the generalizability of deep learning","author":"Yoshida","year":"2017","journal-title":"arXiv:1705.10941"},{"key":"ref76","article-title":"Adversarial training is a form of data-dependent operator norm regularization","author":"Roth","year":"2019","journal-title":"arXiv:1906.01527"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3059669"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1186\/s12859-015-0564-6"},{"key":"ref79","article-title":"DROP: A reading comprehension benchmark requiring discrete reasoning over paragraphs","author":"Dua","year":"2019","journal-title":"arXiv:1903.00161"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1156"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1082"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/K17-1034"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.571"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/5962385\/10832116\/10335919.pdf?arnumber=10335919","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,5]],"date-time":"2025-12-05T18:39:24Z","timestamp":1764959964000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10335919\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,1]]},"references-count":83,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2023.3330926","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"value":"2162-237X","type":"print"},{"value":"2162-2388","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,1]]}}}