{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,3]],"date-time":"2025-12-03T18:11:02Z","timestamp":1764785462883,"version":"3.28.0"},"reference-count":22,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,8,4]],"date-time":"2024-08-04T00:00:00Z","timestamp":1722729600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,8,4]],"date-time":"2024-08-04T00:00:00Z","timestamp":1722729600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,8,4]]},"DOI":"10.1109\/ialp63756.2024.10661135","type":"proceedings-article","created":{"date-parts":[[2024,9,10]],"date-time":"2024-09-10T18:23:27Z","timestamp":1725992607000},"page":"168-174","source":"Crossref","is-referenced-by-count":2,"title":["Enhanced Discriminative Fine-Tuning of Large Language Models for Chinese Text Classification"],"prefix":"10.1109","author":[{"given":"Jinwang","family":"Song","sequence":"first","affiliation":[{"name":"Zhengzhou University,School of Computer and Artificial Intelligence,Zhengzhou,China"}]},{"given":"Hongying","family":"Zan","sequence":"additional","affiliation":[{"name":"Zhengzhou University,School of Computer and Artificial Intelligence,Zhengzhou,China"}]},{"given":"Kunli","family":"Zhang","sequence":"additional","affiliation":[{"name":"Zhengzhou University,School of Computer and Artificial Intelligence,Zhengzhou,China"}]}],"member":"263","reference":[{"article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","year":"2018","author":"Devlin","key":"ref1"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"author":"Radford","key":"ref3","article-title":"Improving language understanding by generative pre-training"},
{"article-title":"Language models are unsupervised multitask learners","year":"2019","author":"Radford","key":"ref4"},{"key":"ref5","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"article-title":"Llama: Open and efficient foundation language models","year":"2023","author":"Touvron","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.603"},{"article-title":"Sgpt: Gpt sentence embeddings for semantic search","year":"2022","author":"Muennighoff","key":"ref8"},{"article-title":"Label supervised llama finetuning","year":"2023","author":"Li","key":"ref9"},{"article-title":"Lora: Low-rank adaptation of large language models","year":"2021","author":"Hu","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-022-03190-3"},{"article-title":"Utilizing bert intermediate layers for aspect based sentiment analysis and natural language inference","year":"2020","author":"Song","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.coling-main.419"},{"article-title":"A rank stabilization scaling factor for fine-tuning with lora","year":"2023","author":"Kalajdzievski","key":"ref16"},{"article-title":"Mish: A self regularized non-monotonic activation function","year":"2019","author":"Misra","key":"ref17"},{"article-title":"Roberta: A robustly optimized bert pretraining approach","year":"2019","author":"Liu","key":"ref18"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3124365"},{"article-title":"Ernie: Enhanced representation through knowledge integration","year":"2019","author":"Sun","key":"ref20"},
{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.58"},{"key":"ref22","first-page":"10524","article-title":"On layer normalization in the transformer architecture","volume-title":"International Conference on Machine Learning","author":"Xiong"}],"event":{"name":"2024 International Conference on Asian Language Processing (IALP)","start":{"date-parts":[[2024,8,4]]},"location":"Hohhot, China","end":{"date-parts":[[2024,8,6]]}},"container-title":["2024 International Conference on Asian Language Processing (IALP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10660661\/10660673\/10661135.pdf?arnumber=10661135","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,11]],"date-time":"2024-09-11T09:12:37Z","timestamp":1726045957000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10661135\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,4]]},"references-count":22,"URL":"https:\/\/doi.org\/10.1109\/ialp63756.2024.10661135","relation":{},"subject":[],"published":{"date-parts":[[2024,8,4]]}}}