{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,17]],"date-time":"2025-11-17T01:11:09Z","timestamp":1763341869751,"version":"3.45.0"},"reference-count":39,"publisher":"Tech Science Press","issue":"2","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["CMC"],"published-print":{"date-parts":[[2025]]},"DOI":"10.32604\/cmc.2025.061359","type":"journal-article","created":{"date-parts":[[2025,3,19]],"date-time":"2025-03-19T04:47:56Z","timestamp":1742359676000},"page":"2809-2828","source":"Crossref","is-referenced-by-count":1,"title":["Causal Representation Enhances Cross-Domain Named Entity Recognition in Large Language Models"],"prefix":"10.32604","volume":"83","author":[{"given":"Jiahao","family":"Wu","sequence":"first","affiliation":[]},{"given":"Jinzhong","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Xiaoming","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Guan","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Jie","family":"Liu","sequence":"additional","affiliation":[]}],"member":"17807","published-online":{"date-parts":[[2025]]},"reference":[{"key":"ref1","unstructured":"Brown TB, Mann B, Ryder N. Language models are few-shot learners. arXiv:2005.14165. 2020."},{"key":"ref2","doi-asserted-by":"crossref","unstructured":"Alqaaidi SK, Bozorgi E. A survey on recent named entity recognition and relation classification methods with focus on few-shot learning approaches. arXiv:2310.19055. 2023.","DOI":"10.5220\/0012791600003756"},{"key":"ref3","unstructured":"Wei J, Wang X. Chain of thought prompting elicits reasoning in large language models. arXiv:2201.11903. 2022."},{"key":"ref4","first-page":"1753","article-title":"Evolution and prospects of foundation models: from large language models to large multimodal models","volume":"80","author":"Chen","year":"2024","journal-title":"Comput Mater Contin"},{"key":"ref5","series-title":"Proceedings of the ACM Turing Award Celebration Conference\u2014China 2023. ACM TURC \u201923","first-page":"3","article-title":"Causal inspired trustworthy machine learning","author":"Kuang","year":"2023"},{"key":"ref6","series-title":"Findings of the Association for Computational Linguistics: ACL 2023","first-page":"4631","article-title":"SEAG: structure-aware event causality generation","author":"Tao","year":"2023"},{"key":"ref7","unstructured":"Lin J, Zhou J, Chen Q. Causal intervention-based prompt debiasing for event argument extraction. arXiv:2210.01561. 2022."},{"key":"ref8","first-page":"2627","author":"Ren","year":"2023","journal-title":"Findings of the association for computational linguistics: EMNLP 2023"},{"key":"ref9","series-title":"Findings of the Association for Computational Linguistics: NAACL 2022","first-page":"2222","article-title":"A label-aware autoregressive framework for cross-domain NER","author":"Hu","year":"2022"},{"key":"ref10","series-title":"Findings of the Association for Computational Linguistics: EMNLP 2023","first-page":"15635","article-title":"Causal intervention-based few-shot named entity recognition","author":"Yang","year":"2023"},{"key":"ref11","doi-asserted-by":"crossref","unstructured":"Kalyan KS. A survey of GPT-3 family large language models including ChatGPT and GPT-4. arXiv:2310.12321. 2023.","DOI":"10.2139\/ssrn.4593895"},{"key":"ref12","series-title":"Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing","first-page":"479","article-title":"How transferable are neural networks in NLP applications?","author":"Mou","year":"2016"},{"key":"ref13","series-title":"Proceedings of the 29th International Conference on Computational Linguistics","first-page":"2147","article-title":"DoSEA: a domain-specific entity-aware framework for cross-domain named entity recognition","author":"Tang","year":"2022"},{"key":"ref14","series-title":"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics","first-page":"5849","article-title":"A unified MRC framework for named entity recognition","author":"Li","year":"2020"},{"key":"ref15","series-title":"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics","first-page":"8476","article-title":"Multi-domain named entity recognition with genre-aware and agnostic inference","author":"Wang","year":"2020"},{"key":"ref16","series-title":"Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"6338","article-title":"CONTaiNER: few-shot named entity recognition via contrastive learning","author":"Das","year":"2022"},{"key":"ref17","series-title":"Findings of the Association for Computational Linguistics: ACL 2023","first-page":"3869","article-title":"Improving named entity recognition via bridge-based domain adaptation","author":"Xu","year":"2023"},{"key":"ref18","unstructured":"Han R, Yang C, Peng T. An empirical study on information extraction using large language models. arXiv:2305.14450. 2024."},{"key":"ref19","doi-asserted-by":"crossref","unstructured":"Qin C, Zhang A, Zhang Z. Is ChatGPT a general-purpose natural language processing task solver? arXiv:2302.06476. 2023.","DOI":"10.18653\/v1\/2023.emnlp-main.85"},{"key":"ref20","series-title":"Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"14014","article-title":"Mitigating label biases for in-context learning","author":"Fei","year":"2023"},{"key":"ref21","series-title":"Findings of the Association for Computational Linguistics: EMNLP 2023","first-page":"15173","article-title":"A causal view of entity bias in (Large) language models","author":"Wang","year":"2023"},{"key":"ref22","unstructured":"Ye J, Xu N, Wang Y, Zhou J, Zhang Q, Gui T, et al. LLM-DA: data augmentation via large language models for few-shot named entity recognition. arXiv:2402.14568. 2024."},{"key":"ref23","series-title":"Findings of the Association for Computational Linguistics: EMNLP 2022","first-page":"4497","article-title":"Thinking about GPT-3 In-context learning for biomedical IE? Think again","author":"Jimenez Gutierrez","year":"2022"},{"key":"ref24","series-title":"Findings of the Association for Computational Linguistics: ACL 2023","first-page":"794","article-title":"Aligning instruction tasks unlocks large language models as zero-shot relation extractors","author":"Zhang","year":"2023"},{"key":"ref25","doi-asserted-by":"crossref","unstructured":"Tang K, Niu Y, Huang J. Unbiased scene graph generation from biased training. arXiv:2002.11949. 2020.","DOI":"10.1109\/CVPR42600.2020.00377"},{"key":"ref26","doi-asserted-by":"crossref","unstructured":"Lin Z, Ding H, Hoang NT. Pre-trained recommender systems: a causal debiasing perspective. arXiv:2310.19251. 2024.","DOI":"10.1145\/3616855.3635779"},{"key":"ref27","series-title":"Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"5796","article-title":"Can prompt probe pretrained language models? Understanding the invisible risks from a causal view","author":"Cao","year":"2022"},{"key":"ref28","series-title":"Findings of the Association for Computational Linguistics: ACL 2022","first-page":"2670","article-title":"Cross-domain Named entity recognition via graph matching","author":"Zheng","year":"2022"},{"key":"ref29","series-title":"Proceedings of the Seventh Conference on Natural Language Learning at HLT-NAACL 2003","first-page":"142","article-title":"Introduction to the CoNLL-2003 Shared Task: language-Independent Named Entity Recognition","author":"Tjong Kim Sang","year":"2003"},{"key":"ref30","series-title":"Proceedings of the BioNLP Shared Task 2013 Workshop","first-page":"1","article-title":"Overview of BioNLP shared task 2013","author":"Bossy","year":"2013"},{"key":"ref31","series-title":"2013 IEEE International Conference on Acoustics, Speech and Signal Processing","first-page":"8386","article-title":"Asgard: a portable architecture for multilingual dialogue systems","author":"Liu","year":"2013"},{"key":"ref32","doi-asserted-by":"crossref","unstructured":"Liu Z, Xu Y, Yu T, Dai W, Ji Z, Cahyawijaya S, et al. CrossNER: evaluating cross-domain named entity recognition. arXiv:2012.04373. 2020.","DOI":"10.1609\/aaai.v35i15.17587"},{"key":"ref33","series-title":"Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers)","first-page":"4171","article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2019"},{"key":"ref34","series-title":"Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics","first-page":"19","article-title":"Coach: a coarse-to-fine approach for cross-domain slot filling","author":"Liu","year":"2020"},{"key":"ref35","series-title":"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)","first-page":"6365","article-title":"Simple and effective few-shot named entity recognition with structured nearest neighbor learning","author":"Yang","year":"2020"},{"key":"ref36","series-title":"Proceedings of the 29th International Conference on Computational Linguistics","first-page":"2374","article-title":"LightNER: a lightweight tuning paradigm for low-resource NER via pluggable prompting","author":"Chen","year":"2022"},{"key":"ref37","series-title":"Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","first-page":"5721","article-title":"Template-free prompt tuning for few-shot NER","author":"Ma","year":"2022"},{"key":"ref38","doi-asserted-by":"crossref","unstructured":"Chen X, Li L, Qiao S. One model for all domains: collaborative domain-prefix tuning for cross-domain NER. arXiv:2301.10410. 2023.","DOI":"10.24963\/ijcai.2023\/559"},{"key":"ref39","doi-asserted-by":"crossref","unstructured":"Ma Y, Cao Y, Hong Y. Large language model is not a good few-shot information extractor, but a good reranker for hard samples! In: Findings of the Association for Computational Linguistics: EMNLP 2023; 2023; Singapore: Association for Computational Linguistics. p. 10572\u2013601.","DOI":"10.18653\/v1\/2023.findings-emnlp.710"}],"container-title":["Computers, Materials &amp; Continua"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/cdn.techscience.cn\/files\/cmc\/2025\/TSP_CMC-83-2\/TSP_CMC_61359\/TSP_CMC_61359.pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,17]],"date-time":"2025-11-17T01:06:27Z","timestamp":1763341587000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.techscience.com\/cmc\/v83n2\/60551"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":39,"journal-issue":{"issue":"2","published-online":{"date-parts":[[2025]]},"published-print":{"date-parts":[[2025]]}},"URL":"https:\/\/doi.org\/10.32604\/cmc.2025.061359","relation":{},"ISSN":["1546-2226"],"issn-type":[{"type":"electronic","value":"1546-2226"}],"subject":[],"published":{"date-parts":[[2025]]}}}