{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,16]],"date-time":"2026-03-16T10:54:49Z","timestamp":1773658489425,"version":"3.50.1"},"reference-count":40,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T00:00:00Z","timestamp":1769644800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T00:00:00Z","timestamp":1769644800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100021171","name":"Guangdong Basic and Applied Basic Research Foundation","doi-asserted-by":"crossref","award":["2023A1515011370"],"award-info":[{"award-number":["2023A1515011370"]}],"id":[{"id":"10.13039\/501100021171","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["32371114"],"award-info":[{"award-number":["32371114"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Characteristic Innovation Projects of Guangdong Colleges and Universities","award":["2018KTSCX049"],"award-info":[{"award-number":["2018KTSCX049"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int. J. Mach. Learn. 
&amp; Cyber."],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1007\/s13042-025-02849-w","type":"journal-article","created":{"date-parts":[[2026,1,29]],"date-time":"2026-01-29T14:44:00Z","timestamp":1769697840000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Chain of evidence for few-shot NER with large language models"],"prefix":"10.1007","volume":"17","author":[{"given":"Chengyan","family":"Wu","sequence":"first","affiliation":[]},{"given":"Qi","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yun","family":"Xue","sequence":"additional","affiliation":[]},{"given":"Hongya","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,29]]},"reference":[{"key":"2849_CR1","doi-asserted-by":"crossref","unstructured":"Fritzler A, Logacheva V, Kretov M (2019) Few-shot classification in named entity recognition task. In: Proceedings of the 34th ACM\/SIGAPP symposium on applied computing, pp 993\u20131000","DOI":"10.1145\/3297280.3297378"},{"key":"2849_CR2","doi-asserted-by":"crossref","unstructured":"Yang Y, Katiyar A (2020) Simple and effective few-shot named entity recognition with structured nearest neighbor learning. In: Proceedings of the 2020 conference on empirical methods in natural language processing (EMNLP), pp 6365\u20136375","DOI":"10.18653\/v1\/2020.emnlp-main.516"},{"key":"2849_CR3","doi-asserted-by":"crossref","unstructured":"Li Y, Yu Y, Qian T (2023) Type-aware decomposed framework for few-shot named entity recognition. In: Findings of the association for computational linguistics (EMNLP), pp 8911\u20138927","DOI":"10.18653\/v1\/2023.findings-emnlp.598"},{"key":"2849_CR4","doi-asserted-by":"crossref","unstructured":"Ma T, Jiang H, Wu Q, Zhao T, Lin C (2022) Decomposed meta-learning for few-shot named entity recognition. 
In: Findings of the association for computational linguistics (ACL), pp 1584\u20131596","DOI":"10.18653\/v1\/2022.findings-acl.124"},{"key":"2849_CR5","doi-asserted-by":"crossref","unstructured":"Qin C, Zhang A, Zhang Z, Chen J, Yasunaga M, Yang D (2023) Is chatgpt a general-purpose natural language processing task solver? In: Proceedings of the 2023 conference on empirical methods in natural language processing (EMNLP), pp 1339\u20131384","DOI":"10.18653\/v1\/2023.emnlp-main.85"},{"key":"2849_CR6","doi-asserted-by":"crossref","unstructured":"Liang Y, Wang J, Zhu H, Wang L, Qian W, Lan Y (2023) Prompting large language models with chain-of-thought for few-shot knowledge base question generation. In: Proceedings of the 2023 conference on empirical methods in natural language processing (EMNLP), pp 4329\u20134343","DOI":"10.18653\/v1\/2023.emnlp-main.263"},{"key":"2849_CR7","doi-asserted-by":"crossref","unstructured":"Min S, Lyu X, Holtzman A, Artetxe M, Lewis M, Hajishirzi H, Zettlemoyer L (2022) Rethinking the role of demonstrations: What makes in-context learning work? In: Proceedings of the 2022 conference on empirical methods in natural language processing (EMNLP), pp 11048\u201311064","DOI":"10.18653\/v1\/2022.emnlp-main.759"},{"key":"2849_CR8","doi-asserted-by":"crossref","unstructured":"Xie T, Li Q, Zhang J, Zhang Y, Liu Z, Wang H (2023) Empirical study of zero-shot NER with chatgpt. In: Proceedings of the 2023 conference on empirical methods in natural language processing (EMNLP), pp 7935\u20137956","DOI":"10.18653\/v1\/2023.emnlp-main.493"},{"key":"2849_CR9","first-page":"24824","volume":"35","author":"J Wei","year":"2022","unstructured":"Wei J, Wang X, Schuurmans D, Bosma M, Xia F, Chi E, Le QV, Zhou D et al (2022) Chain-of-thought prompting elicits reasoning in large language models. 
Adv Neural Inf Process Syst 35:24824\u201324837","journal-title":"Adv Neural Inf Process Syst"},{"key":"2849_CR10","first-page":"22199","volume":"35","author":"T Kojima","year":"2022","unstructured":"Kojima T, Gu SS, Reid M, Matsuo Y, Iwasawa Y (2022) Large language models are zero-shot reasoners. Adv Neural Inf Process Syst 35:22199\u201322213","journal-title":"Adv Neural Inf Process Syst"},{"key":"2849_CR11","doi-asserted-by":"publisher","unstructured":"Wu Y, Han X, Song W, Cheng M, Li F (2024) Mindmap: Constructing evidence chains for multi-step reasoning in large language models. In: Wooldridge MJ, Dy JG, Natarajan S (eds) Thirty-eighth AAAI conference on artificial intelligence, AAAI 2024, thirty-sixth conference on innovative applications of artificial intelligence, IAAI 2024, fourteenth symposium on educational advances in artificial intelligence, EAAI 2024, February 20\u201327, 2024, Vancouver, Canada, pp 19270\u201319278. AAAI Press. https:\/\/doi.org\/10.1609\/AAAI.V38I17.29896","DOI":"10.1609\/AAAI.V38I17.29896"},{"key":"2849_CR12","doi-asserted-by":"crossref","unstructured":"Ma X, Li J, Zhang M (2023) Chain of thought with explicit evidence reasoning for few-shot relation extraction. In: Findings of the association for computational linguistics (EMNLP), pp 2334\u20132352","DOI":"10.18653\/v1\/2023.findings-emnlp.153"},{"key":"2849_CR13","doi-asserted-by":"crossref","unstructured":"Josifoski M, Sakota M, Peyrard M, West R (2023) Exploiting asymmetry for synthetic training data generation: Synthie and the case of information extraction. In: Proceedings of the 2023 conference on empirical methods in natural language processing (EMNLP), pp 1555\u20131574","DOI":"10.18653\/v1\/2023.emnlp-main.96"},{"key":"2849_CR14","doi-asserted-by":"crossref","unstructured":"Wan Z, Cheng F, Mao Z, Liu Q, Song H, Li J, Kurohashi S (2023) GPT-RE: in-context learning for relation extraction using large language models. 
In: Proceedings of the 2023 conference on empirical methods in natural language processing (EMNLP), pp 3534\u20133547","DOI":"10.18653\/v1\/2023.emnlp-main.214"},{"key":"2849_CR15","doi-asserted-by":"crossref","unstructured":"Cui L, Wu Y, Liu J, Yang S, Zhang Y (2021) Template-based named entity recognition using BART. In: Findings of the association for computational linguistics (ACL\/IJCNLP), pp 1835\u20131845","DOI":"10.18653\/v1\/2021.findings-acl.161"},{"key":"2849_CR16","unstructured":"Huang Y, He K, Wang Y, Zhang X, Gong T, Mao R, Li C (2022) COPNER: contrastive learning with prompt guiding for few-shot named entity recognition. In: Proceedings of the 29th international conference on computational linguistics (COLING), pp 2515\u20132527"},{"key":"2849_CR17","doi-asserted-by":"crossref","unstructured":"Lee D, Kadakia A, Tan K, Agarwal M, Feng X, Shibuya T, Mitani R, Sekiya T, Pujara J, Ren X (2022) Good examples make A faster learner: Simple demonstration-based learning for low-resource NER. In: Proceedings of the 60th annual meeting of the association for computational linguistics (ACL), pp 2687\u20132700","DOI":"10.18653\/v1\/2022.acl-long.192"},{"key":"2849_CR18","unstructured":"Snell J, Swersky K, Zemel R (2017) Prototypical networks for few-shot learning. Adv Neural Inform Process Syst 30"},{"key":"2849_CR19","unstructured":"Ji B, Li S, Gan S, Yu J, Ma J, Liu H, Yang J (2022) Few-shot named entity recognition with entity-level prototypical network enhanced by dispersedly distributed prototypes. In: Proceedings of the 29th international conference on computational linguistics (COLING), pp 1842\u20131854"},{"key":"2849_CR20","doi-asserted-by":"crossref","unstructured":"Das SSS, Katiyar A, Passonneau RJ, Zhang R (2022) Container: Few-shot named entity recognition via contrastive learning. 
In: Proceedings of the 60th annual meeting of the association for computational linguistics (ACL), pp 6338\u20136353","DOI":"10.18653\/v1\/2022.acl-long.439"},{"key":"2849_CR21","doi-asserted-by":"crossref","unstructured":"Hou Y, Che W, Lai Y, Zhou Z, Liu Y, Liu H, Liu T (2020) Few-shot slot tagging with collapsed dependency transfer and label-enhanced task-adaptive projection network. In: Proceedings of the 58th annual meeting of the association for computational linguistics (ACL), pp 1381\u20131393","DOI":"10.18653\/v1\/2020.acl-main.128"},{"key":"2849_CR22","unstructured":"Ji B (2023) Vicunaner: Zero\/few-shot named entity recognition using vicuna. Preprint arXiv:2305.03253"},{"key":"2849_CR23","unstructured":"Smith S, Patwary M, et al (2022) Using deepspeed and megatron to train megatron-turing nlg 530b, a large-scale generative language model. Preprint arXiv:2201.11990"},{"key":"2849_CR24","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown T, Mann B, Ryder N, Subbiah M, Kaplan JD, Dhariwal P, Neelakantan A, Shyam P, Sastry G, Askell A et al (2020) Language models are few-shot learners. Adv Neural Inf Process Syst 33:1877\u20131901","journal-title":"Adv Neural Inf Process Syst"},{"key":"2849_CR25","unstructured":"Rae JW, Borgeaud S, Cai T, Millican K, Hoffmann J, Song F, Aslanides J, Henderson S, Ring R, Young S, et al (2021) Scaling language models: methods, analysis and insights from training gopher. Preprint arXiv:2112.11446"},{"issue":"240","key":"2849_CR26","first-page":"1","volume":"24","author":"A Chowdhery","year":"2023","unstructured":"Chowdhery A, Narang S et al (2023) Palm: scaling language modeling with pathways. J Mach Learn Res 24(240):1\u2013113","journal-title":"J Mach Learn Res"},{"key":"2849_CR27","doi-asserted-by":"crossref","unstructured":"Roberts A, Raffel C, Shazeer N (2020) How much knowledge can you pack into the parameters of a language model? 
In: Proceedings of the 2020 conference on empirical methods in natural language processing (EMNLP), pp 5418\u20135426","DOI":"10.18653\/v1\/2020.emnlp-main.437"},{"key":"2849_CR28","unstructured":"Raffel C, Shazeer N, Roberts, et al (2020) Exploring the limits of transfer learning with a unified text-to-text transformer. J Mach Learn Res 21(140):1\u201367"},{"key":"2849_CR29","unstructured":"Wang S, Sun X, Li X, Ouyang R, Wu F, Zhang T, Li J, Wang G (2023) Gpt-ner: named entity recognition via large language models. Preprint arXiv:2304.10428"},{"key":"2849_CR30","unstructured":"Ashok D, Lipton ZC (2023) Promptner: Prompting for named entity recognition. Preprint arXiv:2305.15444"},{"key":"2849_CR31","doi-asserted-by":"crossref","unstructured":"Bogdanov S, Constantin A, Bernard T, Crabb\u00e9 B, Bernard E (2024) Nuner: Entity recognition encoder pre-training via llm-annotated data. In: Al-Onaizan Y, Bansal M, Chen Y (eds) Proceedings of the 2024 conference on empirical methods in natural language processing, EMNLP pp 11829\u201311841","DOI":"10.18653\/v1\/2024.emnlp-main.660"},{"key":"2849_CR32","doi-asserted-by":"crossref","unstructured":"Wang S, Sun X, Li X, Ouyang R, Wu F, Zhang T, Li J, Wang G, Guo C (2025) GPT-NER: Named entity recognition via large language models. In: Chiruzzo L, Ritter A, Wang L (eds) Findings of the association for computational linguistics: NAACL 2025, pp 4257\u20134275. Association for computational linguistics, Albuquerque, New Mexico. https:\/\/aclanthology.org\/2025.findings-naacl.239\/","DOI":"10.18653\/v1\/2025.findings-naacl.239"},{"key":"2849_CR33","doi-asserted-by":"crossref","unstructured":"Liu J, Shen D, Zhang Y, Dolan B, Carin L, Chen W (2022) What makes good in-context examples for gpt-3? 
In: Proceedings of deep learning inside out: The 3rd workshop on knowledge extraction and integration for deep learning architectures, DeeLIO@ACL, pp 100\u2013114","DOI":"10.18653\/v1\/2022.deelio-1.10"},{"key":"2849_CR34","doi-asserted-by":"crossref","unstructured":"Lu Y, Bartolo M, Moore A, Riedel S, Stenetorp P (2022) Fantastically ordered prompts and where to find them: Overcoming few-shot prompt order sensitivity. In: Proceedings of the 60th annual meeting of the association for computational linguistics (ACL), pp 8086\u20138098","DOI":"10.18653\/v1\/2022.acl-long.556"},{"key":"2849_CR35","unstructured":"Xie T, Li Q, Zhang Y, Liu Z, Wang H (2023) Self-improving for zero-shot named entity recognition with large language models. Preprint arXiv:2311.08921"},{"key":"2849_CR36","unstructured":"Zhang Z, Zhang A, Li M, Smola A (2023) Automatic chain of thought prompting in large language models. In: The eleventh international conference on learning representations (ICLR)"},{"key":"2849_CR37","doi-asserted-by":"crossref","unstructured":"Ma J, Ballesteros M, Doss S, Anubhai R, Mallya S, Al-Onaizan Y, Roth D (2022) Label semantics for few shot named entity recognition. In: Findings of the association for computational linguistics (ACL), pp 1956\u20131971","DOI":"10.18653\/v1\/2022.findings-acl.155"},{"key":"2849_CR38","doi-asserted-by":"crossref","unstructured":"Ding N, Xu G, Chen Y, Wang X, Han X, Xie P, Zheng H, Liu Z (2021) Few-nerd: A few-shot named entity recognition dataset. In: Proceedings of the 59th annual meeting of the association for computational linguistics and the 11th international joint conference on natural language processing (ACL\/IJCNLP), pp 3198\u20133213","DOI":"10.18653\/v1\/2021.acl-long.248"},{"key":"2849_CR39","doi-asserted-by":"crossref","unstructured":"Sang EFTK, Meulder FD (2003) Introduction to the conll-2003 shared task: language-independent named entity recognition. 
In: Proceedings of the seventh conference on natural language learning (CoNLL), pp 142\u2013147","DOI":"10.3115\/1119176.1119195"},{"key":"2849_CR40","unstructured":"AI@Meta: Llama 3.1 Model Card (2024) https:\/\/github.com\/meta-llama\/llama-models\/blob\/main\/models\/llama3_1\/MODEL_CARD.md"}],"container-title":["International Journal of Machine Learning and Cybernetics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-025-02849-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s13042-025-02849-w","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-025-02849-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,16]],"date-time":"2026-03-16T09:55:59Z","timestamp":1773654959000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s13042-025-02849-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1,29]]},"references-count":40,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2026,2]]}},"alternative-id":["2849"],"URL":"https:\/\/doi.org\/10.1007\/s13042-025-02849-w","relation":{},"ISSN":["1868-8071","1868-808X"],"issn-type":[{"value":"1868-8071","type":"print"},{"value":"1868-808X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,1,29]]},"assertion":[{"value":"29 December 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 November 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 January 2026","order":3,"name":"first_online","label":"First 
Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"This article does not contain any studies with human participants or animals performed by any of the authors.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}}],"article-number":"57"}}