{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,16]],"date-time":"2025-11-16T14:13:36Z","timestamp":1763302416268,"version":"3.45.0"},"publisher-location":"Singapore","reference-count":29,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819533510","type":"print"},{"value":"9789819533527","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,11,17]],"date-time":"2025-11-17T00:00:00Z","timestamp":1763337600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,11,17]],"date-time":"2025-11-17T00:00:00Z","timestamp":1763337600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-3352-7_14","type":"book-chapter","created":{"date-parts":[[2025,11,16]],"date-time":"2025-11-16T14:09:28Z","timestamp":1763302168000},"page":"172-184","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Detecting and\u00a0Correcting Hallucinations in\u00a0LLMs via\u00a0Substantive Uncertainty and\u00a0Iterative Validation"],"prefix":"10.1007","author":[{"given":"Zheng","family":"Chen","sequence":"first","affiliation":[]},{"given":"Yijie","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Yuxin","family":"Gao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,17]]},"reference":[{"key":"14_CR1","unstructured":"Carlini, N., Tramer, F., Wallace, E.e.a.: Extracting training data from large language models. arXiv preprint arXiv:2012.07805 (2021)"},{"key":"14_CR2","doi-asserted-by":"crossref","unstructured":"Chiang, D., Cholak, P.: Overcoming a theoretical limitation of self-attention. In: Proceedings of ACL 2022, pp. 7654\u20137664 (2022)","DOI":"10.18653\/v1\/2022.acl-long.527"},{"key":"14_CR3","doi-asserted-by":"crossref","unstructured":"Filippova, K.: Controlled hallucinations: learning to generate faithfully from noisy data. In: Findings of EMNLP 2020, pp. 864\u2013870 (2020)","DOI":"10.18653\/v1\/2020.findings-emnlp.76"},{"key":"14_CR4","doi-asserted-by":"publisher","first-page":"156","DOI":"10.1162\/tacl_a_00306","volume":"8","author":"M Hahn","year":"2020","unstructured":"Hahn, M.: Theoretical limitations of self-attention in neural sequence models. Trans. Assoc. Comput. Linguist. 8, 156\u2013171 (2020)","journal-title":"Trans. Assoc. Comput. Linguist."},{"key":"14_CR5","doi-asserted-by":"crossref","unstructured":"Ji, Z.e.a.: Survey of hallucination in natural language generation. ACM Comput. Surv. 55(12), 1\u201338 (2023)","DOI":"10.1145\/3571730"},{"key":"14_CR6","unstructured":"Kadavath, S.e.a.: Language models (mostly) know what they know. arXiv preprint arXiv:2207.05221 (2022)"},{"key":"14_CR7","unstructured":"Kasai, J.e.a.: Realtime qa: What\u2019s the answer right now? arXiv preprint arXiv:2207.13332 (2022)"},{"key":"14_CR8","doi-asserted-by":"crossref","unstructured":"Katz, D.M.e.a.: Gpt-4 passes the bar exam. Philos. Trans. R. Soc. A 382(2261), 20230254 (2024)","DOI":"10.1098\/rsta.2023.0254"},{"key":"14_CR9","doi-asserted-by":"crossref","unstructured":"Li, D.e.a.: Large language models with controllable working memory. In: Findings of ACL 2023, pp. 1774\u20131793 (2023)","DOI":"10.18653\/v1\/2023.findings-acl.112"},{"key":"14_CR10","unstructured":"Li, W.e.a.: Faithfulness in natural language generation: a systematic survey. arXiv preprint arXiv:2203.05227 (2022)"},{"key":"14_CR11","unstructured":"Li, Y.e.a.: Chatdoctor: a medical chat model fine-tuned on llama using domain knowledge. Cureus 15(7), e40895 (2023)"},{"key":"14_CR12","unstructured":"Liang, X.e.a.: Uhgeval: benchmarking hallucination in Chinese LLMs via unconstrained generation. In: Proceedings of ACL 2024 (2024)"},{"key":"14_CR13","doi-asserted-by":"crossref","unstructured":"Lin, S.e.a.: Truthfulqa: measuring how models mimic human falsehoods. In: Proceedings of ACL 2022, pp. 3214\u20133252 (2022)","DOI":"10.18653\/v1\/2022.acl-long.229"},{"key":"14_CR14","doi-asserted-by":"crossref","unstructured":"Luo, J.e.a.: Zero-resource hallucination prevention for large language models. In: Findings of EMNLP 2024 (2024)","DOI":"10.18653\/v1\/2024.findings-emnlp.204"},{"key":"14_CR15","doi-asserted-by":"crossref","unstructured":"Maynez, J.e.a.: On faithfulness and factuality in abstractive summarization. In: Proceedings of ACL 2020, pp. 1906\u20131919 (2020)","DOI":"10.18653\/v1\/2020.acl-main.173"},{"key":"14_CR16","unstructured":"Min, S.e.a.: Factscore: factual consistency evaluation via retrieval and entailment. In: Proceedings of EMNLP 2023 (2023)"},{"key":"14_CR17","doi-asserted-by":"crossref","unstructured":"Onoe, Y.e.a.: Entity cloze by date: what LMS know about unseen entities. In: Findings of NAACL 2022, pp. 693\u2013702 (2022)","DOI":"10.18653\/v1\/2022.findings-naacl.52"},{"key":"14_CR18","doi-asserted-by":"crossref","unstructured":"Patel, A.e.a.: Are NLP models really able to solve simple math word problems? In: Proceedings of NAACL 2021 (2021)","DOI":"10.18653\/v1\/2021.naacl-main.168"},{"key":"14_CR19","unstructured":"Sharma, M.e.a.: Towards understanding sycophancy in language models. In: Proceedings of ICLR 2024 (2024)"},{"key":"14_CR20","unstructured":"Singhal, K.e.a.: Towards expert-level medical question answering with large language models. arXiv preprint arXiv:2305.09617 (2023)"},{"key":"14_CR21","doi-asserted-by":"crossref","unstructured":"Su, W.e.a.: Unsupervised real-time hallucination detection from LLM internal states. In: Findings of ACL 2024, pp. 14379\u201314391 (2024)","DOI":"10.18653\/v1\/2024.findings-acl.854"},{"key":"14_CR22","doi-asserted-by":"crossref","unstructured":"Wang, X.e.a.: Hallucination detection for generative LLMs by Bayesian sequential estimation. In: Proceedings of EMNLP 2023, pp. 15361\u201315371 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.949"},{"key":"14_CR23","unstructured":"Wei, J.e.a.: Simple synthetic data reduces sycophancy in large language models. arXiv preprint arXiv:2308.03958 (2024)"},{"key":"14_CR24","unstructured":"Weidinger, L.e.a.: Ethical and social risks of harm from language models. arXiv preprint arXiv:2112.04359 (2021)"},{"key":"14_CR25","unstructured":"Yao, J.Y.e.a.: LLM lies: Hallucinations are not bugs, but features as adversarial examples. arXiv preprint arXiv:2310.01469 (2023)"},{"key":"14_CR26","unstructured":"Yu, F.e.a.: Legal prompting: teaching a language model to think like a lawyer. In: Proceedings of NLLP 2022, pp. 1\u201312 (2022)"},{"key":"14_CR27","unstructured":"Zhang, R.e.a.: Vl-uncertainty: detecting hallucination in vision-language models via uncertainty estimation. arXiv preprint arXiv:2411.11919 (2024)"},{"key":"14_CR28","doi-asserted-by":"crossref","unstructured":"Zhang, T.e.a.: Enhancing uncertainty-based hallucination detection with stronger focus. In: Proceedings of EMNLP 2023, pp. 915\u2013932 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.58"},{"key":"14_CR29","unstructured":"Zhang, T.e.a.: Bertscore: evaluating text generation with bert. In: Proceedings of ICLR 2020 (2020)"}],"container-title":["Lecture Notes in Computer Science","Natural Language Processing and Chinese Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-3352-7_14","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,16]],"date-time":"2025-11-16T14:09:33Z","timestamp":1763302173000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-3352-7_14"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,17]]},"ISBN":["9789819533510","9789819533527"],"references-count":29,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-3352-7_14","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,17]]},"assertion":[{"value":"17 November 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"NLPCC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"CCF International Conference on Natural Language Processing and Chinese Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Urumqi","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7 August 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9 August 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"nlpcc2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/tcci.ccf.org.cn\/conference\/2025\/index.php","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}