{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,22]],"date-time":"2026-01-22T03:21:40Z","timestamp":1769052100552,"version":"3.49.0"},"reference-count":50,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2025,2,19]],"date-time":"2025-02-19T00:00:00Z","timestamp":1739923200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,2,19]],"date-time":"2025-02-19T00:00:00Z","timestamp":1739923200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. Syst."],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1007\/s40747-025-01795-y","type":"journal-article","created":{"date-parts":[[2025,2,19]],"date-time":"2025-02-19T14:42:52Z","timestamp":1739976172000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Cl2sum: abstractive summarization via contrastive prompt constructed by LLMs hallucination"],"prefix":"10.1007","volume":"11","author":[{"given":"Xiang","family":"Huang","sequence":"first","affiliation":[]},{"given":"Qiong","family":"Nong","sequence":"additional","affiliation":[]},{"given":"Xiaobo","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Hongcheng","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Kunpeng","family":"Du","sequence":"additional","affiliation":[]},{"given":"Chunlin","family":"Yin","sequence":"additional","affiliation":[]},{"given":"Li","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Bin","family":"Yan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2929-2126","authenticated-orcid":false,"given":"Xuan","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,19]]},"reference":[{"issue":"4","key":"1795_CR1","doi-asserted-by":"publisher","first-page":"399","DOI":"10.1162\/089120102762671927","volume":"28","author":"D Radev","year":"2002","unstructured":"Radev D, Hovy E, McKeown K (2002) Introduction to the special issue on summarization. Comput Linguist 28(4):399\u2013408","journal-title":"Comput Linguist"},{"key":"1795_CR2","first-page":"27730","volume":"35","author":"L Ouyang","year":"2022","unstructured":"Ouyang L, Wu J, Jiang X, Almeida D, Wainwright C, Mishkin P, Zhang C, Agarwal S, Slama K, Ray A (2022) Training language models to follow instructions with human feedback. Adv Neural Inf Process Syst 35:27730\u201327744","journal-title":"Adv Neural Inf Process Syst"},{"key":"1795_CR3","unstructured":"Zeng A, Liu X, Du Z, Wang Z, Lai H, Ding M, Yang Z, Xu Y, Zheng W, Xia X et al (2022) Glm-130b: an open bilingual pre-trained model. arXiv preprint arXiv:2210.02414"},{"key":"1795_CR4","unstructured":"Yang X, Li Y, Zhang X, Chen H, Cheng W (2023) Exploring the limits of ChatGPT for query or aspect-based text summarization. arXiv preprint arXiv:2302.08081"},{"key":"1795_CR5","unstructured":"Lin C-Y (2004) Rouge: a package for automatic evaluation of summaries. In: Text summarization branches out, pp 74\u201381"},{"key":"1795_CR6","doi-asserted-by":"publisher","unstructured":"Van\u00a0Veen D, Van\u00a0Uden C, Blankemeier L, Delbrouck J-B, Aali A, Bluethgen C, Pareek A, Polacin M, Reis EP, Seehofnerov\u00e1 A, Rohatgi N, Hosamani P, Collins W, Ahuja N, Langlotz CP, Hom J, Gatidis S, Pauly J, Chaudhari AS (2024) Clinical text summarization: adapting large language models can outperform human experts. Nat Med 30(4):1134\u20131142. doi: https:\/\/doi.org\/10.1038\/s41591-024-02855-5","DOI":"10.1038\/s41591-024-02855-5"},{"key":"1795_CR7","doi-asserted-by":"crossref","unstructured":"Laskar MTR, Fu X-Y, Chen C, Tn SB (2023) Building real-world meeting summarization systems using large language models: a practical perspective. arXiv preprint arXiv:2310.19233","DOI":"10.18653\/v1\/2023.emnlp-industry.33"},{"key":"1795_CR8","unstructured":"Tonmoy S, Zaman S, Jain V, Rani A, Rawte V, Chadha A, Das A (2024) A comprehensive survey of hallucination mitigation techniques in large language models. arXiv preprint arXiv:2401.01313"},{"key":"1795_CR9","unstructured":"Zhang Y, Li Y, Cui L, Cai D, Liu L, Fu T, Huang X, Zhao E, Zhang Y, Chen Y et al. (2023) Siren\u2019s song in the AI ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219"},{"key":"1795_CR10","doi-asserted-by":"crossref","unstructured":"Bouyamourn A (2023) Why llms hallucinate, and how to get (evidential) closure: perceptual, intensional, and extensional learning for faithful natural language generation. arXiv preprint arXiv:2310.15355","DOI":"10.18653\/v1\/2023.emnlp-main.192"},{"key":"1795_CR11","unstructured":"Wei J, Tay Y, Bommasani R, Raffel C, Zoph B, Borgeaud S, Yogatama D, Bosma M, Zhou D, Metzler D et al (2022) Emergent abilities of large language models. arXiv preprint arXiv:2206.07682"},{"key":"1795_CR12","first-page":"24824","volume":"35","author":"J Wei","year":"2022","unstructured":"Wei J, Wang X, Schuurmans D, Bosma M, Xia F, Chi E, Le QV, Zhou D (2022) Chain-of-thought prompting elicits reasoning in large language models. Adv Neural Inf Process Syst 35:24824\u201324837","journal-title":"Adv Neural Inf Process Syst"},{"key":"1795_CR13","unstructured":"Dong Q, Li L, Dai D, Zheng C, Wu Z, Chang B, Sun X, Xu J, Sui Z (2022) A survey on in-context learning. arXiv preprint arXiv:2301.00234"},{"key":"1795_CR14","doi-asserted-by":"crossref","unstructured":"Hu J, Li Z, Chen Z, Li Z, Wan X, Chang T-H (2022) Graph enhanced contrastive learning for radiology findings summarization. arXiv preprint arXiv:2204.00203","DOI":"10.18653\/v1\/2022.acl-long.320"},{"key":"1795_CR15","doi-asserted-by":"crossref","unstructured":"Xu S, Zhang X, Wu Y, Wei F (2022) Sequence level contrastive learning for text summarization. In: Proceedings of the AAAI conference on artificial intelligence, vol 36, pp 11556\u201311565","DOI":"10.1609\/aaai.v36i10.21409"},{"key":"1795_CR16","doi-asserted-by":"crossref","unstructured":"Liu Y, Liu P (2021) Simcls: a simple framework for contrastive learning of abstractive summarization. arXiv preprint arXiv:2106.01890","DOI":"10.18653\/v1\/2021.acl-short.135"},{"key":"1795_CR17","unstructured":"Xie Q, Huang J, Saha T, Ananiadou S (2022) Gretel: graph contrastive topic enhanced language model for long document extractive summarization. arXiv preprint arXiv:2208.09982"},{"key":"1795_CR18","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2018) Bert: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805"},{"key":"1795_CR19","doi-asserted-by":"crossref","unstructured":"Lewis M, Liu Y, Goyal N, Ghazvininejad M, Mohamed A, Levy O, Stoyanov V, Zettlemoyer L (2019) Bart: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"1795_CR20","unstructured":"Zhang J, Zhao Y, Saleh M, Liu P (2020) Pegasus: pre-training with extracted gap-sentences for abstractive summarization. In: International conference on machine learning. PMLR, pp 11328\u201311339"},{"key":"1795_CR21","unstructured":"Song K, Tan X, Qin T, Lu J, Liu T-Y (2019) Mass: masked sequence to sequence pre-training for language generation. arXiv preprint arXiv:1905.02450"},{"key":"1795_CR22","doi-asserted-by":"crossref","unstructured":"Kry\u015bci\u0144ski W, McCann B, Xiong C, Socher R (2019) Evaluating the factual consistency of abstractive text summarization. arXiv preprint arXiv:1910.12840","DOI":"10.18653\/v1\/2020.emnlp-main.750"},{"key":"1795_CR23","doi-asserted-by":"crossref","unstructured":"Islam SOU, \u0160krjanec I, Du\u0161ek O, Demberg V (2023) Tackling hallucinations in neural chart summarization. In: Proceedings of the 16th international natural language generation conference, pp 414\u2013423","DOI":"10.18653\/v1\/2023.inlg-main.30"},{"key":"1795_CR24","doi-asserted-by":"crossref","unstructured":"Akani E, Favre B, Bechet F, Gemignani R (2023) Reducing named entity hallucination risk to ensure faithful summary generation. In: Proceedings of the 16th international natural language generation conference, pp 437\u2013442","DOI":"10.18653\/v1\/2023.inlg-main.33"},{"key":"1795_CR25","doi-asserted-by":"crossref","unstructured":"Shi W, Han X, Lewis M, Tsvetkov Y, Zettlemoyer L, Yih SW (2023) Trusting your evidence: hallucinate less with context-aware decoding. arXiv preprint arXiv:2305.14739","DOI":"10.18653\/v1\/2024.naacl-short.69"},{"key":"1795_CR26","doi-asserted-by":"crossref","unstructured":"Cao Z, Wei F, Li W, Li S (2018) Faithful to the original: fact aware neural abstractive summarization. In: Proceedings of the AAAI conference on artificial intelligence, vol 32","DOI":"10.1609\/aaai.v32i1.11912"},{"key":"1795_CR27","doi-asserted-by":"crossref","unstructured":"Cao M, Dong Y, Wu J, Cheung JCK (2020) Factual error correction for abstractive summarization models. arXiv preprint arXiv:2010.08712","DOI":"10.18653\/v1\/2020.emnlp-main.506"},{"key":"1795_CR28","doi-asserted-by":"crossref","unstructured":"Cao S, Wang L (2021) Cliff: contrastive learning for improving faithfulness and factuality in abstractive summarization. arXiv preprint arXiv:2109.09209","DOI":"10.18653\/v1\/2021.emnlp-main.532"},{"key":"1795_CR29","unstructured":"Xie Q, Zhou J, Peng Y, Wang F (2023) Factreranker: fact-guided reranker for faithful radiology report summarization. arXiv preprint arXiv:2303.08335"},{"key":"1795_CR30","doi-asserted-by":"crossref","unstructured":"Mishra N, Sahu G, Calixto I, Abu-Hanna A, Laradji IH (2023) LLM aided semi-supervision for extractive dialog summarization. arXiv preprint arXiv:2311.11462","DOI":"10.18653\/v1\/2023.findings-emnlp.670"},{"key":"1795_CR31","doi-asserted-by":"crossref","unstructured":"Liu Y, Shi K, He KS, Ye L, Fabbri AR, Liu P, Radev D, Cohan A (2023) On learning to summarize with large language models as references. arXiv preprint arXiv:2305.14239","DOI":"10.18653\/v1\/2024.naacl-long.478"},{"key":"1795_CR32","doi-asserted-by":"crossref","unstructured":"Fetahu B, Chen Z, Rokhlenko O, Malmasi S (2023) InstructPTS: instruction-tuning LLMs for product title summarization. arXiv preprint arXiv:2310.16361","DOI":"10.18653\/v1\/2023.emnlp-industry.63"},{"key":"1795_CR33","unstructured":"Jones E, Palangi H, Sim\u00f5es C, Chandrasekaran V, Mukherjee S, Mitra A, Awadallah A, Kamar E (2023) Teaching language models to hallucinate less with synthetic tasks. arXiv preprint arXiv:2310.06827"},{"key":"1795_CR34","doi-asserted-by":"crossref","unstructured":"Zhang H, Liu X, Zhang J (2023) Extractive summarization via ChatGPT for faithful summary generation. arXiv preprint arXiv:2304.04193","DOI":"10.18653\/v1\/2023.findings-emnlp.214"},{"key":"1795_CR35","doi-asserted-by":"crossref","unstructured":"Zhang H, Liu X, Zhang J (2023) Summit: iterative text summarization via ChatGPT. arXiv preprint arXiv:2305.14835","DOI":"10.18653\/v1\/2023.findings-emnlp.714"},{"key":"1795_CR36","doi-asserted-by":"crossref","unstructured":"Hu J, Li Z, Chen Z, Li Z, Wan X, Chang T-H (2022) Graph enhanced contrastive learning for radiology findings summarization. arXiv preprint arXiv:2204.00203","DOI":"10.18653\/v1\/2022.acl-long.320"},{"key":"1795_CR37","first-page":"21271","volume":"33","author":"J-B Grill","year":"2020","unstructured":"Grill J-B, Strub F, Altch\u00e9 F, Tallec C, Richemond P, Buchatskaya E, Doersch C, Avila Pires B, Guo Z, Gheshlaghi Azar M (2020) Bootstrap your own latent-a new approach to self-supervised learning. Adv Neural Inf Process Syst 33:21271\u201321284","journal-title":"Adv Neural Inf Process Syst"},{"key":"1795_CR38","unstructured":"Meng Y, Michalski M, Huang J, Zhang Y, Abdelzaher T, Han J (2023) Tuning language models as training data generators for augmentation-enhanced few-shot learning. In: International conference on machine learning. PMLR, pp 24457\u201324477"},{"key":"1795_CR39","unstructured":"Gero Z, Singh C, Cheng H, Naumann T, Galley M, Gao J, Poon H (2023) Self-verification improves few-shot clinical information extraction. arXiv preprint arXiv:2306.00024"},{"key":"1795_CR40","unstructured":"Ma C, Wu Z, Wang J, Xu S, Wei Y, Liu Z, Jiang X, Guo L, Cai X, Zhang S et al (2023) Impressiongpt: an iterative optimizing framework for radiology report summarization with ChatGPT. arxiv 2023. arXiv preprint arXiv:2304.08448"},{"key":"1795_CR41","unstructured":"Lei D, Li Y, Hu M, Wang M, Yun V, Ching E, Kamal E (2023) Chain of natural language inference for reducing large language model ungrounded hallucinations. arXiv preprint arXiv:2310.03951"},{"key":"1795_CR42","unstructured":"Chia YK, Chen G, Tuan LA, Poria S, Bing L (2023) Contrastive chain-of-thought prompting. arXiv preprint arXiv:2311.09277"},{"key":"1795_CR43","doi-asserted-by":"crossref","unstructured":"Nan F, Nallapati R, Wang Z, Santos CNd, Zhu H, Zhang D, McKeown K, Xiang B (2021) Entity-level factual consistency of abstractive text summarization. arXiv preprint arXiv:2102.09130","DOI":"10.18653\/v1\/2021.eacl-main.235"},{"key":"1795_CR44","doi-asserted-by":"crossref","unstructured":"Zhao Z, Cohen SB, Webber B (2020) Reducing quantity hallucinations in abstractive summarization. arXiv preprint arXiv:2009.13312","DOI":"10.18653\/v1\/2020.findings-emnlp.203"},{"key":"1795_CR45","unstructured":"Hermann KM, Kocisky T, Grefenstette E, Espeholt L, Kay W, Suleyman M, Blunsom P (2015) Teaching machines to read and comprehend. In: Advances in neural information processing systems, vol 28"},{"key":"1795_CR46","doi-asserted-by":"crossref","unstructured":"Manning CD, Surdeanu M, Bauer J, Finkel JR, Bethard S, McClosky D (2014) The Stanford CoreNLP natural language processing toolkit. In: Proceedings of 52nd annual meeting of the Association for Computational Linguistics: system demonstrations, pp 55\u201360","DOI":"10.3115\/v1\/P14-5010"},{"key":"1795_CR47","doi-asserted-by":"crossref","unstructured":"Narayan S, Cohen SB, Lapata M (2018) Don\u2019t give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization. arXiv preprint arXiv:1808.08745","DOI":"10.18653\/v1\/D18-1206"},{"issue":"3","key":"1795_CR48","doi-asserted-by":"publisher","first-page":"13513","DOI":"10.1111\/exsy.13513","volume":"41","author":"C Yin","year":"2024","unstructured":"Yin C, Du K, Nong Q, Zhang H, Yang L, Yan B, Huang X, Wang X, Zhang X (2024) Powerpulse: power energy chat model with LLaMA model fine-tuned on Chinese and power sector domain knowledge. Expert Syst 41(3):13513","journal-title":"Expert Syst"},{"key":"1795_CR49","doi-asserted-by":"crossref","unstructured":"Scialom T, Dray P-A, Gallinari P, Lamprier S, Piwowarski B, Staiano J, Wang A (2021) Questeval: summarization asks for fact-based evaluation. arXiv preprint arXiv:2103.12693","DOI":"10.18653\/v1\/2021.emnlp-main.529"},{"key":"1795_CR50","doi-asserted-by":"crossref","unstructured":"Liu Y, Iter D, Xu Y, Wang S, Xu R, Zhu C (2023) G-eval: Nlg evaluation using gpt-4 with better human alignment. arXiv preprint arXiv:2303.16634","DOI":"10.18653\/v1\/2023.emnlp-main.153"}],"container-title":["Complex &amp; Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-025-01795-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-025-01795-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-025-01795-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,4]],"date-time":"2025-03-04T12:07:45Z","timestamp":1741090065000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-025-01795-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2,19]]},"references-count":50,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2025,3]]}},"alternative-id":["1795"],"URL":"https:\/\/doi.org\/10.1007\/s40747-025-01795-y","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"value":"2199-4536","type":"print"},{"value":"2198-6053","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,2,19]]},"assertion":[{"value":"23 August 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 January 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 February 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"This article does not contain studies with human participants or animals. Statement of informed consent is not applicable since the manuscript does not contain any patient data.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical and informed consent for data used"}}],"article-number":"178"}}