{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,25]],"date-time":"2025-07-25T10:18:51Z","timestamp":1753438731770,"version":"3.37.3"},"reference-count":42,"publisher":"Springer Science and Business Media LLC","issue":"25","license":[{"start":{"date-parts":[[2024,5,15]],"date-time":"2024-05-15T00:00:00Z","timestamp":1715731200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,5,15]],"date-time":"2024-05-15T00:00:00Z","timestamp":1715731200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"nrf"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Neural Comput &amp; Applic"],"published-print":{"date-parts":[[2024,9]]},"DOI":"10.1007\/s00521-024-09864-y","type":"journal-article","created":{"date-parts":[[2024,5,15]],"date-time":"2024-05-15T19:01:45Z","timestamp":1715799705000},"page":"15337-15351","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Enhancing abstractive summarization of implicit datasets with contrastive attention"],"prefix":"10.1007","volume":"36","author":[{"given":"Soonki","family":"Kwon","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4199-936X","authenticated-orcid":false,"given":"Younghoon","family":"Lee","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,5,15]]},"reference":[{"doi-asserted-by":"crossref","unstructured":"Rush AM, Chopra S, Weston J (2015) A neural attention model for abstractive sentence summarization. In: Proceedings of the 2015 conference on empirical methods in natural language processing, pp. 379\u2013389","key":"9864_CR1","DOI":"10.18653\/v1\/D15-1044"},{"doi-asserted-by":"crossref","unstructured":"Nallapati R, Zhou B, Santos C, Gul\u00e7ehre \u00c7, Xiang B (2016) Abstractive text summarization using sequence-to-sequence RNNs and beyond. In: Proceedings of the 20th SIGNLL conference on computational natural language learning, pp. 280\u2013290","key":"9864_CR2","DOI":"10.18653\/v1\/K16-1028"},{"doi-asserted-by":"crossref","unstructured":"See A, Liu PJ, Manning CD (2017) Get to the point: summarization with pointer-generator networks. In: Proceedings of the 55th annual meeting of the association for computational linguistics (Volume 1: Long Papers), pp. 1073\u20131083","key":"9864_CR3","DOI":"10.18653\/v1\/P17-1099"},{"unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser \u0141, Polosukhin I (2017) Attention is all you need. Advances in neural information processing systems. 30","key":"9864_CR4"},{"unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2019) Bert: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, Volume 1 (Long and Short Papers), pp. 4171\u20134186","key":"9864_CR5"},{"doi-asserted-by":"crossref","unstructured":"Lewis M, Liu Y, Goyal N, Ghazvininejad M, Mohamed A, Levy O, Stoyanov V, Zettlemoyer L (2020) Bart: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In: Proceedings of the 58th annual meeting of the association for computational linguistics, pp. 7871\u20137880","key":"9864_CR6","DOI":"10.18653\/v1\/2020.acl-main.703"},{"issue":"1","key":"9864_CR7","first-page":"5485","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel C, Shazeer N, Roberts A, Lee K, Narang S, Matena M, Zhou Y, Li W, Liu PJ (2020) Exploring the limits of transfer learning with a unified text-to-text transformer. J Mach Learn Res 21(1):5485\u20135551","journal-title":"J Mach Learn Res"},{"unstructured":"Zhang J, Zhao Y, Saleh M, Liu P (2020) Pegasus: pre-training with extracted gap-sentences for abstractive summarization. In: International conference on machine learning, pp. 11328\u201311339. PMLR","key":"9864_CR8"},{"doi-asserted-by":"crossref","unstructured":"Zheng C, Zhang K, Wang HJ, Fan L, Wang Z (2021) Enhanced seq2seq autoencoder via contrastive learning for abstractive text summarization. In: 2021 IEEE international conference on big data (Big Data), pp. 1764\u20131771. IEEE","key":"9864_CR9","DOI":"10.1109\/BigData52589.2021.9671819"},{"doi-asserted-by":"crossref","unstructured":"Xu S, Zhang X, Wu Y, Wei F (2022) Sequence level contrastive learning for text summarization. In: Proceedings of the AAAI conference on artificial intelligence, pp. 11556\u201311565","key":"9864_CR10","DOI":"10.1609\/aaai.v36i10.21409"},{"doi-asserted-by":"crossref","unstructured":"Wang F, Song K, Zhang H, Jin L, Cho S, Yao W, Wang X, Chen M, Yu D (2022) Salience allocation as guidance for abstractive summarization. In: Proceedings of the 2022 conference on empirical methods in natural language processing, pp. 6094\u20136106","key":"9864_CR11","DOI":"10.18653\/v1\/2022.emnlp-main.409"},{"doi-asserted-by":"crossref","unstructured":"Dou Z-Y, Liu P, Hayashi H, Jiang Z, Neubig G (2021) Gsum: a general framework for guided neural abstractive summarization. In: Proceedings of the 2021 conference of the North American chapter of the association for computational linguistics: human language technologies, pp. 4830\u20134842","key":"9864_CR12","DOI":"10.18653\/v1\/2021.naacl-main.384"},{"unstructured":"Ranzato M, Chopra S, Auli M, Zaremba W (2015) Sequence level training with recurrent neural networks. arXiv preprint arXiv:1511.06732","key":"9864_CR13"},{"doi-asserted-by":"crossref","unstructured":"Liu Y, Liu P (2021) Simcls: A simple framework for contrastive learning of abstractive summarization. In: Proceedings of the 59th annual meeting of the association for computational linguistics and the 11th international joint conference on natural language processing (Volume 2: Short Papers), pp. 1065\u20131072","key":"9864_CR14","DOI":"10.18653\/v1\/2021.acl-short.135"},{"doi-asserted-by":"crossref","unstructured":"Ravaut M, Joty S, Chen N (2022) Summareranker: a multi-task mixture-of-experts re-ranking framework for abstractive summarization. In: Proceedings of the 60th annual meeting of the association for computational linguistics (Volume 1: Long Papers), pp. 4504\u20134524","key":"9864_CR15","DOI":"10.18653\/v1\/2022.acl-long.309"},{"unstructured":"Zhao Y, Khalman M, Joshi R, Narayan S, Saleh M, Liu PJ (2023) Calibrating sequence likelihood improves conditional language generation. In: The eleventh international conference on learning representations","key":"9864_CR16"},{"doi-asserted-by":"crossref","unstructured":"Liu Y, Liu P, Radev D, Neubig G (2022) Brio: Bringing order to abstractive summarization. In: Proceedings of the 60th annual meeting of the association for computational linguistics (Volume 1: Long Papers), pp. 2890\u20132903","key":"9864_CR17","DOI":"10.18653\/v1\/2022.acl-long.207"},{"unstructured":"Zhang X, Liu Y, Wang X, He P, Yu Y, Chen S-Q, Xiong W, Wei F (2022) Momentum calibration for text generation. arXiv preprint arXiv:2212.04257","key":"9864_CR18"},{"doi-asserted-by":"crossref","unstructured":"Hadsell R, Chopra S, LeCun Y (2006) Dimensionality reduction by learning an invariant mapping. In: 2006 IEEE computer society conference on computer vision and pattern recognition (CVPR\u201906), vol. 2, pp. 1735\u20131742. IEEE","key":"9864_CR19","DOI":"10.1109\/CVPR.2006.100"},{"unstructured":"Chen T, Kornblith S, Norouzi M, Hinton G (2020) A simple framework for contrastive learning of visual representations. In: International conference on machine learning, pp. 1597\u20131607. PMLR","key":"9864_CR20"},{"doi-asserted-by":"crossref","unstructured":"He K, Fan H, Wu Y, Xie S, Girshick R (2020) Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 9729\u20139738","key":"9864_CR21","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"9864_CR22","first-page":"21271","volume":"33","author":"J-B Grill","year":"2020","unstructured":"Grill J-B, Strub F, Altch\u00e9 F, Tallec C, Richemond P, Buchatskaya E, Doersch C, Avila Pires B, Guo Z, Gheshlaghi Azar M et al (2020) Bootstrap your own latent-a new approach to self-supervised learning. Adv Neural Inf Process Syst 33:21271\u201321284","journal-title":"Adv Neural Inf Process Syst"},{"key":"9864_CR23","first-page":"18661","volume":"33","author":"P Khosla","year":"2020","unstructured":"Khosla P, Teterwak P, Wang C, Sarna A, Tian Y, Isola P, Maschinot A, Liu C, Krishnan D (2020) Supervised contrastive learning. Adv Neural Inf Process Syst 33:18661\u201318673","journal-title":"Adv Neural Inf Process Syst"},{"unstructured":"Gunel B, Du J, Conneau A, Stoyanov V (2021) Supervised contrastive learning for pre-trained language model fine-tuning. In: International conference on learning representations","key":"9864_CR24"},{"unstructured":"Lee S, Lee DB, Hwang SJ (2021) Contrastive learning with adversarial perturbations for conditional text generation. In: International conference on learning representations","key":"9864_CR25"},{"doi-asserted-by":"crossref","unstructured":"Liu W, Wu H, Mu W, Li Z, Chen T, Nie D (2021) Co2sum: contrastive learning for factual-consistent abstractive summarization. arXiv preprint arXiv:2112.01147","key":"9864_CR26","DOI":"10.18653\/v1\/2021.findings-emnlp.106"},{"unstructured":"An C, Zhong M, Wu Z, Zhu Q, Huang X-J, Qiu X (2022) Colo: A contrastive learning based re-ranking framework for one-stage summarization. In: Proceedings of the 29th international conference on computational linguistics, pp. 5783\u20135793","key":"9864_CR27"},{"doi-asserted-by":"crossref","unstructured":"Cao S, Wang L (2021) Cliff: contrastive learning for improving faithfulness and factuality in abstractive summarization. In: Proceedings of the 2021 conference on empirical methods in natural language processing, pp. 6633\u20136649","key":"9864_CR28","DOI":"10.18653\/v1\/2021.emnlp-main.532"},{"doi-asserted-by":"crossref","unstructured":"Schroff F, Kalenichenko D, Philbin J (2015) Facenet: a unified embedding for face recognition and clustering. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 815\u2013823","key":"9864_CR29","DOI":"10.1109\/CVPR.2015.7298682"},{"doi-asserted-by":"crossref","unstructured":"Zhong M, Liu P, Chen Y, Wang D, Qiu X, Huang X-J (2020) Extractive summarization as text matching. In: Proceedings of the 58th annual meeting of the association for computational linguistics, pp. 6197\u20136208","key":"9864_CR30","DOI":"10.18653\/v1\/2020.acl-main.552"},{"doi-asserted-by":"crossref","unstructured":"Hsu W-T, Lin C-K, Lee M-Y, Min K, Tang J, Sun M (2018) A unified model for extractive and abstractive summarization using inconsistency loss. In: Proceedings of the 56th annual meeting of the association for computational linguistics (Volume 1: Long Papers), pp. 132\u2013141","key":"9864_CR31","DOI":"10.18653\/v1\/P18-1013"},{"doi-asserted-by":"crossref","unstructured":"Gehrmann S, Deng Y, Rush A (2018) Bottom-up abstractive summarization. In: Proceedings of the 2018 conference on empirical methods in natural language processing, pp. 4098\u20134109","key":"9864_CR32","DOI":"10.18653\/v1\/D18-1443"},{"unstructured":"Zhang T, Kishore V, Wu F, Weinberger KQ, Artzi, Y (2020) Bertscore: evaluating text generation with bert. In: International conference on learning representations","key":"9864_CR33"},{"unstructured":"Lin C-Y (2004) Rouge: a package for automatic evaluation of summaries. In: Text summarization branches Out, pp. 74\u201381","key":"9864_CR34"},{"unstructured":"Hermann KM, Kocisky T, Grefenstette E, Espeholt L, Kay W, Suleyman M, Blunsom P (2015) Teaching machines to read and comprehend. Advances in neural information processing systems. 28","key":"9864_CR35"},{"unstructured":"Sandhaus E (2008) The new york times annotated corpus. Linguistic Data Consortium, Philadelphia 6(12):26752","key":"9864_CR36"},{"doi-asserted-by":"crossref","unstructured":"Deutsch D, Roth D (2020) SacreROUGE: An Open-Source Library for Using and Developing Summarization Evaluation Metrics. In: Proceedings of second workshop for NLP open source software (NLP-OSS), pp. 120\u2013125. Association for Computational Linguistics, Online. https:\/\/www.aclweb.org\/anthology\/2020.nlposs-1.17","key":"9864_CR37","DOI":"10.18653\/v1\/2020.nlposs-1.17"},{"doi-asserted-by":"crossref","unstructured":"Xu J, Gan Z, Cheng Y, Liu J (2020) Discourse-aware neural extractive text summarization. In: Proceedings of the 58th annual meeting of the association for computational linguistics, pp. 5021\u20135031","key":"9864_CR38","DOI":"10.18653\/v1\/2020.acl-main.451"},{"doi-asserted-by":"crossref","unstructured":"Narayan S, Cohen SB, Lapata M (2018) Don\u2019t give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization. In: Proceedings of the 2018 conference on empirical methods in natural language processing, pp. 1797\u20131807","key":"9864_CR39","DOI":"10.18653\/v1\/D18-1206"},{"unstructured":"Kim B, Kim H, Kim G (2019) Abstractive summarization of reddit posts with multi-level memory networks. In: Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, Volume 1 (Long and Short Papers), pp. 2519\u20132531","key":"9864_CR40"},{"doi-asserted-by":"crossref","unstructured":"Gliwa B, Mochol I, Biesek M, Wawer A (2019) SAMSum corpus: a human-annotated dialogue dataset for abstractive summarization. In: Proceedings of the 2nd workshop on new frontiers in summarization, pp. 70\u201379","key":"9864_CR41","DOI":"10.18653\/v1\/D19-5409"},{"unstructured":"Loshchilov I, Hutter F (2019) Decoupled weight decay regularization. In: International conference on learning representations","key":"9864_CR42"}],"container-title":["Neural Computing and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-024-09864-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00521-024-09864-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-024-09864-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,23]],"date-time":"2024-08-23T14:22:55Z","timestamp":1724422975000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00521-024-09864-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,15]]},"references-count":42,"journal-issue":{"issue":"25","published-print":{"date-parts":[[2024,9]]}},"alternative-id":["9864"],"URL":"https:\/\/doi.org\/10.1007\/s00521-024-09864-y","relation":{},"ISSN":["0941-0643","1433-3058"],"issn-type":[{"type":"print","value":"0941-0643"},{"type":"electronic","value":"1433-3058"}],"subject":[],"published":{"date-parts":[[2024,5,15]]},"assertion":[{"value":"1 August 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 April 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 May 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}