{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,24]],"date-time":"2025-06-24T06:29:37Z","timestamp":1750746577237,"version":"3.37.3"},"reference-count":56,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T00:00:00Z","timestamp":1663632000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T00:00:00Z","timestamp":1663632000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["11871248"],"award-info":[{"award-number":["11871248"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003453","name":"Natural Science Foundation of Guangdong Province","doi-asserted-by":"publisher","award":["2021A515010857"],"award-info":[{"award-number":["2021A515010857"]}],"id":[{"id":"10.13039\/501100003453","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Neural Process Lett"],"published-print":{"date-parts":[[2023,8]]},"DOI":"10.1007\/s11063-022-11031-0","type":"journal-article","created":{"date-parts":[[2022,9,20]],"date-time":"2022-09-20T17:03:25Z","timestamp":1663693405000},"page":"4111-4126","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["A Step-by-Step Gradient Penalty with Similarity Calculation for Text Summary 
Generation"],"prefix":"10.1007","volume":"55","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5174-5182","authenticated-orcid":false,"given":"Shuai","family":"Zhao","sequence":"first","affiliation":[]},{"given":"Qing","family":"Li","sequence":"additional","affiliation":[]},{"given":"Tengjiao","family":"He","sequence":"additional","affiliation":[]},{"given":"Jinming","family":"Wen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,9,20]]},"reference":[{"key":"11031_CR1","doi-asserted-by":"crossref","unstructured":"Huang D, Cui L, Yang S et al (2020) What have we achieved on text summarization? In: Proceedings of the conference on empirical methods in natural language processing (EMNLP) 2020:446\u2013469","DOI":"10.18653\/v1\/2020.emnlp-main.33"},{"key":"11031_CR2","doi-asserted-by":"crossref","unstructured":"Xu J, Durrett G (2019) Neural extractive text summarization with syntactic compression. In: Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP), 3292\u20133303","DOI":"10.18653\/v1\/D19-1324"},{"key":"11031_CR3","doi-asserted-by":"crossref","unstructured":"Zhao Z, Cohen S B, Webber B (2020) Reducing quantity hallucinations in abstractive summarization. Findings of the Association for Computational Linguistics: EMNLP 2020: 2237\u20132249","DOI":"10.18653\/v1\/2020.findings-emnlp.203"},{"key":"11031_CR4","first-page":"6256","volume":"33","author":"Q Xie","year":"2020","unstructured":"Xie Q, Dai Z, Hovy E et al (2020) Unsupervised data augmentation for consistency training. Adv Neural Inf Process Syst 33:6256\u20136268","journal-title":"Adv Neural Inf Process Syst"},{"key":"11031_CR5","unstructured":"Chen T, Kornblith S, Norouzi M et al (2020) A simple framework for contrastive learning of visual representations. In: International conference on machine learning. 
PMLR, 1597\u20131607"},{"key":"11031_CR6","doi-asserted-by":"crossref","unstructured":"Edunov S, Ott M, Auli M et al (2018) Understanding back-translation at scale. In: Proceedings of the conference on empirical methods in natural language processing, 2018:489\u2013500","DOI":"10.18653\/v1\/D18-1045"},{"key":"11031_CR7","first-page":"649","volume":"28","author":"X Zhang","year":"2015","unstructured":"Zhang X, Zhao J, LeCun Y (2015) Character-level convolutional networks for text classification. Adv Neural Inf Process Syst 28:649\u2013657","journal-title":"Adv Neural Inf Process Syst"},{"key":"11031_CR8","first-page":"20","volume":"1050","author":"IJ Goodfellow","year":"2015","unstructured":"Goodfellow IJ, Shlens J, Szegedy C (2015) Explaining and harnessing adversarial examples. Stat 1050:20","journal-title":"Stat"},{"key":"11031_CR9","unstructured":"Madry A, Makelov A, Schmidt L et al (2018) Towards deep learning models resistant to adversarial attacks. In: International conference on learning representations"},{"key":"11031_CR10","unstructured":"Jianlin Su (2020) A brief talk on adversarial training: significance, methods and thinking. https:\/\/spaces.ac.cn\/archives\/7234. 01 Mar 2020"},{"key":"11031_CR11","doi-asserted-by":"crossref","unstructured":"Ross A, Doshi-Velez F (2018) Improving the adversarial robustness and interpretability of deep neural networks by regularizing their input gradients. In: Proceedings of the AAAI conference on artificial intelligence 32(1)","DOI":"10.1609\/aaai.v32i1.11504"},{"key":"11031_CR12","doi-asserted-by":"crossref","unstructured":"Karras T, Laine S, Aila T (2019) A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, 4401\u20134410","DOI":"10.1109\/CVPR.2019.00453"},{"key":"11031_CR13","unstructured":"Bergstra J, Bengio Y (2012) Random search for hyper-parameter optimization. 
J Mach Learn Res 13(2):281\u2013305"},{"key":"11031_CR14","unstructured":"Snoek J, Larochelle H, Adams RP (2012) Practical Bayesian optimization of machine learning algorithms. Adv Neural Inf Process Syst 25"},{"key":"11031_CR15","doi-asserted-by":"crossref","unstructured":"Xiao D, Zhang H, Li Y, et al (2020) Ernie-gen: an enhanced multi-flow pre-training and fine-tuning framework for natural language generation. arXiv:2001.11314,","DOI":"10.24963\/ijcai.2020\/553"},{"key":"11031_CR16","doi-asserted-by":"publisher","unstructured":"Zhao S, He T, Wen J (2022) Sparse summary generation. Appl Intell. https:\/\/doi.org\/10.1007\/s10489-022-03450-2","DOI":"10.1007\/s10489-022-03450-2"},{"issue":"2","key":"11031_CR17","doi-asserted-by":"publisher","first-page":"264","DOI":"10.1145\/321510.321519","volume":"16","author":"HP Edmundson","year":"1969","unstructured":"Edmundson HP (1969) New methods in automatic extracting. J ACM (JACM) 16(2):264\u2013285","journal-title":"J ACM (JACM)"},{"key":"11031_CR18","unstructured":"Mihalcea R, Tarau P (2004) Textrank: Bringing order into text. In: Proceedings of the 2004 conference on empirical methods in natural language processing, 404\u2013411"},{"key":"11031_CR19","unstructured":"Liu Y (2019) Fine-tune BERT for extractive summarization. arXiv:1903.10318"},{"key":"11031_CR20","unstructured":"Kenton J D M W C, Toutanova L K (2019) BERT: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of NAACL-HLT, 4171\u20134186"},{"key":"11031_CR21","doi-asserted-by":"crossref","unstructured":"Bouscarrat L, Bonnefoy A, Peel T et al (2019) STRASS: a light and effective method for extractive summarization based on sentence embeddings. In: Proceedings of the 57th annual meeting of the association for computational linguistics: student research workshop, 243\u2013252","DOI":"10.18653\/v1\/P19-2034"},{"key":"11031_CR22","unstructured":"Vaswani A, Shazeer N, Parmar N et al (2017) Attention is all you need. 
In: Advances in neural information processing systems, 5998\u20136008"},{"key":"11031_CR23","doi-asserted-by":"crossref","unstructured":"Liu Y, Titov I, Lapata M (2019) Single document summarization as tree induction. In: Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, Vol 1 (Long and Short Papers), 1745\u20131755","DOI":"10.18653\/v1\/N19-1173"},{"key":"11031_CR24","doi-asserted-by":"crossref","unstructured":"Zhang X, Wei F, Zhou M (2019) HIBERT: document level pre-training of hierarchical bidirectional transformers for document summarization. In: Proceedings of the 57th annual meeting of the association for computational linguistics, 5059\u20135069","DOI":"10.18653\/v1\/P19-1499"},{"key":"11031_CR25","doi-asserted-by":"crossref","unstructured":"Zhong M, Liu P, Chen Y et al (2020) Extractive summarization as text matching. In: Proceedings of the 58th annual meeting of the association for computational linguistics, 6197\u20136208","DOI":"10.18653\/v1\/2020.acl-main.552"},{"key":"11031_CR26","doi-asserted-by":"crossref","unstructured":"Rush AM, Chopra S, Weston J (2015) A neural attention model for abstractive sentence summarization. In: Proceedings of the conference on empirical methods in natural language processing, 2015:379\u2013389","DOI":"10.18653\/v1\/D15-1044"},{"key":"11031_CR27","doi-asserted-by":"crossref","unstructured":"Gu J, Lu Z, Li H et al (2016) Incorporating copying mechanism in sequence-to-sequence learning. In: Proceedings of the 54th annual meeting of the association for computational linguistics (Vol 1: Long Papers), 1631\u20131640","DOI":"10.18653\/v1\/P16-1154"},{"key":"11031_CR28","doi-asserted-by":"crossref","unstructured":"See A, Liu PJ, Manning CD (2017) Get to the point: summarization with pointer-generator networks. 
In: Proceedings of the 55th annual meeting of the association for computational linguistics (Vol 1: Long Papers), 1073\u20131083","DOI":"10.18653\/v1\/P17-1099"},{"key":"11031_CR29","unstructured":"Paulus R, Xiong C, Socher R (2018) A deep reinforced model for abstractive summarization. In: International conference on learning representations"},{"key":"11031_CR30","doi-asserted-by":"crossref","unstructured":"Liu L, Lu Y, Yang M et al (2018) Generative adversarial network for abstractive text summarization. In: Thirty-second AAAI conference on artificial intelligence","DOI":"10.1609\/aaai.v32i1.12141"},{"key":"11031_CR31","doi-asserted-by":"crossref","unstructured":"Ayd\u0131n S, G\u00fcd\u00fcc\u00fc \u00c7, Kutluk F et al (2019) The impact of musical experience on neural sound encoding performance. Neurosci Lett 694:124-128","DOI":"10.1016\/j.neulet.2018.11.034"},{"key":"11031_CR32","doi-asserted-by":"crossref","unstructured":"Ayd\u0131n S (2011) Computer based synchronization analysis on sleep EEG in insomnia. J Med Syst 35(4):517\u2013520","DOI":"10.1007\/s10916-009-9387-1"},{"key":"11031_CR33","doi-asserted-by":"crossref","unstructured":"Liu Y, Lapata M (2019) Text summarization with pretrained encoders. In: Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP), 3730\u20133740","DOI":"10.18653\/v1\/D19-1387"},{"key":"11031_CR34","doi-asserted-by":"crossref","unstructured":"Lin C Y (2003) Automatic evaluation of summaries using N-gram cooccurrence statistics. The association for computational linguistics 1","DOI":"10.3115\/1073445.1073465"},{"key":"11031_CR35","doi-asserted-by":"crossref","unstructured":"Lewis M, Liu Y, Goyal N et al (2020) BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. 
In: Proceedings of the 58th annual meeting of the association for computational linguistics, 7871\u20137880","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"11031_CR36","unstructured":"Dong L, Yang N, Wang W et al (2019) Unified language model pre-training for natural language understanding and generation. In: Proceedings of the 33rd international conference on neural information processing systems, 13063\u201313075"},{"key":"11031_CR37","unstructured":"Coulombe C (2018) Text data augmentation made simple by leveraging nlp cloud apis. arXiv:1812.04718"},{"key":"11031_CR38","unstructured":"Xie Z, Wang SI, Li J et al (2019) Data noising as smoothing in neural network language models. In: 5th international conference on learning representations, ICLR 2017"},{"key":"11031_CR39","doi-asserted-by":"crossref","unstructured":"Wei J, Zou K (2019) EDA: easy data augmentation techniques for boosting performance on text classification tasks. In: Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP), 6382\u20136388","DOI":"10.18653\/v1\/D19-1670"},{"key":"11031_CR40","unstructured":"Guo H, Mao Y, Zhang R (2019) Augmenting data with mixup for sentence classification: an empirical study. arXiv:1905.08941"},{"key":"11031_CR41","doi-asserted-by":"crossref","unstructured":"Wu X, Lv S, Zang L, et al (2019) Conditional bert contextual augmentation. In: International conference on computational science. Springer, Cham, 84\u201395","DOI":"10.1007\/978-3-030-22747-0_7"},{"key":"11031_CR42","unstructured":"Qu Y, Shen D, Shen Y et al (2020) CoDA: contrast-enhanced and diversity-promoting data augmentation for natural language understanding. 
In: International conference on learning representations"},{"issue":"1","key":"11031_CR43","first-page":"1929","volume":"15","author":"N Srivastava","year":"2014","unstructured":"Srivastava N, Hinton G, Krizhevsky A et al (2014) Dropout: a simple way to prevent neural networks from overfitting. J Mach Learn Res 15(1):1929\u20131958","journal-title":"J Mach Learn Res"},{"key":"11031_CR44","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S et al (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition, 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"11031_CR45","unstructured":"Ma S, Sun X (2017) A semantic relevance based neural network for text summarization and text simplification. Comput Linguist 1(1)"},{"key":"11031_CR46","doi-asserted-by":"crossref","unstructured":"Chang DJ, Desoky AH, Ouyang M et al (2009) Compute pairwise manhattan distance and pearson correlation coefficient of data points with gpu. In: 10th ACIS International Conference on Software Engineering, Artificial Intelligences, Networking and Parallel\/Distributed Computing. IEEE, pp 501\u2013506","DOI":"10.1109\/SNPD.2009.34"},{"key":"11031_CR47","doi-asserted-by":"crossref","unstructured":"Hu B, Chen Q, Zhu F (2015) LCSTS: a large scale chinese short text summarization dataset. In: Proceedings of the conference on empirical methods in natural language processing, 2015:1967\u20131972","DOI":"10.18653\/v1\/D15-1229"},{"key":"11031_CR48","doi-asserted-by":"crossref","unstructured":"Papineni K, Roukos S, Ward T et al (2002) Bleu: a method for automatic evaluation of machine translation. 
In: Proceedings of the 40th annual meeting of the Association for Computational Linguistics, 311\u2013318","DOI":"10.3115\/1073083.1073135"},{"key":"11031_CR49","volume-title":"Introduction to information retrieval","author":"H Sch\u00fctze","year":"2008","unstructured":"Sch\u00fctze H, Manning CD, Raghavan P (2008) Introduction to information retrieval. Cambridge University Press, Cambridge"},{"key":"11031_CR50","unstructured":"Song K, Tan X, Qin T et al (2019) MASS: masked sequence to sequence pre-training for language generation. In: International conference on machine learning. PMLR, 5926\u20135936"},{"key":"11031_CR51","doi-asserted-by":"crossref","unstructured":"Yadav AK, Singh A, Dhiman M et al (2022) Extractive text summarization using deep learning approach. Int J Inf Technol 1-9","DOI":"10.1007\/s41870-022-00863-7"},{"key":"11031_CR52","doi-asserted-by":"publisher","first-page":"399","DOI":"10.1613\/jair.2433","volume":"31","author":"J Clarke","year":"2008","unstructured":"Clarke J, Lapata M (2008) Global inference for sentence compression: an integer linear programming approach. J Artif Intell Res 31:399\u2013429","journal-title":"J Artif Intell Res"},{"key":"11031_CR53","doi-asserted-by":"crossref","unstructured":"Ott M, Edunov S, Baevski A et al (2019) fairseq: a fast, extensible toolkit for sequence modeling. In: Proceedings of the conference of the North American chapter of the association for computational linguistics (Demonstrations), 2019:48\u201353","DOI":"10.18653\/v1\/N19-4009"},{"key":"11031_CR54","unstructured":"Lan Z, Chen M, Goodman S et al (2019) ALBERT: a lite BERT for self-supervised learning of language representations. In: International conference on learning representations"},{"key":"11031_CR55","unstructured":"Wei J, Ren X, Li X et al (2019) Nezha: neural contextualized representation for Chinese language understanding. 
arXiv:1909.00204"},{"key":"11031_CR56","doi-asserted-by":"publisher","first-page":"3504","DOI":"10.1109\/TASLP.2021.3124365","volume":"29","author":"Y Cui","year":"2021","unstructured":"Cui Y, Che W, Liu T et al (2021) Pre-training with whole word masking for Chinese bert. IEEE\/ACM Trans Audio Speech Lang Process 29:3504\u20133514","journal-title":"IEEE\/ACM Trans Audio Speech Lang Process"}],"container-title":["Neural Processing Letters"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11063-022-11031-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11063-022-11031-0\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11063-022-11031-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,7,31]],"date-time":"2023-07-31T16:36:35Z","timestamp":1690821395000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11063-022-11031-0"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,9,20]]},"references-count":56,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2023,8]]}},"alternative-id":["11031"],"URL":"https:\/\/doi.org\/10.1007\/s11063-022-11031-0","relation":{},"ISSN":["1370-4621","1573-773X"],"issn-type":[{"type":"print","value":"1370-4621"},{"type":"electronic","value":"1573-773X"}],"subject":[],"published":{"date-parts":[[2022,9,20]]},"assertion":[{"value":"9 September 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 September 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}