{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T13:37:49Z","timestamp":1742996269624,"version":"3.40.3"},"publisher-location":"Cham","reference-count":38,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783031217425"},{"type":"electronic","value":"9783031217432"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-21743-2_42","type":"book-chapter","created":{"date-parts":[[2022,12,8]],"date-time":"2022-12-08T14:24:05Z","timestamp":1670509445000},"page":"532-544","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["A Survey of Abstractive Text Summarization Utilising Pretrained Language Models"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3113-8980","authenticated-orcid":false,"given":"Ayesha Ayub","family":"Syed","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5116-5708","authenticated-orcid":false,"given":"Ford Lumban","family":"Gaol","sequence":"additional","affiliation":[]},{"given":"Alfred","family":"Boediman","sequence":"additional","affiliation":[]},{"given":"Tokuro","family":"Matsuo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2681-0901","authenticated-orcid":false,"given":"Widodo","family":"Budiharto","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,12,9]]},"reference":[{"key":"42_CR1","doi-asserted-by":"crossref","unstructured":"Klymenko, O.,\u00a0 Braun, D.,\u00a0 Matthes, F.:\u00a0 Automatic Text Summarization: a State-of-the-Art Review, vol. 1, pp. 648\u2013655 (2020)","DOI":"10.5220\/0009723306480655"},{"key":"42_CR2","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2021.08.002","author":"X Han","year":"2021","unstructured":"Han, X., et al.: Pre-Trained Models: Past, Present and Future. AI Open (2021). https:\/\/doi.org\/10.1016\/j.aiopen.2021.08.002","journal-title":"AI Open"},{"key":"42_CR3","doi-asserted-by":"crossref","unstructured":"Hasan,\u00a0T., et al.:\u00a0 XL-Sum: large-scale multilingual abstractive summarization for 44 languages. In: ACL-IJCNLP, pp. 4693\u20134703 (2021)","DOI":"10.18653\/v1\/2021.findings-acl.413"},{"issue":"01","key":"42_CR4","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1609\/aaai.v34i01.5328","volume":"34","author":"Y Cao","year":"2020","unstructured":"Cao, Y., Wan, X., Yao, J., Yu, D.: MultiSumm: Towards a Unified Model for Multi-Lingual Abstractive Summarization. Proc. AAAI 34(01), 11\u201318 (2020). https:\/\/doi.org\/10.1609\/aaai.v34i01.5328","journal-title":"Proc. 
AAAI"},{"key":"42_CR5","doi-asserted-by":"publisher","first-page":"13248","DOI":"10.1109\/ACCESS.2021.3052783","volume":"9","author":"AA Syed","year":"2021","unstructured":"Syed, A.A., Gaol, F.L., Matsuo, T.: A survey of the state-of-the-art models in neural abstractive text summarization. IEEE Access 9, 13248\u201313265 (2021)","journal-title":"IEEE Access"},{"key":"42_CR6","first-page":"4171","volume":"1","author":"J Devlin","year":"2019","unstructured":"Devlin, J., et al.: BERT: Pre-training of deep bidirectional transformers for language understanding. NAACL-HLT 1, 4171\u20134186 (2019)","journal-title":"NAACL-HLT"},{"key":"42_CR7","unstructured":"\u00a0Peters,\u00a0M.E., et al.: Improving language understanding by generative pre-training. In: OpenAI, pp. 1\u201310 (2018)"},{"key":"42_CR8","unstructured":"K. Song, X. Tan, T. Qin, J. Lu, and T. Y. Liu, \u201cMASS: Masked sequence to sequence pre-training for language generation,\u201d 36th ICML, pp. 10384\u201310394, 2019."},{"key":"42_CR9","unstructured":"Dong, L., et al.:\u00a0 Unified language model pre-training for natural language understanding and generation. In: NIPS., vol. 32 (2019)"},{"key":"42_CR10","doi-asserted-by":"crossref","unstructured":"Lewis,\u00a0M., et al.:\u00a0 BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In: ACL, pp. 7871\u20137880 (2020)","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"42_CR11","unstructured":"Zhang, J., Zhao, Y.,\u00a0 Saleh, M.,\u00a0 Liu, P. J.: PEGASUS: pre-training with extracted gap-sentences for abstractive summarization. In: ICML, pp. 11328\u201311339 (2020)"},{"key":"42_CR12","first-page":"1","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res. 21, 1\u201367 (2020)","journal-title":"J. Mach. Learn. Res."},{"key":"42_CR13","unstructured":"Beltagy,\u00a0I., et al.:\u00a0 Longformer: The Long-Document Transformer (2020)"},{"key":"42_CR14","unstructured":"Zaheer,\u00a0M., et al.:\u00a0 \u201cBig bird: Transformers for longer sequences,\u201d 2020."},{"key":"42_CR15","unstructured":"Aksenov,\u00a0D., et al.:\u00a0 Abstractive text summarization based on language model conditioning and locality modeling. In: 12th International Conference on Language Resources and Evaluation, pp. 6680\u20136689 (2020)"},{"key":"42_CR16","first-page":"75","volume":"2718","author":"E Zolotareva","year":"2020","unstructured":"Zolotareva, E., et al.: Abstractive text summarization using transfer learning. CEUR Workshop Proc. 2718, 75\u201380 (2020)","journal-title":"CEUR Workshop Proc."},{"key":"42_CR17","doi-asserted-by":"crossref","unstructured":"\u00a0Zhao, S., You, F.,\u00a0 Liu, Z.: Leveraging pre-trained language model for summary generation on short text.\u00a0 IEEE Access 1\u20136 (2020)","DOI":"10.1109\/ACCESS.2020.3045748"},{"key":"42_CR18","unstructured":"Hoang,\u00a0A., et al.: Efficient adaptation of pretrained transformers for abstractive summarization (2019)"},{"key":"42_CR19","doi-asserted-by":"publisher","unstructured":"Kim, S.:\u00a0 Using pre-Trained Transformer for Better Lay Summarization, pp. 328\u2013335 (2020).\u00a0\u00a0https:\/\/doi.org\/10.18653\/v1\/2020.sdp-1.38.","DOI":"10.18653\/v1\/2020.sdp-1.38"},{"key":"42_CR20","doi-asserted-by":"publisher","unstructured":"Bajaj,\u00a0A., et al.:\u00a0 Long Document Summarization in a Low Resource Setting using Pretrained Language Models, pp. 
{"key":"42_CR21","doi-asserted-by":"crossref","unstructured":"Xiao, L., Wang, L., He, H.: Modeling content importance for summarization with pre-trained language models, pp. 3606\u20133611 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.293"},{"key":"42_CR22","doi-asserted-by":"crossref","unstructured":"Pilault, J., Li, R., Subramanian, S., Pal, C.: On extractive and abstractive neural document summarization with transformer language models, pp. 9308\u20139319 (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.748"},{"key":"42_CR23","doi-asserted-by":"crossref","unstructured":"Yu, T., Liu, Z., Fung, P.: AdaptSum: towards low-resource domain adaptation for abstractive summarization, pp. 5892\u20135904 (2021)","DOI":"10.18653\/v1\/2021.naacl-main.471"},{"key":"42_CR24","unstructured":"Aghajanyan, A., et al.: Better fine-tuning by reducing representational collapse (2021)"},{"key":"42_CR25","doi-asserted-by":"crossref","unstructured":"Liu, Y., Lapata, M.: Text summarization with pretrained encoders (2019)","DOI":"10.18653\/v1\/D19-1387"},{"key":"42_CR26","doi-asserted-by":"crossref","unstructured":"Fabbri, A., et al.: Improving zero and few-shot abstractive summarization with intermediate fine-tuning and data augmentation. In: NAACL, pp. 704\u2013717 (2021)","DOI":"10.18653\/v1\/2021.naacl-main.57"},{"key":"42_CR27","unstructured":"Gunel, B., Du, J., Conneau, A., Stoyanov, V.: Supervised contrastive learning for pre-trained language model fine-tuning. In: ICLR, pp. 1\u201321 (2021)"},{"key":"42_CR28","doi-asserted-by":"crossref","unstructured":"Gururangan, S., et al.: Don\u2019t stop pretraining: adapt language models to domains and tasks. In: ACL, pp. 8342\u20138360 (2020)","DOI":"10.18653\/v1\/2020.acl-main.740"},{"key":"42_CR29","doi-asserted-by":"crossref","unstructured":"Guo, H., et al.: Multi-source domain adaptation for text classification via DistanceNet-bandits. In: 34th AAAI Conference on Artificial Intelligence, pp. 7830\u20137838 (2020)","DOI":"10.1609\/aaai.v34i05.6288"},{"key":"42_CR30","unstructured":"Khandelwal, U., Clark, K., Jurafsky, D., Kaiser, \u0141.: Sample efficient text summarization using a single pre-trained transformer (2019)"},{"key":"42_CR31","first-page":"1631","volume":"3","author":"J Gu","year":"2016","unstructured":"Gu, J., Lu, Z., Li, H., Li, V.O.K.: Incorporating copying mechanism in sequence-to-sequence learning. ACL 3, 1631\u20131640 (2016)","journal-title":"ACL"},{"key":"42_CR32","first-page":"1073","volume":"1","author":"A See","year":"2017","unstructured":"See, A., Liu, P.J., Manning, C.D.: Get to the point: summarization with pointer-generator networks. ACL 1, 1073\u20131083 (2017)","journal-title":"ACL"},{"key":"42_CR33","unstructured":"Chen, Q., et al.: Distraction-based neural networks for document summarization (2016)"},{"key":"42_CR34","doi-asserted-by":"crossref","unstructured":"Li, S., Lei, D., Qin, P., Wang, W.Y.: Deep reinforcement learning with distributional semantic rewards for abstractive summarization. In: EMNLP, pp. 6038\u20136044 (2019)","DOI":"10.18653\/v1\/D19-1623"},{"key":"42_CR35","unstructured":"Paulus, R., Xiong, C., Socher, R.: A deep reinforced model for abstractive summarization. In: 6th ICLR, pp. 1\u201312 (2018)"},
1\u201312 (2018)"},{"key":"42_CR36","unstructured":"Li,\u00a0W.: et al.: Improving neural abstractive document summarization with explicit information selection modeling.\u00a0 In: EMNLP, pp. 1787\u20131796 (2020)"},{"key":"42_CR37","doi-asserted-by":"publisher","unstructured":"Kong, L., Jiang, H., Zhuang, Y., Lyu, J., Zhao, T.,\u00a0 Zhang, C.: Calibrated Language Model Fine-Tuning for In- and Out-of-Distribution Data, pp. 1326\u20131340 (2020).\u00a0https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.102.","DOI":"10.18653\/v1\/2020.emnlp-main.102"},{"key":"42_CR38","unstructured":"Laifa,\u00a0A., et al.:\u00a0 Data augmentation impact on domain-specific text summarization Data augmentation impact on domain-specific text summarization (2021)"}],"container-title":["Lecture Notes in Computer Science","Intelligent Information and Database Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-21743-2_42","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,12]],"date-time":"2024-03-12T17:06:17Z","timestamp":1710263177000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-21743-2_42"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031217425","9783031217432"],"references-count":38,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-21743-2_42","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"9 December 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ACIIDS","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Asian Conference on Intelligent Information and Database Systems","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Ho Chi Minh City","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vietnam","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 November 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30 November 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"aciids2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/aciids.pwr.edu.pl\/2022\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}