{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,21]],"date-time":"2025-12-21T06:23:22Z","timestamp":1766298202985,"version":"3.41.0"},"publisher-location":"New York, NY, USA","reference-count":40,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,8,20]],"date-time":"2024-08-20T00:00:00Z","timestamp":1724112000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,8,20]]},"DOI":"10.1145\/3685650.3685664","type":"proceedings-article","created":{"date-parts":[[2024,9,18]],"date-time":"2024-09-18T10:19:28Z","timestamp":1726654768000},"page":"1-10","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":5,"title":["Assessing Abstractive and Extractive Methods for Automatic News Summarization"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5035-9404","authenticated-orcid":false,"given":"Hil\u00e1rio","family":"Oliveira","sequence":"first","affiliation":[{"name":"Instituto Federal do Esp\u00edrito Santo, Serra, Esp\u00edrito Santo, Brazil"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3497-5044","authenticated-orcid":false,"given":"Rafael Dueire","family":"Lins","sequence":"additional","affiliation":[{"name":"Universidade Federal Rural de Pernambuco, Universidade Federal de Pernambuco, Recife, Pernambuco, Brazil"}]}],"member":"320","published-online":{"date-parts":[[2024,9,18]]},"reference":[
{"key":"e_1_3_2_1_1_1","doi-asserted-by":"publisher","DOI":"10.1186\/s40537-023-00836-y"},
{"key":"e_1_3_2_1_2_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2021.101276"},
{"key":"e_1_3_2_1_3_1","volume-title":"Holistic Evaluation of Language Models. Annals of the New York Academy of Sciences","author":"Bommasani Rishi","year":"2023","unstructured":"Rishi Bommasani, Percy Liang, and Tony Lee. 2023. Holistic Evaluation of Language Models. Annals of the New York Academy of Sciences (2023)."},
{"key":"e_1_3_2_1_4_1","unstructured":"Tom Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared D Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell et al. 2020. Language models are few-shot learners. Adv. in neural information processing sys. 33 (2020) 1877--1901."},
{"key":"e_1_3_2_1_5_1","unstructured":"Hyung Won Chung Le Hou Shayne Longpre Barret Zoph Yi Tay William Fedus Eric Li Xuezhi Wang Mostafa Dehghani Siddhartha Brahma et al. 2022. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416 (2022)."},
{"key":"e_1_3_2_1_6_1","volume-title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In North American","author":"Devlin Jacob","year":"2019","unstructured":"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In North American Chapter of the Assoc. Computational Linguistics: Human Language Technologies. 4171--4186."},
{"key":"e_1_3_2_1_7_1","doi-asserted-by":"publisher","DOI":"10.1145\/321510.321519"},
{"key":"e_1_3_2_1_8_1","volume-title":"Rafael Dueire Lins, Gabriel Pereira e Silva, Fred Freitas, George DC Cavalcanti, Rinaldo Lima, Steven J Simske, and Luciano Favaro.","author":"Ferreira Rafael","year":"2013","unstructured":"Rafael Ferreira, Luciano de Souza Cabral, Rafael Dueire Lins, Gabriel Pereira e Silva, Fred Freitas, George DC Cavalcanti, Rinaldo Lima, Steven J Simske, and Luciano Favaro. 2013. Assessing sentence scoring techniques for extractive text summarization. Expert systems with applications 40, 14 (2013), 5755--5764."},
{"volume-title":"33rd annual ACM symposium on applied computing. 712--719.","author":"Garcia Rodrigo","key":"e_1_3_2_1_9_1","unstructured":"Rodrigo Garcia, Rinaldo Lima, Bernard Espinasse, and Hil\u00e1rio Oliveira. 2018. Towards coherent single-document summarization: an integer linear programming-based approach. In 33rd annual ACM symposium on applied computing. 712--719."},
{"key":"e_1_3_2_1_10_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2018.12.011"},
{"key":"e_1_3_2_1_11_1","volume-title":"Teaching machines to read and comprehend. Advances in neural information processing systems 28","author":"Hermann Karl Moritz","year":"2015","unstructured":"Karl Moritz Hermann, Tomas Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. Advances in neural information processing systems 28 (2015)."},
{"key":"e_1_3_2_1_12_1","unstructured":"Albert Q Jiang Alexandre Sablayrolles Arthur Mensch et al. 2023. Mistral 7B. arXiv preprint arXiv:2310.06825 (2023)."},
{"key":"e_1_3_2_1_13_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.703"},
{"key":"e_1_3_2_1_14_1","volume-title":"ROUGE: A Package for Automatic Evaluation of Summaries. In Text Summarization Branches Out","author":"Lin Chin-Yew","year":"2004","unstructured":"Chin-Yew Lin. 2004. ROUGE: A Package for Automatic Evaluation of Summaries. In Text Summarization Branches Out. Association for Computational Linguistics, Barcelona, Spain, 74--81."},
{"key":"e_1_3_2_1_15_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33019815"},
{"volume-title":"Proc. of the ACM Symposium on Document Engineering 2020","author":"Lins Rafael Dueire","key":"e_1_3_2_1_16_1","unstructured":"Rafael Dueire Lins, Rafael Ferreira de Mello, and Steve J. Simske. 2020. ACM DocEng'2020 Competition on Extractive Text Summarization. In Proc. of the ACM Symposium on Document Engineering 2020 (Virtual Event, CA, USA) (DocEng '20). Association for Computing Machinery, NY, USA, Article 3, 4 pages."},
{"key":"e_1_3_2_1_17_1","doi-asserted-by":"publisher","DOI":"10.1145\/3342558.3345388"},
{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1145\/3685650.3685671"},
{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.207"},
{"key":"e_1_3_2_1_20_1","doi-asserted-by":"publisher","DOI":"10.1147\/rd.22.0159"},
{"key":"e_1_3_2_1_21_1","volume-title":"Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing, Dekang Lin and Dekai Wu (Eds.). Association for Computational Linguistics","author":"Mihalcea Rada","year":"2004","unstructured":"Rada Mihalcea and Paul Tarau. 2004. TextRank: Bringing Order into Text. In Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing, Dekang Lin and Dekai Wu (Eds.). Association for Computational Linguistics, Barcelona, Spain, 404--411."},
{"key":"e_1_3_2_1_22_1","volume-title":"Leveraging BERT for extractive text summarization on lectures. arXiv preprint arXiv:1906.04165","author":"Miller Derek","year":"2019","unstructured":"Derek Miller. 2019. Leveraging BERT for extractive text summarization on lectures. arXiv preprint arXiv:1906.04165 (2019)."},
{"key":"e_1_3_2_1_23_1","volume-title":"Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization. In Empirical Methods in Natural Language Processing","author":"Narayan Shashi","year":"2018","unstructured":"Shashi Narayan, Shay B. Cohen, and Mirella Lapata. 2018. Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization. In Empirical Methods in Natural Language Processing. Association for Computational Linguistics, 1797--1807."},
{"volume-title":"A compositional context sensitive multi-document summarizer: exploring the factors that influence summarization","author":"Nenkova Ani","key":"e_1_3_2_1_24_1","unstructured":"Ani Nenkova, Lucy Vanderwende, and Kathleen McKeown. 2006. A compositional context sensitive multi-document summarizer: exploring the factors that influence summarization. In SIGIR, Efthimis N. Efthimiadis, Susan T. Dumais, David Hawking, and Kalervo J\u00e4rvelin (Eds.). ACM, 573--580."},
{"key":"e_1_3_2_1_25_1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2016.08.030"},
{"volume-title":"Proceedings of the 2016 ACM Symposium on Document Engineering","author":"Oliveira Hil\u00e1rio","key":"e_1_3_2_1_26_1","unstructured":"Hil\u00e1rio Oliveira, Rinaldo Lima, Rafael Dueire Lins, Fred Freitas, Marcelo Riss, and Steven J. Simske. 2016. Assessing Concept Weighting in Integer Linear Programming Based Single-document Summarization. In Proceedings of the 2016 ACM Symposium on Document Engineering (Vienna, Austria) (DocEng '16). ACM, New York, NY, USA, 205--208."},
{"key":"e_1_3_2_1_27_1","doi-asserted-by":"publisher","DOI":"10.1109\/BRACIS.2016.079"},
{"key":"e_1_3_2_1_28_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICTAI.2017.00051"},
{"key":"e_1_3_2_1_29_1","doi-asserted-by":"publisher","DOI":"10.1017\/S1351324919000524"},
{"key":"e_1_3_2_1_30_1","volume-title":"Proceedings of the 40th Annual Meeting on Association for Computational Linguistics","author":"Papineni Kishore","year":"2002","unstructured":"Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002. BLEU: A Method for Automatic Evaluation of Machine Translation. In Proceedings of the 40th Annual Meeting on Association for Computational Linguistics (Philadelphia, Pennsylvania) (ACL '02). Assoc. for Computational Linguistics, USA, 311--318."},
{"key":"e_1_3_2_1_31_1","article-title":"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer","volume":"21","author":"Raffel Colin","year":"2020","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. J. Mach. Learn. Res. 21, 1, Article 140 (jan 2020), 67 pages.","journal-title":"J. Mach. Learn. Res."},
{"key":"e_1_3_2_1_32_1","doi-asserted-by":"publisher","DOI":"10.1155\/2020\/9365340"},
{"key":"e_1_3_2_1_33_1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3052783"},
{"key":"e_1_3_2_1_34_1","volume-title":"Juliette Love, et al.","author":"Team Gemma","year":"2024","unstructured":"Gemma Team, Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, Laurent Sifre, Morgane Rivi\u00e8re, Mihir Sanjay Kale, Juliette Love, et al. 2024. Gemma: Open models based on gemini research and technology. arXiv preprint arXiv:2403.08295 (2024)."},
{"key":"e_1_3_2_1_35_1","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale et al. 2023. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)."},
{"key":"e_1_3_2_1_36_1","volume-title":"\u0141ukasz Kaiser, and Illia Polosukhin","author":"Vaswani Ashish","year":"2017","unstructured":"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is All you Need. In Advances in Neural Information Processing Systems, I. Guyon, U. Von Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (Eds.), Vol. 30. Curran Associates, Inc."},
{"volume-title":"Proceedings of the 37th International Conference on Machine Learning (ICML'20)","author":"Zhang Jingqing","key":"e_1_3_2_1_37_1","unstructured":"Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Peter J. Liu. 2020. PEGASUS: Pre-Training with Extracted Gap-Sentences for Abstractive Summarization. In Proceedings of the 37th International Conference on Machine Learning (ICML'20). JMLR.org, Article 1051, 12 pages."},
{"key":"e_1_3_2_1_38_1","volume-title":"BERTScore: Evaluating Text Generation with BERT. In 8th International Conference on Learning Representations, ICLR 2020","author":"Zhang Tianyi","year":"2020","unstructured":"Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. BERTScore: Evaluating Text Generation with BERT. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net."},
{"key":"e_1_3_2_1_39_1","volume-title":"Benchmarking large language models for news summarization. arXiv preprint arXiv:2301.13848","author":"Zhang Tianyi","year":"2023","unstructured":"Tianyi Zhang, Faisal Ladhak, Esin Durmus, Percy Liang, Kathleen McKeown, and Tatsunori B Hashimoto. 2023. Benchmarking large language models for news summarization. arXiv preprint arXiv:2301.13848 (2023)."},
{"key":"e_1_3_2_1_40_1","unstructured":"Wayne Xin Zhao Kun Zhou Junyi Li Tianyi Tang Xiaolei Wang Yupeng Hou Yingqian Min Beichen Zhang Junjie Zhang Zican Dong et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223 (2023)."}],
"event":{"name":"DocEng '24: ACM Symposium on Document Engineering 2024","sponsor":["SIGWEB ACM Special Interest Group on Hypertext, Hypermedia, and Web"],"location":"San Jose CA USA","acronym":"DocEng '24"},"container-title":["Proceedings of the ACM Symposium on Document Engineering 2024"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3685650.3685664","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3685650.3685664","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:06:18Z","timestamp":1750291578000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3685650.3685664"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,20]]},"references-count":40,"alternative-id":["10.1145\/3685650.3685664","10.1145\/3685650"],"URL":"https:\/\/doi.org\/10.1145\/3685650.3685664","relation":{},"subject":[],"published":{"date-parts":[[2024,8,20]]},"assertion":[{"value":"2024-09-18","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}