{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,21]],"date-time":"2026-04-21T23:12:17Z","timestamp":1776813137699,"version":"3.51.2"},"reference-count":47,"publisher":"Springer Science and Business Media LLC","issue":"12","license":[{"start":{"date-parts":[[2024,8,22]],"date-time":"2024-08-22T00:00:00Z","timestamp":1724284800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,8,22]],"date-time":"2024-08-22T00:00:00Z","timestamp":1724284800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62272105"],"award-info":[{"award-number":["62272105"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Shanghai Municipal Science and Technology Major Project","award":["2018SHZDZX01"],"award-info":[{"award-number":["2018SHZDZX01"]}]},{"name":"ZJ Lab and Shanghai Center for Brain Science and Brain-Inspired Intelligence Technology and the 111 Project"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Knowl Inf Syst"],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1007\/s10115-024-02195-3","type":"journal-article","created":{"date-parts":[[2024,8,23]],"date-time":"2024-08-23T16:52:00Z","timestamp":1724431920000},"page":"7557-7580","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":8,"title":["GoSum: extractive summarization of long documents by reinforcement learning and graph-organized discourse state"],"prefix":"10.1007","volume":"66","author":[{"given":"Junyi","family":"Bian","sequence":"first","affiliation":[]},{"given":"Xiaodi","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Hong","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Tianyang","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Shanfeng","family":"Zhu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,8,22]]},"reference":[{"key":"2195_CR1","doi-asserted-by":"crossref","unstructured":"Xiong W, Gupta A, Toshniwal S, Mehdad Y, Yih W-t (2022) Adapting pretrained text-to-text models for long text sequences. arXiv:2209.10052","DOI":"10.18653\/v1\/2023.findings-emnlp.370"},{"key":"2195_CR2","doi-asserted-by":"crossref","unstructured":"Pang B, Nijkamp E, Kry\u015bci\u0144ski W, Savarese S, Zhou Y, Xiong C (2023) Long document summarization with top\u2013down and bottom-up inference. In: Findings of the association for computational linguistics: EACL 2023, pp 1237\u20131254","DOI":"10.18653\/v1\/2023.findings-eacl.94"},{"key":"2195_CR3","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown T, Mann B, Ryder N, Subbiah M, Kaplan JD, Dhariwal P, Neelakantan A, Shyam P, Sastry G, Askell A et al (2020) Language models are few-shot learners. Adv Neural Inf Process Syst 33:1877\u20131901","journal-title":"Adv Neural Inf Process Syst"},{"key":"2195_CR4","unstructured":"OpenAI (2023) Gpt-4 technical report. arXiv:2303.08774"},{"key":"2195_CR5","unstructured":"Touvron H, Lavril T, Izacard G, Martinet X, Lachaux M-A, Lacroix T, Rozi\u00e8re B, Goyal N, Hambro E, Azhar F, et al (2023) Llama: open and efficient foundation language models. arXiv:2302.13971"},{"key":"2195_CR6","unstructured":"Chung HW, Hou L, Longpre S, Zoph B, Tay Y, Fedus W, Li Y, Wang X, Dehghani M, Brahma S et al (2022) Scaling instruction-finetuned language models. arXiv:2210.11416"},{"key":"2195_CR7","unstructured":"Workshop B, Scao TL, Fan A, Akiki C, Pavlick E, Ili\u0107 S, Hesslow D, Castagn\u00e9 R, Luccioni AS, Yvon F, et al (2022) Bloom: a 176b-parameter open-access multilingual language model. arXiv:2211.05100"},{"key":"2195_CR8","doi-asserted-by":"crossref","unstructured":"Kry\u015bci\u0144ski W, McCann B, Xiong C, Socher R (2020) Evaluating the factual consistency of abstractive text summarization. In: Proceedings of the 2020 conference on empirical methods in natural language processing (EMNLP), pp 9332\u20139346","DOI":"10.18653\/v1\/2020.emnlp-main.750"},{"key":"2195_CR9","doi-asserted-by":"crossref","unstructured":"Pagnoni A, Balachandran V, Tsvetkov Y (2021) Understanding factuality in abstractive summarization with frank: a benchmark for factuality metrics. In: Proceedings of the 2021 Conference of the North American chapter of the association for computational linguistics: human language technologies, pp 4812\u20134829","DOI":"10.18653\/v1\/2021.naacl-main.383"},{"key":"2195_CR10","unstructured":"Lin C-Y (2004) ROUGE: a package for automatic evaluation of summaries. In: Text summarization branches out, pp 74\u201381. https:\/\/www.aclweb.org\/anthology\/W04-1013 Accessed 27 July 2019"},{"key":"2195_CR11","doi-asserted-by":"publisher","unstructured":"Zhong M, Liu P, Chen Y, Wang D, Qiu X, Huang X (2020) Extractive summarization as text matching. In: Proceedings of the 58th annual meeting of the association for computational linguistics, pp 6197\u20136208. https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.552. Accessed 21 Dec 2020","DOI":"10.18653\/v1\/2020.acl-main.552"},{"key":"2195_CR12","doi-asserted-by":"crossref","unstructured":"Zhou Q, Yang N, Wei F, Huang S, Zhou M, Zhao T (2018) Neural document summarization by jointly learning to score and select sentences. In: Proceedings of the 56th annual meeting of the association for computational linguistics (volume 1: long papers), pp 654\u2013663","DOI":"10.18653\/v1\/P18-1061"},{"key":"2195_CR13","unstructured":"Hermann KM, Kocisky T, Grefenstette E, Espeholt L, Kay W, Suleyman M, Blunsom P (2015) Teaching machines to read and comprehend. Adv Neural Inf Process Syst 28"},{"key":"2195_CR14","unstructured":"Koupaee M, Wang WY (2018) Wikihow: a large scale text summarization dataset. arXiv:1810.09305"},{"key":"2195_CR15","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser \u0141, Polosukhin I (2017) Attention is all you need. Adv Neural Inf Process Syst 30"},{"key":"2195_CR16","doi-asserted-by":"crossref","unstructured":"Narayan S, Cohen SB, Lapata M (2018) Ranking sentences for extractive summarization with reinforcement learning. In: Proceedings of the 2018 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long papers), pp 1747\u20131759","DOI":"10.18653\/v1\/N18-1158"},{"key":"2195_CR17","doi-asserted-by":"crossref","unstructured":"Liu Y, Lapata M (2019) Text summarization with pretrained encoders. In: Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP), pp 3730\u20133740","DOI":"10.18653\/v1\/D19-1387"},{"key":"2195_CR18","unstructured":"Zhang J, Zhao Y, Saleh M, Liu PJ (2020) PEGASUS: pre-training with extracted gap-sentences for abstractive summarization. arXiv:1912.08777"},{"key":"2195_CR19","first-page":"17283","volume":"33","author":"M Zaheer","year":"2020","unstructured":"Zaheer M, Guruganesh G, Dubey KA, Ainslie J, Alberti C, Ontanon S, Pham P, Ravula A, Wang Q, Yang L et al (2020) Big bird: transformers for longer sequences. Adv Neural Inf Process Syst 33:17283\u201317297","journal-title":"Adv Neural Inf Process Syst"},{"key":"2195_CR20","unstructured":"Beltagy I, Peters ME, Cohan A (2020) Longformer: the long-document transformer. arXiv:2004.05150"},{"key":"2195_CR21","doi-asserted-by":"crossref","unstructured":"Huang L, Cao S, Parulian N, Ji H, Wang L (2021) Efficient attentions for long document summarization. In: Proceedings of the 2021 conference of the north American chapter of the association for computational linguistics: human language technologies, pp 1419\u20131436","DOI":"10.18653\/v1\/2021.naacl-main.112"},{"key":"2195_CR22","doi-asserted-by":"crossref","unstructured":"Xiao W, Carenini G (2019) Extractive summarization of long documents by combining global and local context. In: EMNLP-IJCNLP, pp 3011\u20133021","DOI":"10.18653\/v1\/D19-1298"},{"key":"2195_CR23","doi-asserted-by":"crossref","unstructured":"Collins E, Augenstein I, Riedel S (2017) A supervised approach to extractive summarisation of scientific papers. In: Proceedings of the 21st conference on computational natural language learning (CoNLL 2017), pp 195\u2013205","DOI":"10.18653\/v1\/K17-1021"},{"key":"2195_CR24","doi-asserted-by":"crossref","unstructured":"Zhu T, Hua W, Qu J, Zhou X (2021) Summarizing long-form document with rich discourse information. In: Proceedings of the 30th ACM international conference on information & knowledge management, pp 2770\u20132779","DOI":"10.1145\/3459637.3482396"},{"key":"2195_CR25","doi-asserted-by":"crossref","unstructured":"Cho S, Song K, Wang X, Liu F, Yu D (2022) Toward unifying text segmentation and long document summarization. In: Proceedings of the 2022 conference on empirical methods in natural language processing, pp 106\u2013118","DOI":"10.18653\/v1\/2022.emnlp-main.8"},{"key":"2195_CR26","doi-asserted-by":"crossref","unstructured":"Ruan Q, Ostendorff M, Rehm G (2022) Histruct+: improving extractive text summarization with hierarchical structure information. In: Findings of the association for computational linguistics: ACL 2022, pp 1292\u20131308","DOI":"10.18653\/v1\/2022.findings-acl.102"},{"key":"2195_CR27","doi-asserted-by":"publisher","first-page":"457","DOI":"10.1613\/jair.1523","volume":"22","author":"G Erkan","year":"2004","unstructured":"Erkan G, Radev DR (2004) Lexrank: graph-based lexical centrality as salience in text summarization. J Artific Intell Res 22:457\u2013479","journal-title":"J Artific Intell Res"},{"key":"2195_CR28","doi-asserted-by":"crossref","unstructured":"Wang D, Liu P, Zheng Y, Qiu X, Huang X (2020) Heterogeneous graph neural networks for extractive document summarization. In: Proceedings of the 58th annual meeting of the association for computational linguistics, pp 6209\u20136219","DOI":"10.18653\/v1\/2020.acl-main.553"},{"key":"2195_CR29","doi-asserted-by":"crossref","unstructured":"Jia R, Cao Y, Tang H, Fang F, Cao C, Wang S (2020) Neural extractive summarization with hierarchical attentive heterogeneous graph network. In: Proceedings of the 2020 conference on empirical methods in natural language processing (EMNLP), pp 3622\u20133631","DOI":"10.18653\/v1\/2020.emnlp-main.295"},{"key":"2195_CR30","doi-asserted-by":"crossref","unstructured":"Cui P, Hu L, Liu Y (2020) Enhancing extractive text summarization with topic-aware graph neural networks. In: Proceedings of the 28th international conference on computational linguistics, pp 5360\u20135371","DOI":"10.18653\/v1\/2020.coling-main.468"},{"key":"2195_CR31","doi-asserted-by":"crossref","unstructured":"Xu J, Gan Z, Cheng Y, Liu J (2020) Discourse-aware neural extractive text summarization. In: Proceedings of the 58th annual meeting of the association for computational linguistics, pp 5021\u20135031","DOI":"10.18653\/v1\/2020.acl-main.451"},{"key":"2195_CR32","doi-asserted-by":"crossref","unstructured":"Wu Y, Hu B (2018) Learning to extract coherent summary via deep reinforcement learning. In: Proceedings of the AAAI conference on artificial intelligence, vol 32","DOI":"10.1609\/aaai.v32i1.11987"},{"key":"2195_CR33","doi-asserted-by":"crossref","unstructured":"B\u00f6hm F, Gao Y, Meyer CM, Shapira O, Dagan I, Gurevych I (2019) Better rewards yield better summaries: Learning to summarise without references. In: Proceedings of the 2019 conference on empirical methods in natural language processing and the 9th international joint conference on natural language processing (EMNLP-IJCNLP), pp 3110\u20133120","DOI":"10.18653\/v1\/D19-1307"},{"key":"2195_CR34","doi-asserted-by":"crossref","unstructured":"Gao Y, Meyer CM, Gurevych I (2018) April: interactively learning to summarise by combining active preference learning and reinforcement learning. In: Proceedings of the 2018 conference on empirical methods in natural language processing, pp 4120\u20134130","DOI":"10.18653\/v1\/D18-1445"},{"key":"2195_CR35","doi-asserted-by":"crossref","unstructured":"Chen Y-C, Bansal M (2018) Fast abstractive summarization with reinforce-selected sentence rewriting. In: Proceedings of the 56th annual meeting of the association for computational linguistics (volume 1: long papers), pp 675\u2013686","DOI":"10.18653\/v1\/P18-1063"},{"key":"2195_CR36","unstructured":"Xiao W, Carenini G (2020) Systematically exploring redundancy reduction in summarizing long documents. In: Proceedings of the 1st conference of the Asia\u2013Pacific chapter of the association for computational linguistics and the 10th international joint conference on natural language processing, pp 516\u2013528"},{"key":"2195_CR37","doi-asserted-by":"crossref","unstructured":"Gu N, Ash E, Hahnloser R (2022) Memsum: extractive summarization of long documents using multi-step episodic markov decision processes. In: Proceedings of the 60th annual meeting of the association for computational linguistics (volume 1: long papers), pp 6507\u20136522","DOI":"10.18653\/v1\/2022.acl-long.450"},{"key":"2195_CR38","unstructured":"Pang RY, He H (2020) Text generation by learning from demonstrations. In: International conference on learning representations"},{"key":"2195_CR39","unstructured":"Xu Y, Lapata M (2022) Text summarization with oracle expectation. In: The eleventh international conference on learning representations"},{"issue":"3","key":"2195_CR40","doi-asserted-by":"publisher","first-page":"229","DOI":"10.1007\/BF00992696","volume":"8","author":"RJ Williams","year":"1992","unstructured":"Williams RJ (1992) Simple statistical gradient-following algorithms for connectionist reinforcement learning. Mach Learn 8(3):229\u2013256","journal-title":"Mach Learn"},{"key":"2195_CR41","doi-asserted-by":"crossref","unstructured":"Pennington J, Socher R, Manning CD (2014) Glove: Global vectors for word representation. In: Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pp 1532\u20131543","DOI":"10.3115\/v1\/D14-1162"},{"issue":"8","key":"2195_CR42","doi-asserted-by":"publisher","first-page":"1735","DOI":"10.1162\/neco.1997.9.8.1735","volume":"9","author":"S Hochreiter","year":"1997","unstructured":"Hochreiter S, Schmidhuber J (1997) Long short-term memory. Neural Comput 9(8):1735\u20131780","journal-title":"Neural Comput"},{"key":"2195_CR43","unstructured":"Veli\u010dkovi\u0107 P, Cucurull G, Casanova A, Romero A, Lio P, Bengio Y (2017) Graph attention networks. arXiv:1710.10903"},{"key":"2195_CR44","doi-asserted-by":"publisher","unstructured":"Cohan A, Dernoncourt F, Kim DS, Bui T, Kim S, Chang W, Goharian N (2018) A discourse-aware attention model for abstractive summarization of long documents. In: Proceedings of the 2018 conference of the north American chapter of the association for computational linguistics: human language technologies, volume 2 (short papers), pp 615\u2013621. https:\/\/doi.org\/10.18653\/v1\/N18-2097 . Accessed 2020-10-12","DOI":"10.18653\/v1\/N18-2097"},{"key":"2195_CR45","unstructured":"Kingma DP, Ba J (2015) Adam: a method for stochastic optimization. In: ICLR (Poster)"},{"key":"2195_CR46","doi-asserted-by":"crossref","unstructured":"Pilault J, Li R, Subramanian S, Pal C (2020) On extractive and abstractive neural document summarization with transformer language models. In: Proceedings of the 2020 conference on empirical methods in natural language processing (EMNLP), pp 9308\u20139319","DOI":"10.18653\/v1\/2020.emnlp-main.748"},{"key":"2195_CR47","unstructured":"Zhang T, Kishore V, Wu F, Weinberger KQ, Artzi Y (2019) Bertscore: evaluating text generation with bert. In: International conference on learning representations"}],"container-title":["Knowledge and Information Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10115-024-02195-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10115-024-02195-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10115-024-02195-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,24]],"date-time":"2024-10-24T12:07:56Z","timestamp":1729771676000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10115-024-02195-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,22]]},"references-count":47,"journal-issue":{"issue":"12","published-print":{"date-parts":[[2024,12]]}},"alternative-id":["2195"],"URL":"https:\/\/doi.org\/10.1007\/s10115-024-02195-3","relation":{},"ISSN":["0219-1377","0219-3116"],"issn-type":[{"value":"0219-1377","type":"print"},{"value":"0219-3116","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,8,22]]},"assertion":[{"value":"17 March 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 July 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 July 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 August 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}