{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T08:16:20Z","timestamp":1761639380136,"version":"build-2065373602"},"reference-count":57,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T00:00:00Z","timestamp":1754006400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Intell Inf Syst"],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1007\/s10844-025-00967-z","type":"journal-article","created":{"date-parts":[[2025,8,1]],"date-time":"2025-08-01T06:19:30Z","timestamp":1754029170000},"page":"1891-1919","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Enhancing inter-sentence coherence of extractive summarization with multitask learning"],"prefix":"10.1007","volume":"63","author":[{"given":"Renlong","family":"Jie","sequence":"first","affiliation":[]},{"given":"Xiaojun","family":"Meng","sequence":"additional","affiliation":[]},{"given":"Lifeng","family":"Shang","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Qun","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,8,1]]},"reference":[{"key":"967_CR1","doi-asserted-by":"publisher","first-page":"6201","DOI":"10.3233\/JIFS-179702","volume":"38","author":"N Akhtar","year":"2020","unstructured":"Akhtar, N., Beg, M. 
S., Hussain, M., et al. (2020). Extractive multidocument summarization using relative redundancy and coherence scores. Journal of Intelligent & Fuzzy Systems, 38, 6201\u20136210. https:\/\/doi.org\/10.3233\/JIFS-179702","journal-title":"Journal of Intelligent & Fuzzy Systems"},{"key":"967_CR2","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/ACCESS.2025.3538886","volume":"13","author":"M Azam","year":"2025","unstructured":"Azam, M., Khalid, S., Almutairi, S., et al. (2025). Current trends and advances in extractive text summarization: A comprehensive review. IEEE Access, 13, 1\u20131. https:\/\/doi.org\/10.1109\/ACCESS.2025.3538886","journal-title":"IEEE Access"},{"key":"967_CR3","doi-asserted-by":"publisher","first-page":"1","DOI":"10.3115\/1219840.1219858","volume":"34","author":"R Barzilay","year":"2008","unstructured":"Barzilay, R., & Lapata, M. (2008). Modeling local coherence: An entity-based approach. Computational Linguistics, 34, 1\u201334. https:\/\/doi.org\/10.3115\/1219840.1219858","journal-title":"Computational Linguistics"},{"doi-asserted-by":"publisher","unstructured":"Cheng, J., & Lapata, M. (2016). Neural summarization by extracting sentences and words. In: Annual meeting of the Association for Computational Linguistics (ACL). https:\/\/doi.org\/10.18653\/v1\/P16-1046","key":"967_CR4","DOI":"10.18653\/v1\/P16-1046"},{"doi-asserted-by":"publisher","unstructured":"Christensen, J., Mausam, S., Soderland, S., et al. (2013a). Towards coherent multi-document summarization. In: North American Chapter of the Association for Computational Linguistics. (NAACL). https:\/\/doi.org\/10.3115\/v1\/N13-1136","key":"967_CR5","DOI":"10.3115\/v1\/N13-1136"},{"doi-asserted-by":"publisher","unstructured":"Christensen, J., Mausam, S., Soderland, S., et al. (2013b). Towards coherent multi-document summarization. In: North American Chapter of the Association for Computational Linguistics (NAACL). 
https:\/\/doi.org\/10.3115\/v1\/N13-1136","key":"967_CR6","DOI":"10.3115\/v1\/N13-1136"},{"doi-asserted-by":"publisher","unstructured":"Devlin, J., Chang, M.W., Lee, K., et al. (2019). BERT: Pretraining of deep bidirectional transformers for language understanding. In: North American Chapter of the Association for Computational Linguistics (NAACL). https:\/\/doi.org\/10.18653\/v1\/N19-1423","key":"967_CR7","DOI":"10.18653\/v1\/N19-1423"},{"doi-asserted-by":"publisher","unstructured":"Dong, X., Li, W., Le, Y., et al. (2025). Termdiffusum: A term-guided diffusion model for extractive summarization of legal documents. In: Proceedings of the 31st International Conference on Computational Linguistics (pp. 3222\u20133235). https:\/\/doi.org\/10.18653\/v1\/2025.coling-main.216","key":"967_CR8","DOI":"10.18653\/v1\/2025.coling-main.216"},{"doi-asserted-by":"publisher","unstructured":"Durrett, G., Berg-Kirkpatrick, T., & Klein, D. (2016). Learning-based single-document summarization with compression and anaphoricity constraints. In: Annual Meeting of the Association for Computational Linguistics (ACL). https:\/\/doi.org\/10.18653\/v1\/P16-1188","key":"967_CR9","DOI":"10.18653\/v1\/P16-1188"},{"doi-asserted-by":"publisher","unstructured":"Elsner, M., & Charniak, E. (2011). Extending the entity grid with entity-specific features. In: Annual Meeting of the Association for Computational Linguistics (ACL). https:\/\/doi.org\/10.3115\/v1\/P11-2022","key":"967_CR10","DOI":"10.3115\/v1\/P11-2022"},{"key":"967_CR11","doi-asserted-by":"publisher","first-page":"113679","DOI":"10.1016\/j.eswa.2020.113679","volume":"165","author":"WS El-Kassas","year":"2021","unstructured":"El-Kassas, W. S., Salama, C. R., Rafea, A. A., et al. (2021). Automatic text summarization: A comprehensive survey. Expert Systems with Applications, 165, 113679. 
https:\/\/doi.org\/10.1016\/j.eswa.2020.113679","journal-title":"Expert Systems with Applications"},{"key":"967_CR12","doi-asserted-by":"publisher","first-page":"457","DOI":"10.1613\/jair.1526","volume":"22","author":"G Erkan","year":"2004","unstructured":"Erkan, G., & Radev, D. R. (2004). Lexrank: Graph-based lexical centrality as salience in text summarization. Journal of Artificial Intelligence Research, 22, 457\u2013479. https:\/\/doi.org\/10.1613\/jair.1526","journal-title":"Journal of Artificial Intelligence Research"},{"key":"967_CR13","doi-asserted-by":"publisher","first-page":"1794","DOI":"10.1016\/j.ipm.2019.04.001","volume":"56","author":"L Ermakova","year":"2019","unstructured":"Ermakova, L., Cossu, J. V., & Mothe, J. (2019). A survey on evaluation of summarization methods. Information Processing and Management, 56, 1794\u20131814. https:\/\/doi.org\/10.1016\/j.ipm.2019.04.001","journal-title":"Information Processing and Management"},{"key":"967_CR14","doi-asserted-by":"publisher","first-page":"189","DOI":"10.1016\/j.eswa.2016.12.021","volume":"72","author":"C Fang","year":"2017","unstructured":"Fang, C., Mu, D., Deng, Z., et al. (2017). Word-sentence co-ranking for automatic extractive text summarization. Expert Systems with Applications, 72, 189\u2013195. https:\/\/doi.org\/10.1016\/j.eswa.2016.12.021","journal-title":"Expert Systems with Applications"},{"key":"967_CR15","doi-asserted-by":"publisher","first-page":"5755","DOI":"10.1016\/j.eswa.2013.04.023","volume":"40","author":"R Ferreira","year":"2013","unstructured":"Ferreira, R., de Souza, Cabral L., Lins, R. D., et al. (2013). Assessing sentence scoring techniques for extractive text summarization. Expert Systems with Applications, 40, 5755\u20135764. https:\/\/doi.org\/10.1016\/j.eswa.2013.04.023","journal-title":"Expert Systems with Applications"},{"doi-asserted-by":"publisher","unstructured":"Goyal, T., Li, J.J., Durrett, G. (2022). News summarization and evaluation in the era of GPT-3. 
https:\/\/doi.org\/10.48550\/arXiv.2209.12356","key":"967_CR16","DOI":"10.48550\/arXiv.2209.12356"},{"doi-asserted-by":"publisher","unstructured":"Gu, N., Ash, E., Hahnloser, R. (2022). Memsum: Extractive summarization of long documents using multi-step episodic markov decision processes. In: Annual Meeting of the Association for Computational Linguistics (ACL). https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.506","key":"967_CR17","DOI":"10.18653\/v1\/2022.acl-long.506"},{"doi-asserted-by":"publisher","unstructured":"Hermann, K. M., Kocisky, T., Grefenstette, E., et al. (2015). Teaching machines to read and comprehend. Advances in Neural Information Processing Systems (NeurIPS),28. https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.450","key":"967_CR18","DOI":"10.18653\/v1\/2022.acl-long.450"},{"doi-asserted-by":"publisher","unstructured":"Hu, B., Lu, Z., Li, H., et al. (2014). Convolutional neural network architectures for matching natural language sentences. Advances in Neural Information Processing Systems (NeurIPS). https:\/\/doi.org\/10.5555\/2969033.2969055","key":"967_CR19","DOI":"10.5555\/2969033.2969055"},{"doi-asserted-by":"publisher","unstructured":"Jang, E., Gu, S., & Poole, B. (2017). Categorical reparametrization with Gumbel-Softmax. In: International Conference on Learning Representations (ICLR). https:\/\/doi.org\/10.48550\/arXiv.1611.01144","key":"967_CR20","DOI":"10.48550\/arXiv.1611.01144"},{"key":"967_CR21","doi-asserted-by":"publisher","first-page":"355","DOI":"10.1007\/s10844-024-00886-5","volume":"63","author":"X Jiang","year":"2025","unstructured":"Jiang, X., & Chen, J. (2025). Heterogeneous graphormer for extractive multimodal summarization. Journal of Intelligent Information Systems, 63, 355\u2013373. https:\/\/doi.org\/10.1007\/s10844-024-00886-5","journal-title":"Journal of Intelligent Information Systems"},{"doi-asserted-by":"publisher","unstructured":"Jing, B., You, Z., Yang, T., et al. (2021). 
Multiplex graph neural network for extractive text summarization. In: Conference on Empirical Methods in Natural Language Processing (EMNLP). https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.11","key":"967_CR22","DOI":"10.18653\/v1\/2021.emnlp-main.11"},{"key":"967_CR23","doi-asserted-by":"publisher","first-page":"200","DOI":"10.1016\/j.eswa.2019.03.045","volume":"129","author":"A Joshi","year":"2019","unstructured":"Joshi, A., Fidalgo, E., Alegre, E., et al. (2019). Summcoder: An unsupervised framework for extractive text summarization based on deep auto-encoders. Expert Systems with Applications, 129, 200\u2013215. https:\/\/doi.org\/10.1016\/j.eswa.2019.03.045","journal-title":"Expert Systems with Applications"},{"doi-asserted-by":"publisher","unstructured":"Joty, S.R., Nguyen, D.T., & Mohiuddin, M.T. (2018). Coherence modeling of asynchronous conversations: A neural entity grid approach. In: Annual Meeting of the Association for Computational Linguistics (ACL). https:\/\/doi.org\/10.18653\/v1\/P18-1052","key":"967_CR24","DOI":"10.18653\/v1\/P18-1052"},{"doi-asserted-by":"publisher","unstructured":"Kwon J, Kobayashi N, Kamigaito H, et al. (2021). Considering nested tree structure in sentence extractive summarization with pre-trained transformer. In: Conference on Empirical Methods in Natural Language Processing (EMNLP). https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.330","key":"967_CR25","DOI":"10.18653\/v1\/2021.emnlp-main.330"},{"doi-asserted-by":"publisher","unstructured":"Laban, P., Dai, L., Bandarkar, L., et al. (2021). Can transformer models measure coherence in text: Re-thinking the shuffle test. In: Annual Meeting of the Association for Computational Linguistics (ACL). https:\/\/doi.org\/10.18653\/v1\/2021.acl-short.134","key":"967_CR26","DOI":"10.18653\/v1\/2021.acl-short.134"},{"doi-asserted-by":"publisher","unstructured":"Lewis, P., Perez, E., Piktus, A., et al. (2020). Retrieval-augmented generation for knowledge-intensive NLP tasks. 
Advances in Neural Information Processing Systems (NeurIPS),33, 9459\u20139474. https:\/\/doi.org\/10.48550\/arXiv.2005.11401","key":"967_CR27","DOI":"10.48550\/arXiv.2005.11401"},{"doi-asserted-by":"publisher","unstructured":"Lin, C.Y. (2004). ROUGE: A package for automatic evaluation of summaries. In: Workshop on text summarization branches out at the Association for Computational Linguistics (ACL) (pp. 74\u201381). https:\/\/doi.org\/10.3115\/v1\/W04-1013","key":"967_CR28","DOI":"10.3115\/v1\/W04-1013"},{"doi-asserted-by":"publisher","unstructured":"Liu, Y., & Lapata, M. (2019). Text summarization with pretrained encoders. In: Conference on Empirical Methods in Natural Language Processing (EMNLP). https:\/\/doi.org\/10.18653\/v1\/D19-1387","key":"967_CR29","DOI":"10.18653\/v1\/D19-1387"},{"key":"967_CR30","doi-asserted-by":"publisher","first-page":"173","DOI":"10.1016\/j.eswa.2019.05.011","volume":"133","author":"X Mao","year":"2019","unstructured":"Mao, X., Yang, H., Huang, S., et al. (2019). Extractive summarization using supervised and unsupervised learning. Expert Systems with Applications, 133, 173\u2013181. https:\/\/doi.org\/10.1016\/j.eswa.2019.05.011","journal-title":"Expert Systems with Applications"},{"doi-asserted-by":"publisher","unstructured":"Mihalcea, R., & Tarau, P. (2004). Textrank: Bringing order into text. In: Conference on Empirical Methods in Natural Language Processing (EMNLP) (pp. 404\u2013411). https:\/\/doi.org\/10.3115\/v1\/W04-3252","key":"967_CR31","DOI":"10.3115\/v1\/W04-3252"},{"doi-asserted-by":"publisher","unstructured":"Minaee, S., Mikolov, T., Nikzad, N., et al. (2024). Large language models: A survey. https:\/\/doi.org\/10.48550\/arXiv.2402.06196","key":"967_CR32","DOI":"10.48550\/arXiv.2402.06196"},{"key":"967_CR33","doi-asserted-by":"publisher","first-page":"112958","DOI":"10.1016\/j.eswa.2019.112958","volume":"143","author":"M Mohd","year":"2020","unstructured":"Mohd, M., Jan, R., & Shah, M. (2020). 
Text document summarization using word embedding. Expert Systems with Applications, 143, 112958. https:\/\/doi.org\/10.1016\/j.eswa.2019.112958","journal-title":"Expert Systems with Applications"},{"doi-asserted-by":"publisher","unstructured":"Mohiuddin, M.T., Jwalapuram, P., Lin, X., et al. (2021). Rethinking coherence modeling: Synthetic vs. downstream tasks. In: European Chapter of the Association for Computational Linguistics (EACL) (pp. 3528\u20133539). https:\/\/doi.org\/10.18653\/v1\/2021.eacl-main.308","key":"967_CR34","DOI":"10.18653\/v1\/2021.eacl-main.308"},{"doi-asserted-by":"publisher","unstructured":"Moon, H.C., Mohiuddin, M.T., Joty, S., et al. (2019). A unified neural coherence model. In: Conference on Empirical Methods in Natural Language Processing (EMNLP). https:\/\/doi.org\/10.18653\/v1\/D19-1231","key":"967_CR35","DOI":"10.18653\/v1\/D19-1231"},{"doi-asserted-by":"publisher","unstructured":"Moratanch, N., & Chitrakala, S. (2017). A survey on extractive text summarization. In: 2017 International Conference on Computer, Communication and Signal Processing (ICCCSP) (pp. 1\u20136). IEEE. https:\/\/doi.org\/10.1109\/ICCCSP.2017.7944061","key":"967_CR36","DOI":"10.1109\/ICCCSP.2017.7944061"},{"key":"967_CR37","doi-asserted-by":"publisher","first-page":"102359","DOI":"10.1016\/j.ipm.2020.102359","volume":"57","author":"B Mutlu","year":"2020","unstructured":"Mutlu, B., Sezer, E. A., & Akcayol, M. A. (2020). Candidate sentence selection for extractive text summarization. Information Processing and Management, 57, 102359. https:\/\/doi.org\/10.1016\/j.ipm.2020.102359","journal-title":"Information Processing and Management"},{"key":"967_CR38","doi-asserted-by":"publisher","first-page":"102088","DOI":"10.1016\/j.ipm.2019.102088","volume":"56","author":"Z Nasar","year":"2019","unstructured":"Nasar, Z., Jaffry, S. W., & Malik, M. K. (2019). Textual keyword extraction and summarization: State-of-the-art. Information Processing and Management, 56, 102088. 
https:\/\/doi.org\/10.1016\/j.ipm.2019.102088","journal-title":"Information Processing and Management"},{"doi-asserted-by":"publisher","unstructured":"Nguyen, D.T., & Joty, S. (2017). A neural local coherence model. In: Annual Meeting of the Association for Computational Linguistics (ACL). https:\/\/doi.org\/10.18653\/v1\/P17-1121","key":"967_CR39","DOI":"10.18653\/v1\/P17-1121"},{"key":"967_CR40","doi-asserted-by":"publisher","first-page":"36","DOI":"10.1016\/j.neunet.2020.02.022","volume":"126","author":"D Pandey","year":"2020","unstructured":"Pandey, D., & Chowdary, C. R. (2020). Modeling coherence by ordering paragraphs using pointer networks. Neural Networks, 126, 36\u201341. https:\/\/doi.org\/10.1016\/j.neunet.2020.02.022","journal-title":"Neural Networks"},{"doi-asserted-by":"publisher","unstructured":"Parmar, M., Deilamsalehy, H., Dernoncourt, F., et al. (2024). Towards enhancing coherence in extractive summarization: Dataset and experiments with LLMs. In: Proceedings of the 2024 conference on empirical methods in natural language processing (pp. 19810\u201319820). https:\/\/doi.org\/10.18653\/v1\/2024.emnlp-main.1106","key":"967_CR41","DOI":"10.18653\/v1\/2024.emnlp-main.1106"},{"doi-asserted-by":"crossref","unstructured":"Parveen, D., & Strube, M. (2015). Integrating importance, non-redundancy and coherence in graph-based extractive summarization. In: International Joint Conference on Artificial Intelligence (IJCAI). https:\/\/dl.acm.org\/doi\/10.5555\/2832415.2832430","key":"967_CR42","DOI":"10.18653\/v1\/D15-1226"},{"key":"967_CR43","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3419106","volume":"2","author":"T Shi","year":"2020","unstructured":"Shi, T., Keneshloo, Y., Ramakrishnan, N., et al. (2020). Neural abstractive text summarization with sequence-to-sequence models: A survey. ACM Transactions on Data Science, 2, 1\u201337. 
https:\/\/doi.org\/10.1145\/3419106","journal-title":"ACM Transactions on Data Science"},{"doi-asserted-by":"publisher","unstructured":"Tang, P., Hu, K., Yan, R., et al. (2022). OTextSum: Extractive text summarisation with optimal transport. In: Findings of the North American Chapter of the Association for Computational Linguistics (NAACL) (pp. 1557\u20131570). https:\/\/doi.org\/10.18653\/v1\/2022.findings-naacl.85","key":"967_CR44","DOI":"10.18653\/v1\/2022.findings-naacl.85"},{"key":"967_CR45","doi-asserted-by":"publisher","first-page":"123045","DOI":"10.1016\/j.eswa.2023.123045","volume":"245","author":"SN Vo","year":"2024","unstructured":"Vo, S. N., Vo, T. T., & Le, B. (2024). Interpretable extractive text summarization with meta-learning and BI-LSTM: A study of meta learning and explainability techniques. Expert Systems with Applications, 245, 123045. https:\/\/doi.org\/10.1016\/j.eswa.2023.123045","journal-title":"Expert Systems with Applications"},{"doi-asserted-by":"publisher","unstructured":"Wang, D., Chen, J., Wu, X., et al. (2021). CNewSum: A large-scale chinese news summarization dataset with human-annotated adequacy and deducibility level. https:\/\/doi.org\/10.1007\/978-3-030-88480-2_31","key":"967_CR46","DOI":"10.1007\/978-3-030-88480-2_31"},{"doi-asserted-by":"publisher","unstructured":"Wang, D., Liu, P., Zheng, Y., et al. (2020). Heterogeneous graph neural networks for extractive document summarization. In: Annual Meeting of the Association for Computational Linguistics (ACL). https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.553","key":"967_CR47","DOI":"10.18653\/v1\/2020.acl-main.553"},{"doi-asserted-by":"publisher","unstructured":"Wolf, T., Debut, L., Sanh, V., et al. (2019). Transformers: State-of-the-art natural language processing. 
https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-demos.6","key":"967_CR48","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"issue":"1","key":"967_CR49","doi-asserted-by":"publisher","first-page":"5602","DOI":"10.1609\/aaai.v32i1.11987","volume":"32","author":"Y Wu","year":"2018","unstructured":"Wu, Y., & Hu, B. (2018). Learning to extract coherent summary via deep reinforcement learning. Proceedings of the AAAI Conference on Artificial Intelligence., 32(1), 5602\u20135609. https:\/\/doi.org\/10.1609\/aaai.v32i1.11987","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence."},{"key":"967_CR50","doi-asserted-by":"publisher","first-page":"325","DOI":"10.1007\/s10844-022-00757-x","volume":"61","author":"F Xie","year":"2023","unstructured":"Xie, F., Chen, J., & Chen, K. (2023). Extractive text-image summarization with relation-enhanced graph attention network. Journal of Intelligent Information Systems, 61, 325\u2013341. https:\/\/doi.org\/10.1007\/s10844-022-00757-x","journal-title":"Journal of Intelligent Information Systems"},{"key":"967_CR51","doi-asserted-by":"publisher","first-page":"52","DOI":"10.1016\/j.neucom.2018.01.020","volume":"284","author":"K Yao","year":"2018","unstructured":"Yao, K., Zhang, L., Luo, T., et al. (2018). Deep reinforcement learning for extractive document summarization. Neurocomputing, 284, 52\u201362. https:\/\/doi.org\/10.1016\/j.neucom.2018.01.020","journal-title":"Neurocomputing"},{"doi-asserted-by":"publisher","unstructured":"Zhang, H., Liu, X., & Zhang, J. (2023). Extractive summarization via chatgpt for faithful summary generation. In: Findings of the Conference on Empirical Methods in Natural Language Processing (EMNLP) (pp. 14066\u201314080). https:\/\/doi.org\/10.18653\/v1\/2023.findings-emnlp.214","key":"967_CR52","DOI":"10.18653\/v1\/2023.findings-emnlp.214"},{"doi-asserted-by":"publisher","unstructured":"Zhang T, Kishore V, Wu F, et al. (2019) BERTScore: Evaluating text generation with BERT. 
In: International Conference on Learning Representations (ICLR). https:\/\/doi.org\/10.48550\/arXiv.1904.09675","key":"967_CR53","DOI":"10.48550\/arXiv.1904.09675"},{"issue":"10","key":"967_CR54","doi-asserted-by":"publisher","first-page":"11757","DOI":"10.1609\/aaai.v36i10.21431","volume":"36","author":"Z Zhang","year":"2022","unstructured":"Zhang, Z., Meng, X., Wang, Y., et al. (2022). Unims: A unified framework for multimodal summarization with knowledge distillation. Proceedings of the AAAI Conference on Artificial Intelligence., 36(10), 11757\u201311764. https:\/\/doi.org\/10.1609\/aaai.v36i10.21431","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence."},{"doi-asserted-by":"publisher","unstructured":"Zhao, W.X., Zhou, K., Li, J., et al. (2023). A survey of large language models. https:\/\/doi.org\/10.48550\/arXiv.2303.18223","key":"967_CR55","DOI":"10.48550\/arXiv.2303.18223"},{"key":"967_CR56","doi-asserted-by":"publisher","first-page":"340","DOI":"10.1016\/j.neunet.2022.08.021","volume":"155","author":"Y Zhao","year":"2022","unstructured":"Zhao, Y., Wang, L., Wang, C., et al. (2022). Multi-granularity heterogeneous graph attention networks for extractive document summarization. Neural Networks, 155, 340\u2013347. https:\/\/doi.org\/10.1016\/j.neunet.2022.08.021","journal-title":"Neural Networks"},{"doi-asserted-by":"publisher","unstructured":"Zhong, M., Liu, Y., Yin, D., et al. (2022). Towards a unified multi-dimensional evaluator for text generation. In: Conference on Empirical Methods in Natural Language Processing (EMNLP) (pp. 2023\u20132038). 
https:\/\/doi.org\/10.18653\/v1\/2022.emnlp-main.131","key":"967_CR57","DOI":"10.18653\/v1\/2022.emnlp-main.131"}],"container-title":["Journal of Intelligent Information Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10844-025-00967-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10844-025-00967-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10844-025-00967-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T08:10:28Z","timestamp":1761639028000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10844-025-00967-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,8,1]]},"references-count":57,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2025,12]]}},"alternative-id":["967"],"URL":"https:\/\/doi.org\/10.1007\/s10844-025-00967-z","relation":{},"ISSN":["0925-9902","1573-7675"],"issn-type":[{"type":"print","value":"0925-9902"},{"type":"electronic","value":"1573-7675"}],"subject":[],"published":{"date-parts":[[2025,8,1]]},"assertion":[{"value":"26 January 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 July 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 July 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"1 August 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interests"}}]}}