{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,14]],"date-time":"2026-01-14T23:54:27Z","timestamp":1768434867171,"version":"3.49.0"},"reference-count":52,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2025,6,3]],"date-time":"2025-06-03T00:00:00Z","timestamp":1748908800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,6,3]],"date-time":"2025-06-03T00:00:00Z","timestamp":1748908800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"the open fund of Information Materials and Intelligent Sensing Laboratory of Anhui Province","award":["No. IMIS202010"],"award-info":[{"award-number":["No. IMIS202010"]}]},{"name":"the open fund of Information Materials and Intelligent Sensing Laboratory of Anhui Province","award":["No. IMIS202010"],"award-info":[{"award-number":["No. IMIS202010"]}]},{"name":"the excellent young talents support program in universities of Anhui Province","award":["No. 2022AH020091"],"award-info":[{"award-number":["No. 2022AH020091"]}]},{"name":"the excellent young talents support program in universities of Anhui Province","award":["No. 2022AH020091"],"award-info":[{"award-number":["No. 2022AH020091"]}]},{"name":"the outstanding Youth Talent Support Program in Universities of Anhui Province","award":["No. gxyqZD2021128"],"award-info":[{"award-number":["No. gxyqZD2021128"]}]},{"name":"the outstanding Youth Talent Support Program in Universities of Anhui Province","award":["No. gxyqZD2021128"],"award-info":[{"award-number":["No. gxyqZD2021128"]}]},{"name":"CCF-Zhipu AI Large Model Fund","award":["202213"],"award-info":[{"award-number":["202213"]}]},{"name":"CCF-Zhipu AI Large Model Fund","award":["202213"],"award-info":[{"award-number":["202213"]}]},{"name":"CCF-Zhipu AI Large Model Fund","award":["202213"],"award-info":[{"award-number":["202213"]}]},{"name":"CCF-Zhipu AI Large Model Fund","award":["202213"],"award-info":[{"award-number":["202213"]}]},{"name":"CCF-Zhipu AI Large Model Fund","award":["202213"],"award-info":[{"award-number":["202213"]}]},{"name":"CCF-Zhipu AI Large Model Fund","award":["202213"],"award-info":[{"award-number":["202213"]}]},{"name":"the University Synergy Innovation Program of Anhui Province","award":["GXXT-2023-050"],"award-info":[{"award-number":["GXXT-2023-050"]}]},{"name":"the University Synergy Innovation Program of Anhui Province","award":["GXXT-2023-050"],"award-info":[{"award-number":["GXXT-2023-050"]}]},{"name":"the University Synergy Innovation Program of Anhui Province","award":["GXXT-2023-050"],"award-info":[{"award-number":["GXXT-2023-050"]}]},{"name":"the University Synergy Innovation Program of Anhui Province","award":["GXXT-2023-050"],"award-info":[{"award-number":["GXXT-2023-050"]}]},{"name":"the University Synergy Innovation Program of Anhui Province","award":["GXXT-2023-050"],"award-info":[{"award-number":["GXXT-2023-050"]}]},{"name":"the University Synergy Innovation Program of Anhui Province","award":["GXXT-2023-050"],"award-info":[{"award-number":["GXXT-2023-050"]}]},{"name":"Zhipu Al - Anhui University Joint Research Center on Foundation Model"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int. J. Mach. Learn. 
&amp; Cyber."],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1007\/s13042-025-02669-y","type":"journal-article","created":{"date-parts":[[2025,6,2]],"date-time":"2025-06-02T22:35:00Z","timestamp":1748903700000},"page":"7543-7557","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Sentop: sentence-level prefix prompt for controllable abstractive summarization"],"prefix":"10.1007","volume":"16","author":[{"given":"Shu","family":"Zhao","sequence":"first","affiliation":[]},{"given":"Yang","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Jie","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Zhen","family":"Duan","sequence":"additional","affiliation":[]},{"given":"Chunhui","family":"Zou","sequence":"additional","affiliation":[]},{"given":"Yuanfang","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Fugui","family":"He","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,6,3]]},"reference":[{"key":"2669_CR1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2020.113679","volume":"165","author":"WS El-Kassas","year":"2021","unstructured":"El-Kassas WS, Salama CR, Rafea AA, Mohamed HK (2021) Automatic text summarization: a comprehensive survey. Expert Syst. Appl. 165:113679. https:\/\/doi.org\/10.1016\/j.eswa.2020.113679","journal-title":"Expert Syst. Appl."},{"key":"2669_CR2","doi-asserted-by":"publisher","unstructured":"Pilault J, Li R, Subramanian S, Pal C (2020). On extractive and abstractive neural document summarization with transformer language models. In: Proceedings of the 2020 conference on empirical methods in natural language processing, EMNLP, pp. 9308\u20139319 . https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.748","DOI":"10.18653\/v1\/2020.emnlp-main.748"},{"key":"2669_CR3","unstructured":"Zhang J, Zhao Y, Saleh M, Liu PJ (2020) PEGASUS: pre-training with extracted gap-sentences for abstractive summarization. In: Proceedings of the 37th international conference on machine learning, ICML, 119, 11328\u201311339"},{"key":"2669_CR4","doi-asserted-by":"crossref","unstructured":"Pang B, Nijkamp E, Kryscinski W, Savarese S, Zhou Y, Xiong C (2023) Long document summarization with top-down and bottom-up inference. In: Findings of the association for computational linguistics: EACL, pp. 1237\u20131254","DOI":"10.18653\/v1\/2023.findings-eacl.94"},{"key":"2669_CR5","unstructured":"Sutskever I, Vinyals O, Le QV (2014) Sequence to sequence learning with neural networks. In: Advances in neural information processing systems 27: annual conference on neural information processing systems, pp. 3104\u20133112"},{"key":"2669_CR6","unstructured":"Zhang X, Liu Y, Wang X, He P, Yu Y, Chen S, Xiong W, Wei F (2022) Momentum calibration for text generation. CoRR abs\/2212.04257 ) 10.48550\/arXiv.2212.04257"},{"issue":"2","key":"2669_CR7","doi-asserted-by":"publisher","first-page":"270","DOI":"10.1162\/neco.1989.1.2.270","volume":"1","author":"RJ Williams","year":"1989","unstructured":"Williams RJ, Zipser D (1989) A learning algorithm for continually running fully recurrent neural networks. Neural Comput 1(2):270\u2013280. https:\/\/doi.org\/10.1162\/neco.1989.1.2.270","journal-title":"Neural Comput"},{"key":"2669_CR8","unstructured":"Bengio S, Vinyals O, Jaitly N, Shazeer N (2015) Scheduled sampling for sequence prediction with recurrent neural networks. 
In: Advances in neural information processing systems 28: annual conference on neural information processing systems, pp. 1171\u20131179"},{"key":"2669_CR9","unstructured":"Ranzato M, Chopra S, Auli M, Zaremba W (2016) Sequence level training with recurrent neural networks. In: 4th international conference on learning representations, ICLR"},{"key":"2669_CR10","unstructured":"Lin CY (2004) Rouge: a package for automatic evaluation of summaries. In: Text summarization branches Out, pp. 74\u201381"},{"key":"2669_CR11","doi-asserted-by":"publisher","unstructured":"Wiseman S, Rush AM (2016) Sequence-to-sequence learning as beam-search optimization. In: Proceedings of the 2016 conference on empirical methods in natural language processing, EMNLP, pp. 1296\u20131306 . https:\/\/doi.org\/10.18653\/v1\/d16-1137","DOI":"10.18653\/v1\/d16-1137"},{"key":"2669_CR12","doi-asserted-by":"publisher","unstructured":"Liu Y, Liu P (2021) Simcls: a simple framework for contrastive learning of abstractive summarization. In: Proceedings of the 59th annual meeting of the association for computational linguistics and the 11th international joint conference on natural language processing, ACL\/IJCNLP 2021, (Volume 2: Short Papers), pp. 1065\u20131072 . https:\/\/doi.org\/10.18653\/v1\/2021.acl-short.135","DOI":"10.18653\/v1\/2021.acl-short.135"},{"key":"2669_CR13","unstructured":"Sun S, Li W (2021) Alleviating exposure bias via contrastive learning for abstractive text summarization. CoRR abs\/2108.11846"},{"key":"2669_CR14","unstructured":"Zhuang H, Zhang W.E, Dong C, Yang J, Sheng Q (2024) Trainable hard negative examples in contrastive learning for unsupervised abstractive summarization. In: Findings of the association for computational linguistics: EACL, pp. 1589\u20131600"},{"issue":"12","key":"2669_CR15","doi-asserted-by":"publisher","first-page":"5563","DOI":"10.1007\/S13042-024-02263-8","volume":"15","author":"H Tang","year":"2024","unstructured":"Tang H, Li R, Duan W, Dou Q, Lu M (2024) A novel abstractive summarization model based on topic-aware and contrastive learning. Int J Mach Learn Cybern 15(12):5563\u20135577. https:\/\/doi.org\/10.1007\/S13042-024-02263-8","journal-title":"Int J Mach Learn Cybern"},{"issue":"8","key":"2669_CR16","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford A, Wu J, Child R, Luan D, Amodei D, Sutskever I et al (2019) Language models are unsupervised multitask learners. OpenAI blog 1(8):9","journal-title":"OpenAI blog"},{"key":"2669_CR17","unstructured":"Brown TB, Mann B, Ryder N, Subbiah M, Kaplan J, Dhariwal P, Neelakantan A, Shyam P, Sastry G, Askell A, Agarwal S, Herbert-Voss A, Krueger G, Henighan T, Child R, Ramesh A, Ziegler DM, Wu J, Winter C, Hesse C, Chen M, Sigler E, Litwin M, Gray S, Chess B, Clark J, Berner C, McCandlish S, Radford A, Sutskever I, Amodei D (2020) Language models are few-shot learners. In: Advances in neural information processing systems 33: annual conference on neural information processing systems, NeurIPS"},{"key":"2669_CR18","doi-asserted-by":"publisher","unstructured":"Lester B, Al-Rfou R, Constant N (2021) The power of scale for parameter-efficient prompt tuning. In: Proceedings of the 2021 conference on empirical methods in natural language processing, EMNLP, pp. 3045\u20133059 . https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.243","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"2669_CR19","doi-asserted-by":"publisher","unstructured":"Li X.L, Liang P (2021). 
Prefix-tuning: optimizing continuous prompts for generation. In: Proceedings of the 59th annual meeting of the association for computational linguistics and the 11th international joint conference on natural language processing, ACL\/IJCNLP 2021, (Volume 1: Long Papers), pp. 4582\u20134597 . https:\/\/doi.org\/10.18653\/v1\/2021.acl-long.353","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"2669_CR20","doi-asserted-by":"publisher","unstructured":"Lewis M, Liu Y, Goyal N, Ghazvininejad M, Mohamed A, Levy O, Stoyanov V, Zettlemoyer L (2020) BART: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In: Proceedings of the 58th annual meeting of the association for computational linguistics, ACL, pp. 7871\u20137880 . https:\/\/doi.org\/10.18653\/v1\/2020.acl-main.703","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"2669_CR21","doi-asserted-by":"publisher","unstructured":"Du Z, Qian Y, Liu X, Ding M, Qiu J, Yang Z, Tang J (2022) GLM: general language model pretraining with autoregressive blank infilling. In: Proceedings of the 60th annual meeting of the association for computational linguistics (Volume 1: Long Papers), ACL, pp. 320\u2013335 . https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.26","DOI":"10.18653\/v1\/2022.acl-long.26"},{"key":"2669_CR22","doi-asserted-by":"publisher","unstructured":"Li B, Zhou H, He J, Wang M, Yang Y, Li L (2020) On the sentence embeddings from pre-trained language models. In: Proceedings of the 2020 conference on empirical methods in natural language processing, EMNLP, pp. 9119\u20139130 . https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.733","DOI":"10.18653\/v1\/2020.emnlp-main.733"},{"key":"2669_CR23","doi-asserted-by":"publisher","unstructured":"Liu Y, Liu P, Radev D.R, Neubig G (2022) BRIO: bringing order to abstractive summarization. In: Proceedings of the 60th annual meeting of the association for computational linguistics (Volume 1: Long Papers), ACL, pp. 2890\u20132903 . https:\/\/doi.org\/10.18653\/v1\/2022.acl-long.207","DOI":"10.18653\/v1\/2022.acl-long.207"},{"key":"2669_CR24","doi-asserted-by":"publisher","unstructured":"Xie J, Su Q, Zhang S, Zhang X (2023) Alleviating exposure bias via multi-level contrastive learning and deviation simulation in abstractive summarization. In: Findings of the association for computational linguistics: ACL, pp. 9732\u20139747. https:\/\/doi.org\/10.18653\/V1\/2023.FINDINGS-ACL.617","DOI":"10.18653\/V1\/2023.FINDINGS-ACL.617"},{"issue":"9","key":"2669_CR25","doi-asserted-by":"publisher","first-page":"195","DOI":"10.1145\/3560815","volume":"55","author":"P Liu","year":"2023","unstructured":"Liu P, Yuan W, Fu J, Jiang Z, Hayashi H, Neubig G (2023) Pre-train, prompt, and predict: a systematic survey of prompting methods in natural language processing. ACM Comput Surv 55(9):195\u2013119535. https:\/\/doi.org\/10.1145\/3560815","journal-title":"ACM Comput Surv"},{"key":"2669_CR26","doi-asserted-by":"publisher","unstructured":"Schick T, Schmid H, Sch\u00fctze H (2020) Automatically identifying words that can serve as labels for few-shot text classification. In: Proceedings of the 28th international conference on computational linguistics, COLING, pp. 5569\u20135578 . https:\/\/doi.org\/10.18653\/v1\/2020.coling-main.488","DOI":"10.18653\/v1\/2020.coling-main.488"},{"key":"2669_CR27","doi-asserted-by":"publisher","unstructured":"Schick T, Sch\u00fctze H (2021). Few-shot text generation with natural language instructions. 
In: Proceedings of the 2021 conference on empirical methods in natural language processing, EMNLP, pp. 390\u2013402 . https:\/\/doi.org\/10.18653\/v1\/2021.emnlp-main.32","DOI":"10.18653\/v1\/2021.emnlp-main.32"},{"key":"2669_CR28","doi-asserted-by":"publisher","unstructured":"Liu X, Ji K, Fu Y, Tam W, Du Z, Yang Z, Tang J (2022) P-tuning: prompt tuning can be comparable to fine-tuning across scales and tasks. In: Proceedings of the 60th annual meeting of the association for computational linguistics (Volume 2: Short Papers), ACL, pp. 61\u201368 . https:\/\/doi.org\/10.18653\/v1\/2022.acl-short.8","DOI":"10.18653\/v1\/2022.acl-short.8"},{"key":"2669_CR29","unstructured":"Zhang H, Song H, Li S, Zhou M, Song D (2022) A survey of controllable text generation using transformer-based pre-trained language models. CoRR abs\/2201.05337"},{"key":"2669_CR30","doi-asserted-by":"publisher","unstructured":"Prabhumoye S, Black AW, Salakhutdinov R (2020) Exploring controllable text generation techniques. In: Proceedings of the 28th international conference on computational linguistics, COLING, pp. 1\u201314 . https:\/\/doi.org\/10.18653\/v1\/2020.coling-main.1","DOI":"10.18653\/v1\/2020.coling-main.1"},{"key":"2669_CR31","doi-asserted-by":"publisher","first-page":"23471","DOI":"10.1609\/aaai.v38i21.30433","volume":"38","author":"N Delpisheh","year":"2024","unstructured":"Delpisheh N, Chali Y (2024) Improving faithfulness in abstractive text summarization with edus using bart (student abstract). Proceedings of the AAAI conference on artificial intelligence 38:23471\u201323472","journal-title":"Proceedings of the AAAI conference on artificial intelligence"},{"key":"2669_CR32","unstructured":"Keskar N.S, McCann B, Varshney L.R, Xiong C, Socher R (2019) CTRL: a conditional transformer language model for controllable generation. CoRR abs\/1909.05858"},{"key":"2669_CR33","unstructured":"Dathathri S, Madotto A, Lan J, Hung J, Frank E, Molino P, Yosinski J, Liu R (2020) Plug and play language models: a simple approach to controlled text generation. In: 8th international conference on learning representations, ICLR"},{"key":"2669_CR34","first-page":"140","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel C, Shazeer N, Roberts A, Lee K, Narang S, Matena M, Zhou Y, Li W, Liu PJ (2020) Exploring the limits of transfer learning with a unified text-to-text transformer. J Mach Learn Res 21:140\u2013114067","journal-title":"J Mach Learn Res"},{"key":"2669_CR35","unstructured":"Ouyang L, Wu J, Jiang X, Almeida D, Wainwright CL, Mishkin P, Zhang C, Agarwal S, Slama K, Ray A, Schulman J, Hilton J, Kelton F, Miller L, Simens M, Askell A, Welinder P, Christiano PF, Leike J, Lowe R (2022) Training language models to follow instructions with human feedback. In: NeurIPS"},{"issue":"3","key":"2669_CR36","doi-asserted-by":"publisher","first-page":"67","DOI":"10.1145\/3622933","volume":"56","author":"Q Jia","year":"2024","unstructured":"Jia Q, Liu Y, Ren S, Zhu KQ (2024) Taxonomy of abstractive dialogue summarization: Scenarios, approaches, and future directions. ACM Comput Surv 56(3):67\u201316738. https:\/\/doi.org\/10.1145\/3622933","journal-title":"ACM Comput Surv"},{"key":"2669_CR37","doi-asserted-by":"publisher","unstructured":"Nallapati R, Zhou B, Santos C.N, G\u00fcl\u00e7ehre \u00c7, Xiang B (2016) Abstractive text summarization using sequence-to-sequence rnns and beyond. In: Proceedings of the 20th SIGNLL conference on computational natural language learning, CoNLL, pp. 280\u2013290 . 
https:\/\/doi.org\/10.18653\/v1\/k16-1028","DOI":"10.18653\/v1\/k16-1028"},{"key":"2669_CR38","doi-asserted-by":"publisher","unstructured":"Narayan S, Cohen SB, Lapata M (2018) Don\u2019t give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization. In: Proceedings of the 2018 conference on empirical methods in natural language processing, pp. 1797\u20131807 . https:\/\/doi.org\/10.18653\/v1\/d18-1206","DOI":"10.18653\/v1\/d18-1206"},{"key":"2669_CR39","doi-asserted-by":"publisher","unstructured":"Gliwa B, Mochol I, Biesek M, Wawer A (2019). SAMSum corpus: a human-annotated dialogue dataset for abstractive summarization. In: Proceedings of the 2nd workshop on new frontiers in summarization, pp. 70\u201379. https:\/\/doi.org\/10.18653\/v1\/D19-5409","DOI":"10.18653\/v1\/D19-5409"},{"key":"2669_CR40","doi-asserted-by":"publisher","unstructured":"Chen Y, Liu Y, Chen L, Zhang Y (2021) Dialogsum: a real-life scenario dialogue summarization dataset. In: Findings of the association for computational linguistics: ACL\/IJCNLP. Findings of ACL, vol. ACL\/IJCNLP 2021, pp. 5062\u20135074. https:\/\/doi.org\/10.18653\/v1\/2021.findings-acl.449","DOI":"10.18653\/v1\/2021.findings-acl.449"},{"key":"2669_CR41","doi-asserted-by":"publisher","unstructured":"Guo M, Ainslie J, Uthus DC, Onta\u00f1\u00f3n S, Ni J, Sung Y, Yang Y (2022) Longt5: efficient text-to-text transformer for long sequences. In: Findings of the association for computational linguistics: NAACL 724\u2013736. https:\/\/doi.org\/10.18653\/v1\/2022.findings-naacl.55","DOI":"10.18653\/v1\/2022.findings-naacl.55"},{"key":"2669_CR42","doi-asserted-by":"publisher","unstructured":"Ainslie J, Onta\u00f1\u00f3n S, Alberti C, Cvicek V, Fisher Z, Pham P, Ravula A, Sanghai S, Wang Q, Yang L (2020). ETC: encoding long and structured inputs in transformers. In: Proceedings of the 2020 conference on empirical methods in natural language processing, EMNLP, pp. 268\u2013284 . https:\/\/doi.org\/10.18653\/v1\/2020.emnlp-main.19","DOI":"10.18653\/v1\/2020.emnlp-main.19"},{"key":"2669_CR43","unstructured":"Zeng A, Liu X, Du Z, Wang Z, Lai H, Ding M, Yang Z, Xu Y, Zheng W, Xia X, et al (2022) Glm-130b: an open bilingual pre-trained model. arXiv preprint arXiv:2210.02414"},{"key":"2669_CR44","doi-asserted-by":"crossref","unstructured":"Bao G, Ou Z, Zhang Y (2023) Gemini: Controlling the sentence-level summary style in abstractive text summarization. In: Proceedings of the 2023 conference on empirical methods in natural language processing, pp. 831\u2013842","DOI":"10.18653\/v1\/2023.emnlp-main.53"},{"key":"2669_CR45","doi-asserted-by":"crossref","unstructured":"Chen Y, Liu Y, Xu R, Yang Z, Zhu C, Zeng M, Zhang Y (2023) Unisumm and summzoo: Unified model and diverse benchmark for few-shot summarization. In: Proceedings of the 61st annual meeting of the association for computational linguistics (Volume 1: Long Papers), ACL, pp. 12833\u201312855","DOI":"10.18653\/v1\/2023.acl-long.718"},{"key":"2669_CR46","unstructured":"Hu E.J, Shen Y, Wallis P, Allen-Zhu Z, Li Y, Wang S, Wang L, Chen W (2022) Lora: low-rank adaptation of large language models. In: The tenth international conference on learning representations, ICLR"},{"key":"2669_CR47","doi-asserted-by":"publisher","unstructured":"Zheng C, Zhang K, Wang H.J, Fan L, Wang Z (2021) Enhanced seq2seq autoencoder via contrastive learning for abstractive text summarization. In: 2021 IEEE International conference on big data (Big Data), pp. 1764\u20131771. 
https:\/\/doi.org\/10.1109\/BigData52589.2021.9671819","DOI":"10.1109\/BigData52589.2021.9671819"},{"key":"2669_CR48","unstructured":"Liu X, Gao Y, Bai Y, Li J, Hu Y, Huang H, Chen B (2022) PSP: pre-trained soft prompts for few-shot abstractive summarization. In: Proceedings of the 29th international conference on computational linguistics, COLING, pp. 6355\u20136368"},{"key":"2669_CR49","unstructured":"He J, Zhou C, Ma X, Berg-Kirkpatrick T, Neubig G (2022) Towards a unified view of parameter-efficient transfer learning. In: The tenth international conference on learning representations, ICLR"},{"key":"2669_CR50","unstructured":"Hu E.J, Shen Y, Wallis P, Allen-Zhu Z, Li Y, Wang S, Wang L, Chen W (2022) Lora: low-rank adaptation of large language models. In: The tenth international conference on learning representations, ICLR"},{"key":"2669_CR51","unstructured":"Pang R.Y, He H (2021) Text generation by learning from demonstrations. In: 9th international conference on learning representations, ICLR"},{"key":"2669_CR52","doi-asserted-by":"publisher","DOI":"10.1109\/TBDATA.2024.3387311","author":"S Zhao","year":"2024","unstructured":"Zhao S, Cheng Y, Zhang Y, Chen J, Duan Z, Sun Y, Wang X (2024) Hyfit: hybrid fine-tuning with diverse sampling for abstractive summarization. IEEE Trans Big Data. https:\/\/doi.org\/10.1109\/TBDATA.2024.3387311","journal-title":"IEEE Trans Big Data"}],"container-title":["International Journal of Machine Learning and Cybernetics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-025-02669-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s13042-025-02669-y\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-025-02669-y.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,15]],"date-time":"2025-10-15T17:02:07Z","timestamp":1760547727000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s13042-025-02669-y"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,3]]},"references-count":52,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2025,10]]}},"alternative-id":["2669"],"URL":"https:\/\/doi.org\/10.1007\/s13042-025-02669-y","relation":{},"ISSN":["1868-8071","1868-808X"],"issn-type":[{"value":"1868-8071","type":"print"},{"value":"1868-808X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,6,3]]},"assertion":[{"value":"27 May 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 April 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 June 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}
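The object above is a standard Crossref REST API work record ("message-type":"work"). As a minimal illustrative sketch, not part of the record itself, the Python snippet below fetches the same record from the public api.crossref.org endpoint and reads a few of the fields shown above; the script name, User-Agent string, and mailto address are placeholder assumptions, used only as polite-pool etiquette.

# Sketch: retrieve this work's Crossref record and print a few fields.
# Endpoint and response shape follow the Crossref REST API:
# GET https://api.crossref.org/works/{doi} returns
# {"status":"ok","message-type":"work","message":{...}} as seen above.
import json
import urllib.request

DOI = "10.1007/s13042-025-02669-y"
url = f"https://api.crossref.org/works/{DOI}"
# Placeholder identification header (assumption, not part of the record).
req = urllib.request.Request(
    url, headers={"User-Agent": "example-fetcher/0.1 (mailto:you@example.org)"}
)

with urllib.request.urlopen(req) as resp:
    record = json.load(resp)

msg = record["message"]                         # the work object shown above
print(msg["title"][0])                          # article title
print(msg["DOI"], "-", msg["container-title"][0])
# Each author entry in this record carries "given" and "family" names.
print(", ".join(f'{a["given"]} {a["family"]}' for a in msg["author"]))
print("references:", msg["references-count"])   # 52 for this work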