{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T02:39:50Z","timestamp":1767321590566,"version":"3.48.0"},"publisher-location":"Singapore","reference-count":34,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819541577","type":"print"},{"value":"9789819541584","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-4158-4_12","type":"book-chapter","created":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T02:36:35Z","timestamp":1767321395000},"page":"188-203","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["SCM: Enhancing Large Language Model with\u00a0Self-Controlled Memory Framework"],"prefix":"10.1007","author":[{"given":"Bing","family":"Wang","sequence":"first","affiliation":[]},{"given":"Xinnian","family":"Liang","sequence":"additional","affiliation":[]},{"given":"Jian","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Hui","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Zhenhe","family":"Wu","sequence":"additional","affiliation":[]},{"given":"ShuangZhi","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Zejun","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Zhoujun","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,2]]},"reference":[{"key":"12_CR1","unstructured":"Ntk-aware scaled rope (2023). https:\/\/www.reddit.com\/r\/LocalLLaMA\/comments\/14lz7j5\/ntkaware_scaled_rope_allows_llama_models_to_have\/"},{"key":"12_CR2","doi-asserted-by":"crossref","unstructured":"Bai, J., et al.: Griprank: bridging the gap between retrieval and generation via the generative knowledge improved passage ranking (2023)","DOI":"10.1145\/3583780.3614901"},{"key":"12_CR3","doi-asserted-by":"crossref","unstructured":"Bai, J., Yang, Z., Yang, J., Guo, H., Li, Z.: Kinet: incorporating relevant facts into knowledge-grounded dialog generation. IEEE\/ACM Trans. Audio Speech Lang. Process. (2023)","DOI":"10.1109\/TASLP.2023.3240654"},{"key":"12_CR4","unstructured":"Bai, J., et al.: Qwen technical report (2023)"},{"key":"12_CR5","unstructured":"Beltagy, I., Peters, M.E., Cohan, A.: Longformer: the long-document transformer (2020)"},{"key":"12_CR6","unstructured":"Brown, T.B., et al.: Language models are few-shot learners. In: Proceedings of NeurIPS (2020)"},{"key":"12_CR7","unstructured":"Brown, T.B., et al.: Language models are few-shot learners (2020)"},{"key":"12_CR8","doi-asserted-by":"crossref","unstructured":"Chai, L., et\u00a0al.: xcot: Cross-lingual instruction tuning for cross-lingual chain-of-thought reasoning. arXiv preprint arXiv:2401.07037 (2024)","DOI":"10.1609\/aaai.v39i22.34524"},{"key":"12_CR9","unstructured":"Chen, S., Wong, S., Chen, L., Tian, Y.: Extending context window of large language models via positional interpolation. CoRR abs\/2306.15595 (2023)"},{"key":"12_CR10","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of NAACL (2019)"},{"key":"12_CR11","unstructured":"Gao, Y., et al.: Retrieval-augmented generation for large language models: a survey (2024)"},{"key":"12_CR12","doi-asserted-by":"crossref","unstructured":"Guo, M., et al.: LongT5: efficient text-to-text transformer for long sequences. In: Proceedings of ACL Findings (2022)","DOI":"10.18653\/v1\/2022.findings-naacl.55"},{"key":"12_CR13","doi-asserted-by":"crossref","unstructured":"Liu, N.F., et al.: Lost in the middle: how language models use long contexts (2023)","DOI":"10.1162\/tacl_a_00638"},{"key":"12_CR14","doi-asserted-by":"crossref","unstructured":"Ma, X., Gong, Y., He, P., Zhao, H., Duan, N.: Query rewriting for retrieval-augmented large language models. arXiv preprint arXiv:2305.14283 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.322"},{"key":"12_CR15","unstructured":"Minaee, S., et al.: Large language models: a survey (2024)"},{"key":"12_CR16","unstructured":"OpenAI: GPT-4 technical report (2024)"},{"key":"12_CR17","unstructured":"Peng, B., Quesnelle, J., Fan, H., Shippole, E.: Yarn: efficient context window extension of large language models. CoRR abs\/2309.00071 (2023)"},{"key":"12_CR18","doi-asserted-by":"crossref","unstructured":"Pi, X., Wang, B., Gao, Y., Guo, J., Li, Z., Lou, J.G.: Towards robustness of text-to-SQL models against natural and realistic adversarial table perturbation (2022)","DOI":"10.18653\/v1\/2022.acl-long.142"},{"key":"12_CR19","unstructured":"Press, O., Smith, N., Lewis, M.: Train short, test long: attention with linear biases enables input length extrapolation. In: Proceedings of ICLR (2022)"},{"key":"12_CR20","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Improving language understanding with unsupervised learning (2018)"},{"key":"12_CR21","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners (2019)"},{"key":"12_CR22","unstructured":"Stiennon, N., et al.: Learning to summarize from human feedback. CoRR (2020)"},{"key":"12_CR23","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Proceedings of NeurIPS (2017)"},{"key":"12_CR24","doi-asserted-by":"crossref","unstructured":"Wang, B., Gao, Y., Li, Z., Lou, J.G.: Know what I don\u2019t know: handling ambiguous and unanswerable questions for text-to-SQL (2023)","DOI":"10.18653\/v1\/2023.findings-acl.352"},{"key":"12_CR25","unstructured":"Wei, J., et al.: Finetuned language models are zero-shot learners. In: Proceedings of ICLR (2022)"},{"key":"12_CR26","unstructured":"Wei, J., et al.: Emergent abilities of large language models. TMLR (2022)"},{"key":"12_CR27","unstructured":"Wei, J., et al.: Chain of thought prompting elicits reasoning in large language models. In: Proceedings of NeurIPS (2022)"},{"key":"12_CR28","doi-asserted-by":"crossref","unstructured":"Wu, H., Zhan, M., Tan, H., Hou, Z., Liang, D., Song, L.: VCSUM: a versatile Chinese meeting summarization dataset (2023)","DOI":"10.18653\/v1\/2023.findings-acl.377"},{"key":"12_CR29","unstructured":"Wu, J., et al.: Recursively summarizing books with human feedback (2021)"},{"key":"12_CR30","unstructured":"Yang, J., et al.: Multilingual machine translation systems from Microsoft for WMT21 shared task. In: WMT@EMNLP 2021, Online Event, 10\u201311 November 2021 (2021)"},{"key":"12_CR31","unstructured":"Zaheer, M., et al.: Big bird: transformers for longer sequences (2021)"},{"key":"12_CR32","unstructured":"Zeng, A., et al.: GLM-130b: an open bilingual pre-trained model. In: Proceedings of ICLR (2023)"},{"key":"12_CR33","unstructured":"Zhang, Y., et al.: Summ$$^n$$: a multi-stage summarization framework for long input dialogues and documents. In: Proceedings of ACL (2022)"},{"key":"12_CR34","doi-asserted-by":"crossref","unstructured":"Zhong, W., Guo, L., Gao, Q., Ye, H., Wang, Y.: Memorybank: enhancing large language models with long-term memory (2023)","DOI":"10.1609\/aaai.v38i17.29946"}],"container-title":["Lecture Notes in Computer Science","Database Systems for Advanced Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-4158-4_12","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T02:36:39Z","timestamp":1767321399000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-4158-4_12"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9789819541577","9789819541584"],"references-count":34,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-4158-4_12","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"2 January 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"DASFAA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Database Systems for Advanced Applications","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Singapore","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Singapore","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 May 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 May 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"dasfaa2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/dasfaa2025.github.io","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}