{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,20]],"date-time":"2026-03-20T00:34:15Z","timestamp":1773966855209,"version":"3.50.1"},"reference-count":41,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T00:00:00Z","timestamp":1775001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100008462","name":"Fujian University of Technology","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100008462","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005270","name":"Fujian Provincial Department of Science and Technology","doi-asserted-by":"publisher","award":["2022J01935"],"award-info":[{"award-number":["2022J01935"]}],"id":[{"id":"10.13039\/501100005270","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Journal of Systems Architecture"],"published-print":{"date-parts":[[2026,4]]},"DOI":"10.1016\/j.sysarc.2026.103699","type":"journal-article","created":{"date-parts":[[2026,1,7]],"date-time":"2026-01-07T00:16:35Z","timestamp":1767744995000},"page":"103699","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["KVC-Q: A high-fidelity and dynamic KV Cache quantization framework for long-context large language models"],"prefix":"10.1016","volume":"173","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-4397-6763","authenticated-orcid":false,"given":"Yusen","family":"Wu","sequence":"first","affiliation":[]},{"given":"Ruiqin","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Jiarong","family":"Que","sequence":"additional","affiliation":[]},{"given":"Qixiang","family":"Zeng","sequence":"additional","affiliation":[]},{"given":"Hongsen","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.sysarc.2026.103699_b1","first-page":"1877","article-title":"Language models are few-shot learners","volume":"vol. 
33","author":"Brown","year":"2020"},{"key":"10.1016\/j.sysarc.2026.103699_b2","series-title":"Qwen2.5 technical report","author":"Team","year":"2024"},{"key":"10.1016\/j.sysarc.2026.103699_b3","series-title":"GPT-4 technical report","author":"OpenAI","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b4","series-title":"Mixtral of experts","author":"Jiang","year":"2024"},{"key":"10.1016\/j.sysarc.2026.103699_b5","series-title":"Phi-3 technical report: a highly capable language model locally on your phone","author":"Abdin","year":"2024"},{"key":"10.1016\/j.sysarc.2026.103699_b6","series-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer","author":"Shazeer","year":"2017"},{"key":"10.1016\/j.sysarc.2026.103699_b7","series-title":"GShard: Scaling giant models with conditional computation and automatic sharding","author":"Lepikhin","year":"2020"},{"key":"10.1016\/j.sysarc.2026.103699_b8","article-title":"Attention is all you need","volume":"30","author":"Vaswani","year":"2017","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.sysarc.2026.103699_b9","series-title":"Fast transformer decoding: One write-head is all you need","author":"Shazeer","year":"2019"},{"key":"10.1016\/j.sysarc.2026.103699_b10","series-title":"GQA: Training generalized multi-query transformer models from multi-head checkpoints","author":"Ainslie","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b11","series-title":"GPTQ: Accurate post-training quantization for generative pre-trained transformers","author":"Frantar","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b12","series-title":"ZeroQuant: Efficient and affordable post-training quantization for large-scale transformers","author":"Yao","year":"2022"},{"key":"10.1016\/j.sysarc.2026.103699_b13","series-title":"SpecInfer: Accelerating generative large language model serving with tree-based speculative inference and verification","author":"Miao","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b14","series-title":"Efficient streaming language models with attention sinks","author":"Xiao","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b15","series-title":"International Conference on Learning Representations","article-title":"Optimal brain apoptosis","author":"Sun","year":"2025"},{"key":"10.1016\/j.sysarc.2026.103699_b16","series-title":"Distilling the knowledge in a neural network","author":"Hinton","year":"2015"},{"key":"10.1016\/j.sysarc.2026.103699_b17","first-page":"3123","article-title":"Binaryconnect: Training deep neural networks with binary weights during propagations","volume":"vol. 28","author":"Courbariaux","year":"2015"},{"key":"10.1016\/j.sysarc.2026.103699_b18","series-title":"SpQR: A sparse-quantized representation for near-lossless LLM weight compression","author":"Dettmers","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b19","article-title":"Llm.int8(): 8-bit matrix multiplication for transformers at scale","volume":"vol. 
35","author":"Dettmers","year":"2022"},{"key":"10.1016\/j.sysarc.2026.103699_b20","series-title":"International Conference on Machine Learning","article-title":"SmoothQuant: Accurate and efficient post-training quantization for large language models","author":"Xiao","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b21","series-title":"Advances in Neural Information Processing Systems","article-title":"Scissorhands: exploiting the persistence of importance hypothesis for LLM KV cache compression at test time","author":"Liu","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b22","series-title":"KIVI: A tuning-free asymmetric 2bit quantization for KV cache","author":"Liu","year":"2024"},{"key":"10.1016\/j.sysarc.2026.103699_b23","doi-asserted-by":"crossref","unstructured":"Z. Dai, Z. Yang, Y. Yang, W.W. Cohen, J. Carbonell, Q.V. Le, R. Salakhutdinov, Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context, in: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, 2019, pp. 2978\u20132988.","DOI":"10.18653\/v1\/P19-1285"},{"key":"10.1016\/j.sysarc.2026.103699_b24","unstructured":"I. Beltagy, M.E. Peters, A. Cohan, Longformer: The long-document transformer, in: Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing, 2020."},{"key":"10.1016\/j.sysarc.2026.103699_b25","first-page":"17283","article-title":"Big bird: Transformers for longer sequences","volume":"vol. 33","author":"Zaheer","year":"2020"},{"key":"10.1016\/j.sysarc.2026.103699_b26","series-title":"Generating long sequences with sparse transformers","author":"Child","year":"2019"},{"key":"10.1016\/j.sysarc.2026.103699_b27","unstructured":"N. Kitaev, \u0141. Kaiser, A. Levskaya, Reformer: The Efficient Transformer, in: International Conference on Learning Representations, 2020."},{"key":"10.1016\/j.sysarc.2026.103699_b28","series-title":"Linformer: Self-attention with linear complexity","author":"Wang","year":"2020"},{"key":"10.1016\/j.sysarc.2026.103699_b29","series-title":"FlashAttention-2: Faster attention with better parallelism and work partitioning","author":"Dao","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b30","article-title":"Flashattention: Fast and memory-efficient exact attention with io-awareness","volume":"vol. 35","author":"Dao","year":"2022"},{"key":"10.1016\/j.sysarc.2026.103699_b31","series-title":"International Conference on Machine Learning","article-title":"Fast inference from transformers via speculative decoding","author":"Leviathan","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b32","unstructured":"O. Press, N.A. Smith, M. Lewis, Train short, test long: Attention with linear biases enables input length extrapolation, in: International Conference on Learning Representations, 2022."},{"key":"10.1016\/j.sysarc.2026.103699_b33","series-title":"Extending context window of large language models via positional interpolation","author":"Chen","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b34","series-title":"Roformer: Enhanced transformer with rotary position embedding","author":"Su","year":"2021"},{"key":"10.1016\/j.sysarc.2026.103699_b35","series-title":"LLMLingua: Compressing prompts for accelerated inference of large language models","author":"Jiang","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b36","doi-asserted-by":"crossref","unstructured":"A. Gholami, S. Kim, Z. Dong, Z. Yao, M.W. Mahoney, K. 
Keutzer, A Survey of Quantization Methods for Efficient Neural Network Inference, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022, pp. 2919\u20132938.","DOI":"10.1201\/9781003162810-13"},{"key":"10.1016\/j.sysarc.2026.103699_b37","series-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b38","series-title":"Mistral 7B","author":"Jiang","year":"2023"},{"key":"10.1016\/j.sysarc.2026.103699_b39","series-title":"Compressive transformers for long-range sequence modelling","author":"Rae","year":"2019"},{"key":"10.1016\/j.sysarc.2026.103699_b40","series-title":"International Conference on Learning Representations","article-title":"Memorizing transformers","author":"Wu","year":"2022"},{"key":"10.1016\/j.sysarc.2026.103699_b41","series-title":"LoRC: Low-rank compression for LLMs KV cache with a progressive compression strategy","author":"Zhang","year":"2024"}],"container-title":["Journal of Systems Architecture"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1383762126000172?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1383762126000172?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,3,19]],"date-time":"2026-03-19T22:34:34Z","timestamp":1773959674000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1383762126000172"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,4]]},"references-count":41,"alternative-id":["S1383762126000172"],"URL":"https:\/\/doi.org\/10.1016\/j.sysarc.2026.103699","relation":{},"ISSN":["1383-7621"],"issn-type":[{"value":"1383-7621","type":"print"}],"subject":[],"published":{"date-parts":[[2026,4]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"KVC-Q: A high-fidelity and dynamic KV Cache quantization framework for long-context large language models","name":"articletitle","label":"Article Title"},{"value":"Journal of Systems Architecture","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.sysarc.2026.103699","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"103699"}}
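The record above is a Crossref REST API "work" message for the KVC-Q article. As a minimal sketch of how such a record can be consumed programmatically, the Python below (standard library only) fetches the same record from the public api.crossref.org/works/{DOI} endpoint and prints the citation-level fields. The envelope shape ({"status": ..., "message": {...}}) matches the record shown; the variable names and the choice of fields to print are illustrative, and optional fields are guarded with .get() since Crossref records omit fields that do not apply.

# Sketch: retrieve and summarize a Crossref work record like the one above.
# Assumption flagged here: the public Crossref REST API endpoint
# https://api.crossref.org/works/{DOI}; field names are taken from the
# record printed above.
import json
import urllib.request

DOI = "10.1016/j.sysarc.2026.103699"
with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    work = json.load(resp)["message"]   # unwrap the {"status", "message"} envelope

# Join author names; .get() guards fields that a given record may omit.
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in work.get("author", [])
)
print(work["title"][0])                 # article title
print(authors)                          # Yusen Wu, Ruiqin Lin, ...
print(work["container-title"][0],       # journal, volume, article number
      "vol.", work.get("volume", "?"),
      "art.", work.get("article-number", "?"))
print("cited references:", work.get("reference-count"))

The same fields can of course be read straight out of the JSON above without the network call; the request is only needed to obtain a fresh copy of the record.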