{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T16:49:40Z","timestamp":1774716580352,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":88,"publisher":"ACM","funder":[{"name":"NRF","award":["RS-2024-00342148"],"award-info":[{"award-number":["RS-2024-00342148"]}]},{"name":"IITP","award":["2018-0-00503, RS-2024-00459797,IITP-2025-RS-2023-00256472"],"award-info":[{"award-number":["2018-0-00503, RS-2024-00459797,IITP-2025-RS-2023-00256472"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,6,21]]},"DOI":"10.1145\/3695053.3731019","type":"proceedings-article","created":{"date-parts":[[2025,6,20]],"date-time":"2025-06-20T16:43:11Z","timestamp":1750437791000},"page":"482-497","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":7,"title":["Oaken: Fast and Efficient LLM Serving with Online-Offline Hybrid KV Cache Quantization"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-8751-0352","authenticated-orcid":false,"given":"Minsu","family":"Kim","sequence":"first","affiliation":[{"name":"KAIST, Daejeon, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-7940-8221","authenticated-orcid":false,"given":"Seongmin","family":"Hong","sequence":"additional","affiliation":[{"name":"HyperAccel, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-3829-7761","authenticated-orcid":false,"given":"RyeoWook","family":"Ko","sequence":"additional","affiliation":[{"name":"KAIST, Daejeon, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-6453-9755","authenticated-orcid":false,"given":"Soongyu","family":"Choi","sequence":"additional","affiliation":[{"name":"KAIST, Daejeon, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-4460-9530","authenticated-orcid":false,"given":"Hunjong","family":"Lee","sequence":"additional","affiliation":[{"name":"HyperAccel, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6680-2602","authenticated-orcid":false,"given":"Junsoo","family":"Kim","sequence":"additional","affiliation":[{"name":"HyperAccel, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1099-1496","authenticated-orcid":false,"given":"Joo-Young","family":"Kim","sequence":"additional","affiliation":[{"name":"HyperAccel, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6629-449X","authenticated-orcid":false,"given":"Jongse","family":"Park","sequence":"additional","affiliation":[{"name":"KAIST, Daejeon, Republic of Korea"}]}],"member":"320","published-online":{"date-parts":[[2025,6,20]]},"reference":[{"key":"e_1_3_3_2_2_2","unstructured":"Amey Agrawal Nitin Kedia Ashish Panwar Jayashree Mohan Nipun Kwatra Bhargav\u00a0S Gulavani Alexey Tumanov and Ramachandran Ramjee. 2024. Taming Throughput-Latency Tradeoff in LLM Inference with Sarathi-Serve. arXiv preprint arXiv:https:\/\/arXiv.org\/abs\/2403.02310 (2024)."},{"key":"e_1_3_3_2_3_2","volume-title":"Thirty-seventh Conference on Neural Information Processing Systems","author":"Ahmadian Arash","year":"2023","unstructured":"Arash Ahmadian, Saurabh Dash, Hongyu Chen, Bharat Venkitesh, Zhen\u00a0Stephen Gou, Phil Blunsom, Ahmet \u00dcst\u00fcn, and Sara Hooker. 2023. Intriguing Properties of Quantization at Scale. In Thirty-seventh Conference on Neural Information Processing Systems. 
https:\/\/openreview.net\/forum?id=IYe8j7Gy8f"},{"key":"e_1_3_3_2_4_2","doi-asserted-by":"crossref","unstructured":"Joshua Ainslie James Lee-Thorp Michiel de Jong Yury Zemlyanskiy Federico Lebr\u00f3n and Sumit Sanghai. 2023. GQA: Training Generalized Multi-Query Transformer Models from Multi-Head Checkpoints. https:\/\/arxiv.org\/abs\/2305.13245","DOI":"10.18653\/v1\/2023.emnlp-main.298"},{"key":"e_1_3_3_2_5_2","unstructured":"Rohan Anil Andrew\u00a0M. Dai Orhan Firat Melvin Johnson Dmitry Lepikhin Alexandre Passos Siamak Shakeri Emanuel Taropa Paige Bailey Zhifeng Chen Eric Chu Jonathan\u00a0H. Clark Laurent\u00a0El Shafey Yanping Huang Kathy Meier-Hellstern Gaurav Mishra Erica Moreira Mark Omernick Kevin Robinson Sebastian Ruder Yi Tay Kefan Xiao Yuanzhong Xu Yujing Zhang Gustavo\u00a0Hernandez Abrego Junwhan Ahn Jacob Austin Paul Barham Jan Botha James Bradbury Siddhartha Brahma Kevin Brooks Michele Catasta Yong Cheng Colin Cherry Christopher\u00a0A. Choquette-Choo Aakanksha Chowdhery Cl\u00e9ment Crepy Shachi Dave Mostafa Dehghani Sunipa Dev Jacob Devlin Mark D\u00edaz Nan Du Ethan Dyer Vlad Feinberg Fangxiaoyu Feng Vlad Fienber Markus Freitag Xavier Garcia Sebastian Gehrmann Lucas Gonzalez Guy Gur-Ari Steven Hand Hadi Hashemi Le Hou Joshua Howland Andrea Hu Jeffrey Hui Jeremy Hurwitz Michael Isard Abe Ittycheriah Matthew Jagielski Wenhao Jia Kathleen Kenealy Maxim Krikun Sneha Kudugunta Chang Lan Katherine Lee Benjamin Lee Eric Li Music Li Wei Li YaGuang Li Jian Li Hyeontaek Lim Hanzhao Lin Zhongtao Liu Frederick Liu Marcello Maggioni Aroma Mahendru Joshua Maynez Vedant Misra Maysam Moussalem Zachary Nado John Nham Eric Ni Andrew Nystrom Alicia Parrish Marie Pellat Martin Polacek Alex Polozov Reiner Pope Siyuan Qiao Emily Reif Bryan Richter Parker Riley Alex\u00a0Castro Ros Aurko Roy Brennan Saeta Rajkumar Samuel Renee Shelby Ambrose Slone Daniel Smilkov David\u00a0R. So Daniel Sohn Simon Tokumine Dasha Valter Vijay Vasudevan Kiran Vodrahalli Xuezhi Wang Pidong Wang Zirui Wang Tao Wang John Wieting Yuhuai Wu Kelvin Xu Yunhan Xu Linting Xue Pengcheng Yin Jiahui Yu Qiao Zhang Steven Zheng Ce Zheng Weikang Zhou Denny Zhou Slav Petrov and Yonghui Wu. 2023. PaLM 2 Technical Report. arXiv preprint arXiv:2305.10403 (2023)."},{"key":"e_1_3_3_2_6_2","unstructured":"Saleh Ashkboos Amirkeivan Mohtashami Maximilian\u00a0L. Croci Bo Li Pashmina Cameron Martin Jaggi Dan Alistarh Torsten Hoefler and James Hensman. 2024. QuaRot: Outlier-Free 4-Bit Inference in Rotated LLMs. arXiv preprint arXiv:2404.00456 (2024). https:\/\/arxiv.org\/abs\/2404.00456"},{"key":"e_1_3_3_2_7_2","unstructured":"Iz Beltagy Matthew\u00a0E. Peters and Arman Cohan. 2020. Longformer: The Long-Document Transformer. https:\/\/arxiv.org\/abs\/2004.05150"},{"key":"e_1_3_3_2_8_2","unstructured":"Yonatan Bisk Rowan Zellers Ronan\u00a0Le Bras Jianfeng Gao and Yejin Choi. 2019. PIQA: Reasoning about Physical Commonsense in Natural Language. arXiv preprint arXiv:1911.11641 (2019)."},{"key":"e_1_3_3_2_9_2","unstructured":"Tom\u00a0B. Brown Benjamin Mann Nick Ryder Melanie Subbiah Jared Kaplan Prafulla Dhariwal Arvind Neelakantan Pranav Shyam Girish Sastry Amanda Askell Sandhini Agarwal Ariel Herbert-Voss Gretchen Krueger Tom Henighan Rewon Child Aditya Ramesh Daniel\u00a0M. 
Ziegler Jeffrey Wu Clemens Winter Christopher Hesse Mark Chen Eric Sigler Mateusz Litwin Scott Gray Benjamin Chess Jack Clark Christopher Berner Sam McCandlish Alec Radford Ilya Sutskever and Dario Amodei. 2020. Language Models are Few-Shot Learners. arXiv preprint arXiv:2005.14165 (2020)."},{"key":"e_1_3_3_2_10_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA51647.2021.00027"},{"key":"e_1_3_3_2_11_2","series-title":"(NIPS \u201923)","volume-title":"Proceedings of the 37th International Conference on Neural Information Processing Systems","author":"Chee Jerry","year":"2024","unstructured":"Jerry Chee, Yaohui Cai, Volodymyr Kuleshov, and Christopher De\u00a0Sa. 2024. QuIP: 2-bit quantization of large language models with guarantees. In Proceedings of the 37th International Conference on Neural Information Processing Systems (New Orleans, LA, USA) (NIPS \u201923). Curran Associates Inc., Red Hook, NY, USA, Article 196, 34\u00a0pages."},{"key":"e_1_3_3_2_12_2","unstructured":"Wenhua Cheng Weiwei Zhang Haihao Shen Yiyang Cai Xin He and Kaokao Lv. 2023. Optimize Weight Rounding via Signed Gradient Descent for the Quantization of LLMs. arXiv preprint arXiv:2309.05516 (2023)."},{"key":"e_1_3_3_2_13_2","doi-asserted-by":"crossref","unstructured":"Zhoujun Cheng Jungo Kasai and Tao Yu. 2023. Batch Prompting: Efficient Inference with Large Language Model APIs. arXiv preprint arXiv:2301.08721 (2023).","DOI":"10.18653\/v1\/2023.emnlp-industry.74"},{"key":"e_1_3_3_2_14_2","unstructured":"Brian Chmiel Ron Banner Elad Hoffer Hilla\u00a0Ben Yaacov and Daniel Soudry. 2024. Accurate Neural Training with 4-bit Matrix Multiplications at Standard Formats. https:\/\/arxiv.org\/abs\/2112.10769"},{"key":"e_1_3_3_2_15_2","unstructured":"Tri Dao Daniel\u00a0Y. Fu Stefano Ermon Atri Rudra and Christopher R\u00e9. 2022. FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness. arXiv preprint arXiv:2205.14135 (2022)."},{"key":"e_1_3_3_2_16_2","unstructured":"Tim Dettmers Mike Lewis Younes Belkada and Luke Zettlemoyer. 2022. LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale. arXiv preprint arXiv:2208.07339 (2022)."},{"key":"e_1_3_3_2_17_2","doi-asserted-by":"publisher","DOI":"10.1145\/3613424.3623783"},{"key":"e_1_3_3_2_18_2","volume-title":"The Eleventh International Conference on Learning Representations","author":"Frantar Elias","year":"2022","unstructured":"Elias Frantar, Saleh Ashkboos, Torsten Hoefler, and Dan Alistarh. 2022. OPTQ: Accurate quantization for generative pre-trained transformers. In The Eleventh International Conference on Learning Representations."},{"key":"e_1_3_3_2_19_2","doi-asserted-by":"publisher","unstructured":"Christina Giannoula Ivan Fernandez Juan\u00a0G\u00f3mez Luna Nectarios Koziris Georgios Goumas and Onur Mutlu. 2022. SparseP: Towards Efficient Sparse Matrix Vector Multiplication on Real Processing-In-Memory Architectures. Proc. ACM Meas. Anal. Comput. Syst. 6 1 Article 21 (Feb. 2022) 49\u00a0pages. 
10.1145\/3508041","DOI":"10.1145\/3508041"},{"key":"e_1_3_3_2_20_2","doi-asserted-by":"publisher","DOI":"10.1145\/3579371.3589038"},{"key":"e_1_3_3_2_21_2","doi-asserted-by":"publisher","DOI":"10.1145\/3620666.3651380"},{"key":"e_1_3_3_2_22_2","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO56248.2022.00051"},{"key":"e_1_3_3_2_23_2","unstructured":"Coleman Hooper Sehoon Kim Hiva Mohammadzadeh Michael\u00a0W Mahoney Yakun\u00a0Sophia Shao Kurt Keutzer and Amir Gholami. 2024. KVQuant: Towards 10 Million Context Length LLM Inference with KV Cache Quantization. arXiv preprint arXiv:2401.18079 (2024)."},{"key":"e_1_3_3_2_24_2","doi-asserted-by":"crossref","unstructured":"Suyeon Hur Seongmin Na Dongup Kwon Joonsung Kim Andrew Boutros Eriko Nurvitadhi and Jangwoo Kim. 2023. A fast and flexible FPGA-based accelerator for natural language processing neural networks. ACM Transactions on Architecture and Code Optimization 20 1 (2023) 1\u201324.","DOI":"10.1145\/3564606"},{"key":"e_1_3_3_2_25_2","doi-asserted-by":"publisher","unstructured":"Ahmet Inci Siri Virupaksha Aman Jain Ting-Wu Chin Venkata Thallam Ruizhou Ding and Diana Marculescu. 2023. QUIDAM: A Framework for Quantization-aware DNN Accelerator and Model Co-Exploration. ACM Trans. Embed. Comput. Syst. 22 2 Article 33 (Jan. 2023) 21\u00a0pages. 10.1145\/3555807","DOI":"10.1145\/3555807"},{"key":"e_1_3_3_2_26_2","unstructured":"Albert\u00a0Q. Jiang Alexandre Sablayrolles Arthur Mensch Chris Bamford Devendra\u00a0Singh Chaplot Diego de\u00a0las Casas Florian Bressand Gianna Lengyel Guillaume Lample Lucile Saulnier L\u00e9lio\u00a0Renard Lavaud Marie-Anne Lachaux Pierre Stock Teven\u00a0Le Scao Thibaut Lavril Thomas Wang Timoth\u00e9e Lacroix and William\u00a0El Sayed. 2023. Mistral 7B. arXiv preprint arXiv:2310.06825 (2023)."},{"key":"e_1_3_3_2_27_2","unstructured":"Albert\u00a0Q. Jiang Alexandre Sablayrolles Antoine Roux Arthur Mensch Blanche Savary Chris Bamford Devendra\u00a0Singh Chaplot Diego de\u00a0las Casas Emma\u00a0Bou Hanna Florian Bressand Gianna Lengyel Guillaume Bour Guillaume Lample L\u00e9lio\u00a0Renard Lavaud Lucile Saulnier Marie-Anne Lachaux Pierre Stock Sandeep Subramanian Sophia Yang Szymon Antoniak Teven\u00a0Le Scao Th\u00e9ophile Gervet Thibaut Lavril Thomas Wang Timoth\u00e9e Lacroix and William\u00a0El Sayed. 2024. Mixtral of Experts. https:\/\/arxiv.org\/abs\/2401.04088"},{"key":"e_1_3_3_2_28_2","volume-title":"International Conference on Learning Representations","author":"Jin Qing","year":"2022","unstructured":"Qing Jin, Jian Ren, Richard Zhuang, Sumant Hanumante, Zhengang Li, Zhiyu Chen, Yanzhi Wang, Kaiyuan Yang, and Sergey Tulyakov. 2022. F8Net: Fixed-Point 8-bit Only Multiplication for Network Quantization. In International Conference on Learning Representations. https:\/\/openreview.net\/forum?id=_CfpJazzXT2"},{"key":"e_1_3_3_2_29_2","doi-asserted-by":"crossref","unstructured":"Norman\u00a0P. Jouppi George Kurian Sheng Li Peter Ma Rahul Nagarajan Lifeng Nai Nishant Patil Suvinay Subramanian Andy Swing Brian Towles Cliff Young Xiang Zhou Zongwei Zhou and David Patterson. 2023. TPU v4: An Optically Reconfigurable Supercomputer for Machine Learning with Hardware Support for Embeddings. 
arXiv preprint arXiv:2304.01433 (2023).","DOI":"10.1145\/3579371.3589350"},{"key":"e_1_3_3_2_30_2","doi-asserted-by":"publisher","DOI":"10.1109\/VLSITechnologyandCir46769.2022.9830277"},{"key":"e_1_3_3_2_31_2","unstructured":"Sehoon Kim Coleman Hooper Amir Gholami Zhen Dong Xiuyu Li Sheng Shen Michael\u00a0W Mahoney and Kurt Keutzer. 2023. SqueezeLLM: Dense-and-sparse quantization. arXiv preprint arXiv:2306.07629 (2023)."},{"key":"e_1_3_3_2_32_2","unstructured":"Young\u00a0Jin Kim Rawn Henry Raffy Fahim and Hany\u00a0Hassan Awadalla. 2023. FineQuant: Unlocking efficiency with fine-grained weight-only quantization for LLMs. arXiv preprint arXiv:2308.09723 (2023)."},{"key":"e_1_3_3_2_33_2","doi-asserted-by":"publisher","DOI":"10.1145\/3600006.3613165"},{"key":"e_1_3_3_2_34_2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i12.29237"},{"key":"e_1_3_3_2_35_2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.910"},{"key":"e_1_3_3_2_36_2","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA59077.2024.00080"},{"key":"e_1_3_3_2_37_2","first-page":"945","volume-title":"2023 USENIX Annual Technical Conference (USENIX ATC 23)","author":"Li Jiamin","year":"2023","unstructured":"Jiamin Li, Yimin Jiang, Yibo Zhu, Cong Wang, and Hong Xu. 2023. Accelerating Distributed MoE Training and Inference with Lina. In 2023 USENIX Annual Technical Conference (USENIX ATC 23). 945\u2013959."},{"key":"e_1_3_3_2_38_2","doi-asserted-by":"crossref","unstructured":"Liang Li Qingyuan Li Bo Zhang and Xiangxiang Chu. 2024. Norm Tweaking: High-Performance Low-Bit Quantization of Large Language Models. Proceedings of the AAAI Conference on Artificial Intelligence (Mar. 2024) 18536\u201318544.","DOI":"10.1609\/aaai.v38i17.29815"},{"key":"e_1_3_3_2_39_2","unstructured":"Wenjie Li Aokun Hu Ningyi Xu and Guanghui He. 2024. Quantization and Hardware Architecture Co-Design for Matrix-Vector Multiplications of Large Language Models. IEEE Transactions on Circuits and Systems I: Regular Papers (2024)."},{"key":"e_1_3_3_2_40_2","unstructured":"Haokun Lin Haobo Xu Yichen Wu Jingzhi Cui Yingtao Zhang Linzhan Mou Linqi Song Zhenan Sun and Ying Wei. 2024. DuQuant: Distributing Outliers via Dual Transformation Makes Stronger Quantized LLMs. arXiv preprint arXiv:2406.01721 (2024). https:\/\/arxiv.org\/abs\/2406.01721"},{"key":"e_1_3_3_2_41_2","unstructured":"Ji Lin Jiaming Tang Haotian Tang Shang Yang Xingyu Dang and Song Han. 2023. AWQ: Activation-aware weight quantization for LLM compression and acceleration. arXiv preprint arXiv:2306.00978 (2023)."},{"key":"e_1_3_3_2_42_2","unstructured":"Yujun Lin Haotian Tang Shang Yang Zhekai Zhang Guangxuan Xiao Chuang Gan and Song Han. 2024. QServe: W4A8KV4 Quantization and System Co-design for Efficient LLM Serving. arXiv preprint arXiv:2405.04532 (2024)."},{"key":"e_1_3_3_2_43_2","unstructured":"Jing Liu Ruihao Gong Xiuying Wei Zhiwei Dong Jianfei Cai and Bohan Zhuang. 2023. QLLM: Accurate and efficient low-bitwidth quantization for large language models. arXiv preprint arXiv:2310.08041 (2023)."},{"key":"e_1_3_3_2_44_2","unstructured":"Zirui Liu Jiayi Yuan Hongye Jin Shaochen Zhong Zhaozhuo Xu Vladimir Braverman Beidi Chen and Xia Hu. 2024. KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache. 
arXiv preprint arXiv:2402.02750 (2024)."},{"key":"e_1_3_3_2_45_2","unstructured":"Zechun Liu Changsheng Zhao Igor Fedorov Bilge Soran Dhruv Choudhary Raghuraman Krishnamoorthi Vikas Chandra Yuandong Tian and Tijmen Blankevoort. 2025. SpinQuant: LLM quantization with learned rotations. (2025). arXiv:2405.16406\u00a0[cs.LG] https:\/\/arxiv.org\/abs\/2405.16406"},{"key":"e_1_3_3_2_46_2","doi-asserted-by":"crossref","unstructured":"Eitan Medina and Eran Dagan. 2020. Habana Labs Purpose-Built AI Inference and Training Processor Architectures: Scaling AI Training Systems Using Standard Ethernet With Gaudi Processor. IEEE Micro 40 (2020) 17\u201324.","DOI":"10.1109\/MM.2020.2975185"},{"key":"e_1_3_3_2_47_2","unstructured":"Stephen Merity Caiming Xiong James Bradbury and Richard Socher. 2016. Pointer Sentinel Mixture Models. arXiv preprint arXiv:1609.07843 (2016)."},{"key":"e_1_3_3_2_48_2","unstructured":"Microsoft. 2024. AzurePublicDataset. https:\/\/github.com\/Azure\/AzurePublicDataset."},{"key":"e_1_3_3_2_49_2","doi-asserted-by":"publisher","DOI":"10.1109\/HCS59251.2023.10254693"},{"key":"e_1_3_3_2_50_2","unstructured":"NVIDIA. 2020. NVIDIA A100 Tensor Core GPU Architecture. https:\/\/images.nvidia.com\/aem-dam\/en-zz\/Solutions\/data-center\/nvidia-ampere-architecture-whitepaper.pdf."},{"key":"e_1_3_3_2_51_2","unstructured":"NVIDIA. 2023. NVIDIA TensorRT-LLM. https:\/\/github.com\/NVIDIA\/TensorRT-LLM."},{"key":"e_1_3_3_2_52_2","volume-title":"ICLR","author":"Park Gunho","year":"2024","unstructured":"Gunho Park, Baeseong Park, Minsub Kim, Sungjae Lee, Jeonghoon Kim, Beomseok Kwon, Se\u00a0Jung Kwon, Byeongwook Kim, Youngjoo Lee, and Dongsoo Lee. 2024. LUT-GEMM: Quantized Matrix Multiplication based on LUTs for Efficient Inference in Large-Scale Generative Language Models. In ICLR. https:\/\/openreview.net\/forum?id=gLARhFLE0F"},{"key":"e_1_3_3_2_53_2","doi-asserted-by":"publisher","DOI":"10.1145\/3620665.3640422"},{"key":"e_1_3_3_2_54_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA57654.2024.00078"},{"key":"e_1_3_3_2_55_2","volume-title":"ISCA","author":"Patel Pratyush","year":"2024","unstructured":"Pratyush Patel, Esha Choukse, Chaojie Zhang, Aashaka Shah, Inigo Goiri, Saeed Maleki, and Ricardo Bianchini. 2024. Splitwise: Efficient generative LLM inference using phase splitting. In ISCA. https:\/\/www.microsoft.com\/en-us\/research\/publication\/splitwise-efficient-generative-llm-inference-using-phase-splitting"},{"key":"e_1_3_3_2_56_2","doi-asserted-by":"publisher","DOI":"10.1145\/3579371.3589057"},{"key":"e_1_3_3_2_57_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA56546.2023.10071076"},{"key":"e_1_3_3_2_58_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA.2018.00017"},{"key":"e_1_3_3_2_59_2","unstructured":"Keisuke Sakaguchi Ronan\u00a0Le Bras Chandra Bhagavatula and Yejin Choi. 2019. WinoGrande: An Adversarial Winograd Schema Challenge at Scale. arXiv preprint arXiv:1907.10641 (2019)."},{"key":"e_1_3_3_2_60_2","doi-asserted-by":"publisher","DOI":"10.1145\/3620666.3651324"},{"key":"e_1_3_3_2_61_2","unstructured":"Wenqi Shao Mengzhao Chen Zhaoyang Zhang Peng Xu Lirui Zhao Zhiqian Li Kaipeng Zhang Peng Gao Yu Qiao and Ping Luo. 2023. OmniQuant: Omnidirectionally calibrated quantization for large language models. arXiv preprint arXiv:2308.13137 (2023)."},{"key":"e_1_3_3_2_62_2","unstructured":"Noam Shazeer. 2019. 
Fast Transformer Decoding: One Write-Head is All You Need. https:\/\/arxiv.org\/abs\/1911.02150"},{"key":"e_1_3_3_2_63_2","unstructured":"Noam Shazeer Azalia Mirhoseini Krzysztof Maziarz Andy Davis Quoc Le Geoffrey Hinton and Jeff Dean. 2017. Outrageously Large Neural Networks: The Sparsely-Gated Mixture-of-Experts Layer. https:\/\/arxiv.org\/abs\/1701.06538"},{"key":"e_1_3_3_2_64_2","doi-asserted-by":"crossref","unstructured":"Xuan Shen Peiyan Dong Lei Lu Zhenglun Kong Zhengang Li Ming Lin Chao Wu and Yanzhi Wang. 2024. Agile-Quant: Activation-Guided Quantization for Faster Inference of LLMs on the Edge. Proceedings of the AAAI Conference on Artificial Intelligence (2024).","DOI":"10.1609\/aaai.v38i17.29860"},{"key":"e_1_3_3_2_65_2","first-page":"31094","volume-title":"International Conference on Machine Learning","author":"Sheng Ying","year":"2023","unstructured":"Ying Sheng, Lianmin Zheng, Binhang Yuan, Zhuohan Li, Max Ryabinin, Beidi Chen, Percy Liang, Christopher R\u00e9, Ion Stoica, and Ce Zhang. 2023. FlexGen: High-throughput generative inference of large language models with a single GPU. In International Conference on Machine Learning. PMLR, 31094\u201331116."},{"key":"e_1_3_3_2_66_2","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA45697.2020.00086"},{"key":"e_1_3_3_2_67_2","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale Dan Bikel Lukas Blecher Cristian\u00a0Canton Ferrer Moya Chen Guillem Cucurull David Esiobu Jude Fernandes Jeremy Fu Wenyin Fu Brian Fuller Cynthia Gao Vedanuj Goswami Naman Goyal Anthony Hartshorn Saghar Hosseini Rui Hou Hakan Inan Marcin Kardas Viktor Kerkez Madian Khabsa Isabel Kloumann Artem Korenev Punit\u00a0Singh Koura Marie-Anne Lachaux Thibaut Lavril Jenya Lee Diana Liskovich Yinghai Lu Yuning Mao Xavier Martinet Todor Mihaylov Pushkar Mishra Igor Molybog Yixin Nie Andrew Poulton Jeremy Reizenstein Rashi Rungta Kalyan Saladi Alan Schelten Ruan Silva Eric\u00a0Michael Smith Ranjan Subramanian Xiaoqing\u00a0Ellen Tan Binh Tang Ross Taylor Adina Williams Jian\u00a0Xiang Kuan Puxin Xu Zheng Yan Iliyan Zarov Yuchen Zhang Angela Fan Melanie Kambadur Sharan Narang Aurelien Rodriguez Robert Stojnic Sergey Edunov and Thomas Scialom. 2023. Llama 2: Open Foundation and Fine-Tuned Chat Models. arXiv preprint arXiv:2307.09288 (2023)."},{"key":"e_1_3_3_2_68_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA51647.2021.00018"},{"key":"e_1_3_3_2_69_2","unstructured":"Yuxin Wang Yuhan Chen Zeyu Li Xueze Kang Zhenheng Tang Xin He Rui Guo Xin Wang Qiang Wang Amelie\u00a0Chi Zhou and Xiaowen Chu. 2024. BurstGPT: A Real-world Workload Dataset to Optimize LLM Serving Systems. arXiv:2401.17644\u00a0[cs.DC] https:\/\/arxiv.org\/abs\/2401.17644"},{"key":"e_1_3_3_2_70_2","unstructured":"Xiuying Wei Yunchen Zhang Yuhang Li Xiangguo Zhang Ruihao Gong Jinyang Guo and Xianglong Liu. 2023. Outlier suppression+: Accurate quantization of large language models by equivalent and optimal shifting and scaling. arXiv preprint arXiv:2304.09145 (2023)."},{"key":"e_1_3_3_2_71_2","unstructured":"Xiuying Wei Yunchen Zhang Xiangguo Zhang Ruihao Gong Shanghang Zhang Qi Zhang Fengwei Yu and Xianglong Liu. 2022. Outlier suppression: Pushing the limit of low-bit transformer language models. 
Advances in Neural Information Processing Systems 35 (2022) 17402\u201317414."},{"key":"e_1_3_3_2_72_2","first-page":"38087","volume-title":"International Conference on Machine Learning","author":"Xiao Guangxuan","year":"2023","unstructured":"Guangxuan Xiao, Ji Lin, Mickael Seznec, Hao Wu, Julien Demouth, and Song Han. 2023. SmoothQuant: Accurate and efficient post-training quantization for large language models. In International Conference on Machine Learning. PMLR, 38087\u201338099."},{"key":"e_1_3_3_2_73_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA51647.2021.00055"},{"key":"e_1_3_3_2_74_2","unstructured":"Yuhui Xu Lingxi Xie Xiaotao Gu Xin Chen Heng Chang Hengheng Zhang Zhengsu Chen Xiaopeng Zhang and Qi Tian. 2023. QA-LoRA: Quantization-Aware Low-Rank Adaptation of Large Language Models. arXiv preprint arXiv:2309.14717 (2023)."},{"key":"e_1_3_3_2_75_2","doi-asserted-by":"crossref","unstructured":"Jianxun Yang Fengbin Tu Yixuan Li Yiqi Wang Leibo Liu Shaojun Wei and Shouyi Yin. 2022. GQNA: Generic Quantized DNN Accelerator With Weight-Repetition-Aware Activation Aggregating. IEEE Transactions on Circuits and Systems I: Regular Papers (2022).","DOI":"10.1109\/TCSI.2022.3188899"},{"key":"e_1_3_3_2_76_2","volume-title":"Advances in Neural Information Processing Systems","author":"Yao Zhewei","year":"2022","unstructured":"Zhewei Yao, Reza\u00a0Yazdani Aminabadi, Minjia Zhang, Xiaoxia Wu, Conglong Li, and Yuxiong He. 2022. ZeroQuant: Efficient and Affordable Post-Training Quantization for Large-Scale Transformers. In Advances in Neural Information Processing Systems."},{"key":"e_1_3_3_2_77_2","volume-title":"ICML","author":"Ding Yiran","year":"2024","unstructured":"Yiran Ding, Li Lyna Zhang, Chengruidong Zhang, Yuanyuan Xu, Ning Shang, Jiahang Xu, Fan Yang, and Mao Yang. 2024. LongRoPE: Extending LLM Context Window Beyond 2 Million Tokens. In ICML."},{"key":"e_1_3_3_2_78_2","first-page":"521","volume-title":"16th USENIX Symposium on Operating Systems Design and Implementation (OSDI 22)","author":"Yu Gyeong-In","year":"2022","unstructured":"Gyeong-In Yu, Joo\u00a0Seong Jeong, Geon-Woo Kim, Soojeong Kim, and Byung-Gon Chun. 2022. Orca: A distributed serving system for Transformer-Based generative models. In 16th USENIX Symposium on Operating Systems Design and Implementation (OSDI 22). 521\u2013538."},{"key":"e_1_3_3_2_79_2","unstructured":"Zhihang Yuan Lin Niu Jiawei Liu Wenyu Liu Xinggang Wang Yuzhang Shang Guangyu Sun Qiang Wu Jiaxiang Wu and Bingzhe Wu. 2023. RPTQ: Reorder-based post-training quantization for large language models. arXiv preprint arXiv:2304.01089 (2023)."},{"key":"e_1_3_3_2_80_2","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO50266.2020.00071"},{"key":"e_1_3_3_2_81_2","doi-asserted-by":"publisher","DOI":"10.1145\/3470496.3527438"},{"key":"e_1_3_3_2_82_2","doi-asserted-by":"crossref","unstructured":"Rowan Zellers Ari Holtzman Yonatan Bisk Ali Farhadi and Yejin Choi. 2019. HellaSwag: Can a Machine Really Finish Your Sentence? arXiv preprint arXiv:1905.07830 (2019).","DOI":"10.18653\/v1\/P19-1472"},{"key":"e_1_3_3_2_83_2","doi-asserted-by":"crossref","unstructured":"Shulin Zeng Jun Liu Guohao Dai Xinhao Yang Tianyu Fu Hongyi Wang Wenheng Ma Hanbo Sun Shiyao Li Zixiao Huang Yadong Dai Jintao Li Zehao Wang Ruoyu Zhang Kairui Wen Xuefei Ning and Yu Wang. 
2024. FlightLLM: Efficient Large Language Model Inference with a Complete Mapping Flow on FPGA. arXiv preprint arXiv:2401.03868 (2024).","DOI":"10.1145\/3626202.3637562"},{"key":"e_1_3_3_2_84_2","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA59077.2024.00082"},{"key":"e_1_3_3_2_85_2","unstructured":"Susan Zhang Stephen Roller Naman Goyal Mikel Artetxe Moya Chen Shuohui Chen Christopher Dewan Mona Diab Xian Li Xi\u00a0Victoria Lin Todor Mihaylov Myle Ott Sam Shleifer Kurt Shuster Daniel Simig Punit\u00a0Singh Koura Anjali Sridhar Tianlu Wang and Luke Zettlemoyer. 2022. OPT: Open Pre-trained Transformer Language Models. arXiv preprint arXiv:2205.01068 (2022)."},{"key":"e_1_3_3_2_86_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA47549.2020.00030"},{"key":"e_1_3_3_2_87_2","unstructured":"Yilong Zhao Chien-Yu Lin Kan Zhu Zihao Ye Lequn Chen Size Zheng Luis Ceze Arvind Krishnamurthy Tianqi Chen and Baris Kasikci. 2024. Atom: Low-bit Quantization for Efficient and Accurate LLM Serving. arXiv preprint arXiv:2310.19102 (2024)."},{"key":"e_1_3_3_2_88_2","doi-asserted-by":"crossref","unstructured":"Youpeng Zhao Di Wu and Jun Wang. 2024. ALISA: Accelerating Large Language Model Inference via Sparsity-Aware KV Caching. arXiv preprint arXiv:2403.17312 (2024).","DOI":"10.1109\/ISCA59077.2024.00077"},{"key":"e_1_3_3_2_89_2","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA53966.2022.00082"}],"event":{"name":"ISCA '25: Proceedings of the 52nd Annual International Symposium on Computer Architecture","location":"Tokyo, Japan","acronym":"ISCA '25","sponsor":["SIGARCH ACM Special Interest Group on Computer Architecture"]},"container-title":["Proceedings of the 52nd Annual International Symposium on Computer Architecture"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3695053.3731019","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,21]],"date-time":"2025-06-21T11:04:25Z","timestamp":1750503865000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3695053.3731019"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,20]]},"references-count":88,"alternative-id":["10.1145\/3695053.3731019","10.1145\/3695053"],"URL":"https:\/\/doi.org\/10.1145\/3695053.3731019","relation":{},"subject":[],"published":{"date-parts":[[2025,6,20]]},"assertion":[{"value":"2025-06-20","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}