{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,28]],"date-time":"2026-02-28T04:52:53Z","timestamp":1772254373818,"version":"3.50.1"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,26]],"date-time":"2025-10-26T00:00:00Z","timestamp":1761436800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,26]],"date-time":"2025-10-26T00:00:00Z","timestamp":1761436800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,26]]},"DOI":"10.1109\/iccad66269.2025.11240926","type":"proceedings-article","created":{"date-parts":[[2025,11,20]],"date-time":"2025-11-20T18:39:34Z","timestamp":1763663974000},"page":"1-9","source":"Crossref","is-referenced-by-count":1,"title":["OA-LAMA: An Outlier-Adaptive LLM Inference Accelerator with Memory-Aligned Mixed-Precision Group Quantization"],"prefix":"10.1109","author":[{"given":"Huangxu","family":"Chen","sequence":"first","affiliation":[{"name":"The Hong Kong University of Science and Technology (Guangzhou)"}]},{"given":"Yingbo","family":"Hao","sequence":"additional","affiliation":[{"name":"South China University of Technology"}]},{"given":"Yi","family":"Zou","sequence":"additional","affiliation":[{"name":"South China University of Technology"}]},{"given":"Xinyu","family":"Chen","sequence":"additional","affiliation":[{"name":"The Hong Kong University of Science and Technology (Guangzhou)"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3649329.3657323"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3579371.3589038"},{"key":"ref3","first-page":"196","article-title":"Atom: Low-bit quantization for efficient and accurate llm serving","volume-title":"Proceedings of Machine Learning and Systems","volume":"6","author":"Zhao"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA59077.2024.00080"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3649329.3656221"},{"key":"ref6","first-page":"3","article-title":"Language models are few-shot learners","volume":"1","author":"Mann","year":"2020"},{"key":"ref7","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023"},{"key":"ref8","article-title":"Github copilot","year":"2023"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-025-09422-z"},{"key":"ref10","article-title":"Nvidia blackwell b200 gpu","author":"Corporation","year":"2024"},{"key":"ref11","article-title":"A white paper on neural network quantization","author":"Nagel","year":"2021"},{"key":"ref12","article-title":"Evaluating quantized large language models","author":"Li","year":"2024"},{"key":"ref13","first-page":"87","article-title":"Awq: Activation-aware weight quantization for on-device llm compression and acceleration","volume-title":"Proceedings of Machine Learning and 
Systems","volume":"6","author":"Lin"},{"key":"ref14","first-page":"38 087","article-title":"Smoothquant: Accurate and efficient post-training quantization for large language models","volume-title":"International Conference on Machine Learning","author":"Xiao"},{"key":"ref15","article-title":"Qserve: W4a8kv4 quantization and system co-design for efficient llm serving","author":"Lin","year":"2024"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO50266.2020.00071"},{"key":"ref17","article-title":"Gptq: Accurate post-training quantization for generative pre-trained transformers","author":"Frantar","year":"2022"},{"key":"ref18","first-page":"30 318","article-title":"Gpt3. int8 (): 8-bit matrix multiplication for transformers at scale","volume":"35","author":"Dettmers","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref19","first-page":"27 168","article-title":"Zeroquant: Efficient and affordable post-training quantization for large-scale transformers","volume":"35","author":"Yao","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.197"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO56248.2022.00095"},{"key":"ref22","article-title":"Qlora: Efficient finetuning of quantized llms","volume":"36","author":"Dettmers","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i12.29237"},{"key":"ref24","article-title":"Llm inference unveiled: Survey and roofline model insights","author":"Yuan","year":"2024"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.52202\/079017-0040"},{"key":"ref26","article-title":"Kivi: A tuning-free asymmetric 2bit quantization for kv cache","author":"Liu","year":"2024"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1145\/3600006.3613165"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA.2018.00063"},{"key":"ref29","article-title":"Ocp microscaling (mx) specification","author":"Bita Darvish Rouhani","year":"2023","journal-title":"Open Compute Project"},{"key":"ref30","article-title":"Microscaling data formats for deep learning","author":"Rouhani","year":"2023"},{"key":"ref31","article-title":"Opt: Open pre-trained transformer language models","author":"Zhang","year":"2022"},{"key":"ref32","article-title":"Channel-wise mixed-precision quantization for large language models","author":"Chen","year":"2024"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3079856.3080246"},{"key":"ref35","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref36","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref37","article-title":"68-95-99.7 rule - wikipedia","author":"contributors","year":"2022","journal-title":"The Free Encyclopedia"},{"key":"ref38","volume-title":"A framework for few-shot language model evaluation","author":"Gao","year":"2021"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-4757-2370-0"},{"key":"ref40","article-title":"Cacti 6.0: A tool to model large caches. hp laboratories","volume-title":"Tech. Rep. 
HPL-2009-85","author":"Muralimanohar","year":"2009"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO.2016.7783720"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ISCAS51556.2021.9401196"}],"event":{"name":"2025 IEEE\/ACM International Conference On Computer Aided Design (ICCAD)","location":"Munich, Germany","start":{"date-parts":[[2025,10,26]]},"end":{"date-parts":[[2025,10,30]]}},"container-title":["2025 IEEE\/ACM International Conference On Computer Aided Design (ICCAD)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11240608\/11240621\/11240926.pdf?arnumber=11240926","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T05:46:21Z","timestamp":1763703981000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11240926\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,26]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/iccad66269.2025.11240926","relation":{},"subject":[],"published":{"date-parts":[[2025,10,26]]}}}