{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,17]],"date-time":"2025-09-17T06:12:06Z","timestamp":1758089526931,"version":"3.44.0"},"reference-count":33,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,22]],"date-time":"2025-06-22T00:00:00Z","timestamp":1750550400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,22]],"date-time":"2025-06-22T00:00:00Z","timestamp":1750550400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,22]]},"DOI":"10.1109\/dac63849.2025.11132485","type":"proceedings-article","created":{"date-parts":[[2025,9,15]],"date-time":"2025-09-15T17:35:41Z","timestamp":1757957741000},"page":"1-7","source":"Crossref","is-referenced-by-count":0,"title":["An Algorithm-Hardware Co-design Based on Revised Microscaling Format Quantization for Accelerating Large Language Models"],"prefix":"10.1109","author":[{"given":"Yingbo","family":"Hao","sequence":"first","affiliation":[{"name":"South China University of Technology"}]},{"given":"Huangxu","family":"Chen","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology (GZ),Guangzhou,China"}]},{"given":"Yi","family":"Zou","sequence":"additional","affiliation":[{"name":"South China University of Technology"}]},{"given":"Yanfeng","family":"Yang","sequence":"additional","affiliation":[{"name":"South China University of Technology"}]}],"member":"263","reference":[{"issue":"8","key":"ref1","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref2","article-title":"Gpt-4 technical report","volume-title":"arXiv preprint arXiv:2303.08774","author":"Achiam","year":"2023"},{"key":"ref3","article-title":"A white paper on neural network quantization","author":"Nagel","year":"2021","journal-title":"arXiv preprint arXiv:2106.08295"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/MM.2020.2971677"},{"key":"ref5","first-page":"5506","article-title":"I-bert: Integer-only bert quantization","volume-title":"International conference on machine learning.","author":"Kim"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i05.6409"},{"key":"ref7","first-page":"87","article-title":"Awq: Activation-aware weight quantization for on-device 1lm compression and acceleration","volume-title":"Proceedings of Machine Learning and Systems","volume":"6","author":"Lin"},{"key":"ref8","first-page":"30318","article-title":"Gpt3. 
      {"key": "ref9", "doi-asserted-by": "publisher", "DOI": "10.1109/MICRO56248.2022.00095"},
      {"key": "ref10", "doi-asserted-by": "publisher", "DOI": "10.1145/3579371.3589038"},
      {"key": "ref11", "doi-asserted-by": "publisher", "DOI": "10.1109/MICRO50266.2020.00071"},
      {"key": "ref12", "doi-asserted-by": "publisher", "DOI": "10.1109/ISCA.2018.00063"},
      {"key": "ref13", "first-page": "38087", "article-title": "SmoothQuant: Accurate and efficient post-training quantization for large language models", "volume-title": "International Conference on Machine Learning", "author": "Xiao"},
      {"key": "ref14", "article-title": "QLoRA: Efficient finetuning of quantized LLMs", "volume": "36", "author": "Dettmers", "year": "2024", "journal-title": "Advances in Neural Information Processing Systems"},
      {"key": "ref15", "article-title": "QServe: W4A8KV4 quantization and system co-design for efficient LLM serving", "author": "Lin", "year": "2024", "journal-title": "arXiv preprint arXiv:2405.04532"},
      {"key": "ref16", "first-page": "196", "article-title": "Atom: Low-bit quantization for efficient and accurate LLM serving", "volume-title": "Proceedings of Machine Learning and Systems", "volume": "6", "author": "Zhao"},
      {"key": "ref17", "doi-asserted-by": "publisher", "DOI": "10.1145/3579371.3589351"},
      {"key": "ref18", "doi-asserted-by": "publisher", "DOI": "10.1109/EMC2-NIPS53020.2019.00016"},
      {"key": "ref19", "first-page": "603", "article-title": "Natural language processing", "author": "Chowdhary", "year": "2020", "journal-title": "Fundamentals of Artificial Intelligence"},
      {"key": "ref20", "article-title": "OPT: Open pre-trained transformer language models", "author": "Zhang", "year": "2022", "journal-title": "arXiv preprint arXiv:2205.01068"},
      {"key": "ref21", "article-title": "Microscaling data formats for deep learning", "author": "Rouhani", "year": "2023", "journal-title": "arXiv preprint arXiv:2310.10537"},
      {"key": "ref22", "article-title": "68-95-99.7 rule", "author": "Wikipedia contributors", "year": "2022", "journal-title": "Wikipedia, The Free Encyclopedia"},
      {"key": "ref23", "article-title": "Post training 4-bit quantization of convolutional networks for rapid-deployment", "volume": "32", "author": "Banner", "year": "2019", "journal-title": "Advances in Neural Information Processing Systems"},
      {"key": "ref24", "article-title": "Llama 2: Open foundation and fine-tuned chat models", "author": "Touvron", "year": "2023", "journal-title": "arXiv preprint arXiv:2307.09288"},
      {"key": "ref25", "article-title": "LLaMA: Open and efficient foundation language models", "author": "Touvron", "year": "2023", "journal-title": "arXiv preprint arXiv:2302.13971"},
      {"issue": "140", "key": "ref26", "first-page": "1", "article-title": "Exploring the limits of transfer learning with a unified text-to-text transformer", "volume": "21", "author": "Raffel", "year": "2020", "journal-title": "Journal of Machine Learning Research"},
      {"key": "ref27", "doi-asserted-by": "publisher", "DOI": "10.1109/CVPR.2016.90"},
      {"key": "ref28", "doi-asserted-by": "publisher", "DOI": "10.1109/CVPR.2009.5206848"},
      {"key": "ref29", "doi-asserted-by": "publisher", "DOI": "10.1109/ISCA59077.2024.00080"},
      {"key": "ref30", "article-title": "Logic Synthesis Using Synopsys, 2nd ed.", "year": "2011", "author": "Kurup"},
      {"key": "ref31", "article-title": "CACTI 6.0: A tool to model large caches", "volume-title": "HP Laboratories Tech. Rep. HPL-2009-85", "author": "Muralimanohar", "year": "2009"},
      {"key": "ref32", "doi-asserted-by": "publisher", "DOI": "10.1109/MICRO.2016.7783720"},
      {"key": "ref33", "doi-asserted-by": "publisher", "DOI": "10.1109/ISCAS51556.2021.9401196"}
    ],
    "event": {"name": "2025 62nd ACM/IEEE Design Automation Conference (DAC)", "start": {"date-parts": [[2025, 6, 22]]}, "location": "San Francisco, CA, USA", "end": {"date-parts": [[2025, 6, 25]]}},
    "container-title": ["2025 62nd ACM/IEEE Design Automation Conference (DAC)"],
    "original-title": [],
    "link": [{"URL": "http://xplorestaging.ieee.org/ielx8/11132383/11132091/11132485.pdf?arnumber=11132485", "content-type": "unspecified", "content-version": "vor", "intended-application": "similarity-checking"}],
    "deposited": {"date-parts": [[2025, 9, 16]], "date-time": "2025-09-16T05:36:02Z", "timestamp": 1758000962000},
    "score": 1,
    "resource": {"primary": {"URL": "https://ieeexplore.ieee.org/document/11132485/"}},
    "subtitle": [],
    "short-title": [],
    "issued": {"date-parts": [[2025, 6, 22]]},
    "references-count": 33,
    "URL": "https://doi.org/10.1109/dac63849.2025.11132485",
    "relation": {},
    "subject": [],
    "published": {"date-parts": [[2025, 6, 22]]}
  }
}