{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,17]],"date-time":"2025-09-17T06:12:10Z","timestamp":1758089530110,"version":"3.44.0"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,22]],"date-time":"2025-06-22T00:00:00Z","timestamp":1750550400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,22]],"date-time":"2025-06-22T00:00:00Z","timestamp":1750550400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001321","name":"National Research Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001321","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003836","name":"IC Design Education Center","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003836","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,22]]},"DOI":"10.1109\/dac63849.2025.11133184","type":"proceedings-article","created":{"date-parts":[[2025,9,15]],"date-time":"2025-09-15T17:35:41Z","timestamp":1757957741000},"page":"1-7","source":"Crossref","is-referenced-by-count":0,"title":["Precon: A Precision-Convertible Architecture for Accelerating Quantized Deep Learning Models across Various Domains Including LLMs"],"prefix":"10.1109","author":[{"given":"Jongwoo","family":"Park","sequence":"first","affiliation":[{"name":"Kyung Hee University,Republic of Korea"}]},{"given":"Hyeonseong","family":"Kim","sequence":"additional","affiliation":[{"name":"Kyung Hee University,Republic of Korea"}]},{"given":"Jiyun","family":"Han","sequence":"additional","affiliation":[{"name":"Kyung Hee University,Republic of 
Korea"}]},{"given":"Seungkyu","family":"Choi","sequence":"additional","affiliation":[{"name":"Kyung Hee University,Republic of Korea"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1097","article-title":"Imagenet classification with deep convolutional neural networks","volume-title":"Proceedings of the 25th International Conference on Neural Information Processing Systems","volume":"1","author":"Krizhevsky"},{"key":"ref2","article-title":"Attention is all you need","author":"Vaswani","year":"2017","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1146\/annurev-control-030323-022510"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.mlwa.2021.100164"},{"key":"ref5","article-title":"Gpt-4 technical report","volume-title":"arXiv preprint arXiv:2303.08774","author":"Achiam","year":"2023"},{"key":"ref6","article-title":"Llama: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv preprint arXiv:2302.13971"},{"key":"ref7","article-title":"Opt: Open pre-trained transformer language models","author":"Zhang","year":"2022","journal-title":"arXiv preprint arXiv:2205.01068"},{"key":"ref8","first-page":"795","article-title":"Sustainable ai: Environmental implications, challenges and opportunities","volume-title":"Proceedings of Machine Learning and Systems","volume":"4","author":"Wu"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.23919\/DATE.2017.7927224"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/DAC.2018.8465893"},{"key":"ref11","article-title":"Post training 4-bit quantization of convolutional networks for rapid-deployment","volume":"32","author":"Banner","year":"2019","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref12","article-title":"Mkq-bert: Quantized bert with 4-bits weights and activations","author":"Tang","year":"2022","journal-title":"arXiv preprint 
arXiv:2203.13483"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19775-8_12"},{"key":"ref14","first-page":"38087","article-title":"Smoothquant: Accurate and efficient post-training quantization for large language models","volume-title":"International Conference on Machine Learning","author":"Xiao"},{"key":"ref15","first-page":"87","article-title":"Awq: Activation-aware weight quantization for on-device llm compression and acceleration","volume-title":"Proceedings of Machine Learning and Systems","volume":"6","author":"Lin"},{"article-title":"Optq: Accurate quantization for generative pre-trained transformers","volume-title":"The Eleventh International Conference on Learning Representations","author":"Frantar","key":"ref16"},{"key":"ref17","article-title":"Qlora: Efficient finetuning of quantized llms","volume":"36","author":"Dettmers","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref18","article-title":"Lut-gemm: Quantized matrix multiplication based on luts for efficient inference in large-scale generative language models","author":"Park","year":"2022","journal-title":"arXiv preprint arXiv:2206.09557"},{"key":"ref19","article-title":"Qllm: Accurate and efficient low-bitwidth quantization for large language models","author":"Liu","year":"2023","journal-title":"arXiv preprint arXiv:2310.08041"},{"key":"ref20","article-title":"Qserve: W4a8kv4 quantization and system co-design for efficient llm serving","author":"Lin","year":"2024","journal-title":"arXiv preprint arXiv:2405.04532"},{"key":"ref21","article-title":"Quarot: Outlier-free 4-bit inference in rotated llms","author":"Ashkboos","year":"2024","journal-title":"arXiv preprint 
arXiv:2404.00456"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA57654.2024.00064"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA.2018.00069"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA56546.2023.10071031"},{"key":"ref25","article-title":"Spinquant: Llm quantization with learned rotations","author":"Liu","year":"2024","journal-title":"arXiv preprint arXiv:2405.16406"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TC.2021.3078316"},{"key":"ref27","article-title":"A study of bfloat16 for deep learning training","author":"Kalamkar","year":"2019","journal-title":"arXiv preprint arXiv:1905.12322"},{"key":"ref28","article-title":"The pile: An 800 gb dataset of diverse text for language modeling","author":"Gao","year":"2020","journal-title":"arXiv preprint arXiv:2101.00027"},{"key":"ref29","article-title":"Pointer sentinel mixture models","author":"Merity","year":"2016","journal-title":"arXiv preprint arXiv:1609.07843"},{"key":"ref30","article-title":"Deep residual learning for image recognition","volume":"abs\/1512.03385","author":"He","year":"2015","journal-title":"CoRR"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2018.00474"},{"article-title":"Efficientnet: Rethinking model scaling for convolutional neural networks","year":"2020","author":"Tan","key":"ref32"},{"key":"ref33","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv preprint arXiv:1810.04805"},{"key":"ref34","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv preprint arXiv:2010.11929"},{"key":"ref35","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"International conference on machine 
learning","author":"Touvron"},{"key":"ref36","first-page":"28","article-title":"Cacti 6.0: A tool to model large caches","volume":"27","author":"Muralimanohar","year":"2009","journal-title":"HP laboratories"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA52012.2021.00010"}],"event":{"name":"2025 62nd ACM\/IEEE Design Automation Conference (DAC)","start":{"date-parts":[[2025,6,22]]},"location":"San Francisco, CA, USA","end":{"date-parts":[[2025,6,25]]}},"container-title":["2025 62nd ACM\/IEEE Design Automation Conference (DAC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11132383\/11132091\/11133184.pdf?arnumber=11133184","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,16]],"date-time":"2025-09-16T05:25:11Z","timestamp":1758000311000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11133184\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,22]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/dac63849.2025.11133184","relation":{},"subject":[],"published":{"date-parts":[[2025,6,22]]}}}