{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,8]],"date-time":"2026-03-08T01:54:17Z","timestamp":1772934857542,"version":"3.50.1"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T00:00:00Z","timestamp":1765152000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,8]]},"DOI":"10.1109\/bigdata66926.2025.11400739","type":"proceedings-article","created":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T20:57:57Z","timestamp":1772830677000},"page":"5930-5939","source":"Crossref","is-referenced-by-count":0,"title":["Towards Uncertainty-Aware Low-Bit Quantized LLMs for On-Device Inference"],"prefix":"10.1109","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9450-7387","authenticated-orcid":false,"given":"Lorenz","family":"Sparrenberg","sequence":"first","affiliation":[{"name":"University of Bonn,Bonn,Germany"}]},{"given":"Tobias","family":"Schneider","sequence":"additional","affiliation":[{"name":"University of Bonn,Bonn,Germany"}]},{"given":"Tobias","family":"Deu\u00dfer","sequence":"additional","affiliation":[{"name":"University of Bonn,Bonn,Germany"}]},{"given":"Armin","family":"Berger","sequence":"additional","affiliation":[{"name":"University of Bonn,Bonn,Germany"}]},{"given":"Rafet","family":"Sifa","sequence":"additional","affiliation":[{"name":"University of Bonn,Bonn,Germany"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. 
NeurIPS","volume":"33","author":"Brown","year":"2020"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00632"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1002\/hcs2.61"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/BigData62323.2024.10825941"},{"key":"ref5","first-page":"30318","article-title":"Gpt3.int8(): 8-bit matrix multiplication for transformers at scale","volume-title":"Advances in Neural Information Processing Systems","volume":"35","author":"Dettmers","year":"2022"},{"key":"ref6","first-page":"27 168","article-title":"Zeroquant: Efficient and affordable post-training quantization for largescale transformers","volume-title":"Advances in Neural Information Processing Systems","volume":"35","author":"Yao","year":"2022"},{"key":"ref7","first-page":"38087","article-title":"SmoothQuant: Accurate and efficient post-training quantization for large language models","volume-title":"Proceedings of the 40th International Conference on Machine Learning, ser. 
Proceedings of Machine Learning Research","volume":"202","author":"Xiao","year":"2023"},{"key":"ref8","article-title":"Calibrating verbalized probabilities for large language models","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.824"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00494"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.366"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.783"},{"key":"ref13","article-title":"Qwen2.5 technical report","author":"Yang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref14","article-title":"Comment on issue \\#525","volume-title":"gitHub comment stating that the model\u2019s training data includes materials up to the end of 2023","year":"2024"},{"key":"ref15","article-title":"llama.cpp: LLM inference in C\/C++","author":"Gerganov","year":"2023"},{"key":"ref16","article-title":"k-quants","volume-title":"GitHub pull request \\#1684, issue comment \\#2474462323","author":"Kawrakow","year":"2025"},{"key":"ref17","article-title":"English k_quantization of LLMs does not disproportionately diminish multilingual performance","author":"Borgersen","year":"2025","journal-title":"arXiv preprint"},{"key":"ref18","article-title":"k-quants","volume-title":"GitHub pull request \\#1684","author":"Kawrakow","year":"2025"},{"key":"ref19","doi-asserted-by":"crossref","first-page":"12230","DOI":"10.18653\/v1\/2024.emnlp-main.682","article-title":"ReadMe++: Benchmarking multilingual language models for multidomain readability assessment","volume-title":"Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing","author":"Naous","year":"2024"},{"key":"ref20","doi-asserted-by":"crossref","first-page":"20","DOI":"10.18653\/v1\/2024.tsar-1.3","article-title":"CompLex-ZH: A new dataset for lexical complexity prediction in 
Mandarin and Cantonese","volume-title":"Proceedings of the Third Workshop on Text Simplification, Accessibility and Readability (TSAR 2024)","author":"Qiu","year":"2024"},{"key":"ref21","article-title":"Global Index on Responsible AI2024 (1st Edition), 1st ed. South Africa","volume-title":"Global Center on AI Governance","author":"Adams","year":"2024"},{"key":"ref22","article-title":"Darkbench: Benchmarking dark patterns in large language models","volume-title":"Workshop on Datasets and Evaluators of AI Safety","author":"Kran","year":"2025"},{"key":"ref23","volume-title":"Compare models - openai api","year":"2025"},{"key":"ref24","article-title":"Gptq: Accurate post-training quantization for generative pre-trained transformers","volume-title":"11th International Conference on Learning Representations","author":"Frantar","year":"2023"},{"key":"ref25","first-page":"87","article-title":"Awq: Activation-aware weight quantization for on-device LLM compression and acceleration","volume-title":"Proceedings of Machine Learning and Systems","volume":"6","author":"Lin","year":"2024"},{"key":"ref26","first-page":"10088","article-title":"Qlora: Efficient finetuning of quantized LLMs","volume-title":"Advances in Neural Information Processing Systems","volume":"36","author":"Dettmers","year":"2023"}],"event":{"name":"2025 IEEE International Conference on Big Data (BigData)","location":"Macau, China","start":{"date-parts":[[2025,12,8]]},"end":{"date-parts":[[2025,12,11]]}},"container-title":["2025 IEEE International Conference on Big Data 
(BigData)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11400704\/11400712\/11400739.pdf?arnumber=11400739","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T07:18:43Z","timestamp":1772867923000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11400739\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,8]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/bigdata66926.2025.11400739","relation":{},"subject":[],"published":{"date-parts":[[2025,12,8]]}}}