{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,16]],"date-time":"2026-01-16T13:20:20Z","timestamp":1768569620483,"version":"3.49.0"},"reference-count":41,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Develop Program of China","award":["2024YFC33065"],"award-info":[{"award-number":["2024YFC33065"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Comput."],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1109\/tc.2025.3626449","type":"journal-article","created":{"date-parts":[[2025,10,28]],"date-time":"2025-10-28T17:33:00Z","timestamp":1761672780000},"page":"503-515","source":"Crossref","is-referenced-by-count":0,"title":["MI-LLM: Multiplier-Free LLM Inference on Commodity Processing-in-Memory Hardware"],"prefix":"10.1109","volume":"75","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-2630-2918","authenticated-orcid":false,"given":"Puyun","family":"Hu","sequence":"first","affiliation":[{"name":"School of Information, Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6684-8336","authenticated-orcid":false,"given":"Minhui","family":"Xie","sequence":"additional","affiliation":[{"name":"School of Information, Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-0941-2639","authenticated-orcid":false,"given":"Linjiang","family":"Li","sequence":"additional","affiliation":[{"name":"School of Information, Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-1945-3521","authenticated-orcid":false,"given":"Kuiyaohui","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Information, Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-2963-2479","authenticated-orcid":false,"given":"Erge","family":"Xiang","sequence":"additional","affiliation":[{"name":"School of Information, Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3653-7013","authenticated-orcid":false,"given":"Jing","family":"Wang","sequence":"additional","affiliation":[{"name":"School of Information, Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9471-1780","authenticated-orcid":false,"given":"Size","family":"Zheng","sequence":"additional","affiliation":[{"name":"Tsinghua University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-1857-1368","authenticated-orcid":false,"given":"Xiao","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Information, Renmin University of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3286-9259","authenticated-orcid":false,"given":"Yunpeng","family":"Chai","sequence":"additional","affiliation":[{"name":"School of Information, Renmin University of China, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"GPT-4 technical report","author":"Achiam","year":"2023"},{"key":"ref2","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref3","article-title":"Bard"},{"key":"ref4","article-title":"SearchGPT is a prototype of new AI search features"},{"key":"ref5","article-title":"GPTVoiceTasker: LLM-powered virtual assistant for smartphone","author":"Vu","year":"2024"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/HPCA51647.2021.00080"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA45697.2020.00071"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3174101"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3620665.3640376"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3570361.3613285"},{"key":"ref11","article-title":"LUT-GEMM: Quantized matrix multiplication based on LUTs for efficient inference in large-scale generative language models","author":"Park","year":"2022"},{"key":"ref12","first-page":"10323","article-title":"SparseGPT: Massive language models can be accurately pruned in one-shot","volume-title":"Proc. Int. Conf. Mach. Learn.,","author":"Frantar","year":"2023"},{"key":"ref13","article-title":"A simple and effective pruning approach for large language models","author":"Sun","year":"2023"},{"key":"ref14","article-title":"Towards coarse-to-fine evaluation of inference efficiency for large language models","author":"Chen","year":"2024"},{"key":"ref15","article-title":"UPMEM announces processing-in-memory silicon-based benchmarks"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/IGSC54211.2021.9651614"},{"key":"ref17","doi-asserted-by":"crossref","DOI":"10.14778\/3574245.3574275","article-title":"PIM-tree: A skew-resistant index for processing-in-memory","author":"Kang","year":"2022"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3508041"},{"key":"ref19","article-title":"Accelerating graph neural networks on real processing-in-memory systems","author":"Giannoula","year":"2024"},{"key":"ref20","article-title":"GPTQ: Accurate post-training quantization for generative pre-trained transformers","author":"Frantar","year":"2022"},{"key":"ref21","first-page":"87","article-title":"AWQ: Activation-aware weight quantization for on-device LLM compression and acceleration","volume":"6","author":"Lin","year":"2024","journal-title":"Proc. Mach. Learn. Syst."},{"key":"ref22","first-page":"38087","article-title":"SmoothQuant: Accurate and efficient post-training quantization for large language models","volume-title":"Proc. Int. Conf. Mach. Learn.,","author":"Xiao","year":"2023"},{"key":"ref23","article-title":"FP8 quantization: The power of the exponent","author":"Kuzmin","year":"2024"},{"key":"ref24","article-title":"Zeroquant-FP: A leap forward in LLMs post-training W4A8 quantization using floating-point formats","author":"Wu","year":"2023"},{"key":"ref25","article-title":"SqueezeLLM: Dense-and-sparse quantization","author":"Kim","year":"2023"},{"key":"ref26","article-title":"Nomad-attention: Efficient LLM inference on CPUs through multiply-add-free attention","author":"Zhang","year":"2024"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/HCS55958.2022.9895629"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/isscc42613.2021.9365862"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/DAC56929.2023.10247915"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1093\/bioinformatics\/btad155"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3592980.3595312"},{"key":"ref32","article-title":"An experimental evaluation of machine learning training on a real processing-in-memory system","author":"G\u00f3mez-Luna","year":"2023"},{"key":"ref33","doi-asserted-by":"crossref","DOI":"10.1109\/ISPASS57527.2023.00031","article-title":"TransPimLib: A library for efficient transcendental functions on processing-in-memory systems","author":"Item","year":"2023"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/SOCC56010.2022.9908126"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO56248.2022.00067"},{"key":"ref36","article-title":"Multiplying matrices without multiplying","author":"Blalock","year":"2021"},{"key":"ref37","first-page":"1","article-title":"ReD-LUT: Reconfigurable In-DRAM LUTs enabling massive parallel computation","volume-title":"Proc. 41st IEEE\/ACM Int. Conf. Comput.-Aided Des.","author":"Zhou","year":"2022"},{"issue":"5s","key":"ref38","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3126531","article-title":"QLUT: Input-aware quantized table lookup for energy-efficient approximate accelerators","volume":"16","author":"Raha","year":"2017","journal-title":"ACM Trans. Embedded Comput. Syst. (TECS)"},{"key":"ref39","article-title":"Understanding straight-through estimator in training activation quantized neural nets","author":"Yin","year":"2019"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1145\/3600006.3613165"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3620666.3651380"}],"container-title":["IEEE Transactions on Computers"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/12\/11353113\/11219263.pdf?arnumber=11219263","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,15]],"date-time":"2026-01-15T20:49:28Z","timestamp":1768510168000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11219263\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2]]},"references-count":41,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tc.2025.3626449","relation":{},"ISSN":["0018-9340","1557-9956","2326-3814"],"issn-type":[{"value":"0018-9340","type":"print"},{"value":"1557-9956","type":"electronic"},{"value":"2326-3814","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2]]}}}