{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,4]],"date-time":"2026-03-04T21:06:40Z","timestamp":1772658400751,"version":"3.50.1"},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2026,2,15]],"date-time":"2026-02-15T00:00:00Z","timestamp":1771113600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,2,15]],"date-time":"2026-02-15T00:00:00Z","timestamp":1771113600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"NSFC","doi-asserted-by":"publisher","award":["62125403,92464302,92164301,62304121"],"award-info":[{"award-number":["62125403,92464302,92164301,62304121"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program","doi-asserted-by":"publisher","award":["2023YFB4403100,2021ZD0114400"],"award-info":[{"award-number":["2023YFB4403100,2021ZD0114400"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100018537","name":"National Science and Technology Major Project","doi-asserted-by":"publisher","award":["2022ZD0115201"],"award-info":[{"award-number":["2022ZD0115201"]}],"id":[{"id":"10.13039\/501100018537","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100017582","name":"Beijing National Research Center for Information Science and Technology","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100017582","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026,2,15]]},"DOI":"10.1109\/isscc49663.2026.11408953","type":"proceedings-article","created":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T20:50:24Z","timestamp":1772571024000},"page":"546-548","source":"Crossref","is-referenced-by-count":0,"title":["A 28nm Speculative-Decoding LLM Processor Achieving 105-to-685\u00b5s\/Token Latency for Billion-Parameter Models"],"prefix":"10.1109","author":[{"given":"Yang","family":"Wang","sequence":"first","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Huanyu","family":"Wang","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Jiaxin","family":"Yang","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Yutong","family":"Su","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Ruiqi","family":"Guo","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Zhiheng","family":"Yue","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Jiangyuan","family":"Gu","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Shaojun","family":"Wei","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Yang","family":"Hu","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]},{"given":"Shouyi","family":"Yin","sequence":"additional","affiliation":[{"name":"Tsinghua University,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"GPT-4 Technical Report","year":"2024","journal-title":"arXiv"},{"key":"ref2","article-title":"Gemma 3 Technical Report","author":"Team","year":"2025","journal-title":"arXiv"},{"key":"ref3","article-title":"The Llama 3 Herd of Models","author":"Grattafiori","year":"2024","journal-title":"arXiv"},{"key":"ref4","article-title":"Gemini 2.5: Pushing the Frontier with Advanced Reasoning, Multimodality, Long Context, and Next Generation Agentic Capabilities","author":"Comanici","year":"2025","journal-title":"arXiv"},{"key":"ref5","article-title":"Efficient LLM Inference: Bandwidth, Compute, Synchronization, and Capacity are all you Need","author":"Davies","year":"2025","journal-title":"arXiv"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/isscc49661.2025.10904774"},{"key":"ref7","first-page":"19274","article-title":"Fast Inference from Transformers via Speculative Decoding","author":"Leviathan","year":"2023","journal-title":"ICML"},{"key":"ref8","article-title":"Accelerating LLM Inference with Staged Speculative Decoding","volume-title":"Workshop on Efficient Systems for Foundation Models at ICML","author":"Spector","year":"2023"},{"key":"ref9","first-page":"5209","article-title":"MEDUSA: Simple LLM Inference Acceleration Framework with Multiple Decoding Heads","author":"Cai","year":"2024","journal-title":"ICML"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3620666.3651335"},{"key":"ref11","article-title":"DistillSpec: Improving Speculative Decoding via Knowledge Distillation","author":"Zhou","year":"2023","journal-title":"ICLR"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.naacl-long.328"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.486"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00735"},{"key":"ref15","first-page":"368","article-title":"C-Transformer: A $2.6-18.1 \\mu \\mathrm{J} \/$ Token Homogeneous DNN-Transformer\/Spiking-Transformer Processor with Big-Little Network and Implicit Weight Generation for Large Language Models","author":"Kim","year":"2024","journal-title":"ISSCC"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/isscc42615.2023.10067817"},{"key":"ref17","first-page":"406","article-title":"T-REX: A 68-to-567\u03bcs\/Token 0.41-to-3.95\u03bcJ\/Token Transformer Accelerator with Reduced External Memory Access and Enhanced Hardware Utilization in 16 nm FinFET","author":"Moon","year":"2025","journal-title":"ISSCC"},{"key":"ref18","first-page":"421","article-title":"Slim-Llama: A 4.69 mW Large-Language-Model Processor with Binary\/Ternary Weights for Billion-Parameter Llama Model","author":"Kim","year":"2025","journal-title":"ISSCC"}],"event":{"name":"2026 IEEE International Solid-State Circuits Conference (ISSCC)","location":"San Francisco, CA, USA","start":{"date-parts":[[2026,2,15]]},"end":{"date-parts":[[2026,2,19]]}},"container-title":["2026 IEEE International Solid-State Circuits Conference (ISSCC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11408863\/11408946\/11408953.pdf?arnumber=11408953","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,4]],"date-time":"2026-03-04T20:47:29Z","timestamp":1772657249000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11408953\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2,15]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/isscc49663.2026.11408953","relation":{},"subject":[],"published":{"date-parts":[[2026,2,15]]}}}