{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T05:51:47Z","timestamp":1763704307375,"version":"3.45.0"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,26]],"date-time":"2025-10-26T00:00:00Z","timestamp":1761436800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,26]],"date-time":"2025-10-26T00:00:00Z","timestamp":1761436800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,26]]},"DOI":"10.1109\/iccad66269.2025.11240713","type":"proceedings-article","created":{"date-parts":[[2025,11,20]],"date-time":"2025-11-20T18:39:34Z","timestamp":1763663974000},"page":"1-9","source":"Crossref","is-referenced-by-count":0,"title":["H\n                    <sup>2<\/sup>\n                    EAL: Hybrid-Bonding Architecture with Hybrid Sparse Attention for Efficient Long-Context LLM Inference"],"prefix":"10.1109","author":[{"given":"Zizhuo","family":"Fu","sequence":"first","affiliation":[{"name":"Institute for Artificial Intelligence"}]},{"given":"Xiaotian","family":"Guo","sequence":"additional","affiliation":[{"name":"Peking University,School of Integrated Circuits,Beijing,China"}]},{"given":"Wenxuan","family":"Zeng","sequence":"additional","affiliation":[{"name":"Institute for Artificial Intelligence"}]},{"given":"Shuzhang","family":"Zhong","sequence":"additional","affiliation":[{"name":"Institute for Artificial Intelligence"}]},{"given":"Yadong","family":"Zhang","sequence":"additional","affiliation":[{"name":"Nano Core Chip Electronic Technology,Hangzhou,China"}]},{"given":"Peiyu","family":"Chen","sequence":"additional","affiliation":[{"name":"Nano Core Chip Electronic Technology,Hangzhou,China"}]},
{"given":"Runsheng","family":"Wang","sequence":"additional","affiliation":[{"name":"Peking University,School of Integrated Circuits,Beijing,China"}]},{"given":"Le","family":"Ye","sequence":"additional","affiliation":[{"name":"Peking University,School of Integrated Circuits,Beijing,China"}]},{"given":"Meng","family":"Li","sequence":"additional","affiliation":[{"name":"Institute for Artificial Intelligence"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3771090"},{"article-title":"Agent ai: Surveying the horizons of multimodal interaction","year":"2024","author":"Durante","key":"ref2"},{"article-title":"Language models as zero-shot planners: Extracting actionable knowledge for embodied agents","year":"2022","author":"Huang","key":"ref3"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3545176"},{"article-title":"A survey on multi-turn interaction capabilities of large language models","year":"2025","author":"Zhang","key":"ref5"},{"article-title":"Towards reasoning era: A survey of long chain-of-thought for reasoning large language models","year":"2025","author":"Chen","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3620666.3651380"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/MICRO61859.2024.00105"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3620665.3640422"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/HOTCHIPS.2019.8875680"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ISCA59077.2024.00037"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ISSCC42614.2022.9731694"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/IEDM13553.2020.9372039"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ISSCC42614.2022.9731565"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3695053.3731008"},{"article-title":"H2o: Heavy-hitter oracle for efficient generative inference of large language models","year":"2023","author":"Zhang","key":"ref16"},
{"article-title":"Quest: Query-aware sparsity for efficient long-context llm inference","year":"2024","author":"Tang","key":"ref17"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.1126"},{"article-title":"Duoattention: Efficient long-context llm inference with retrieval and streaming heads","year":"2024","author":"Xiao","key":"ref19"},{"article-title":"Not all heads matter: A head-level kv cache compression method with integrated retrieval and reasoning","year":"2024","author":"Fu","key":"ref20"},{"article-title":"Keep the cost down: A review on methods to optimize llm\u2019 s kv-cache consumption","year":"2024","author":"Luohe","key":"ref21"},{"article-title":"Efficient streaming language models with attention sinks","year":"2023","author":"Xiao","key":"ref22"},{"article-title":"Model tells you what to discard: Adaptive kv cache compression for llms","year":"2023","author":"Ge","key":"ref23"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.52202\/079017-3595"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/JSSC.2022.3213542"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TC.2024.3500362"},{"article-title":"Lserve: Efficient long-sequence llm serving with unified sparse attention","year":"2025","author":"Yang","key":"ref27"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.298"},{"article-title":"Fast transformer decoding: One write-head is all you need","year":"2019","author":"Shazeer","key":"ref29"},{"article-title":"Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality","year":"2023","author":"Chiang","key":"ref30"},{"article-title":"Llama 2: Open foundation and fine-tuned chat models","year":"2023","author":"Touvron","key":"ref31"},{"article-title":"Deepseek-v2: A strong, economical, and efficient mixture-of-experts language model","year":"2024","author":"Shao","key":"ref32"},
{"article-title":"Mpcache: Mpc-friendly kv cache eviction for efficient private large language model inference","year":"2025","author":"Zeng","key":"ref33"},{"article-title":"Flashattention: Fast and memory-efficient exact attention with io-awareness","year":"2022","author":"Dao","key":"ref34"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.4153\/CJM-1956-045-5"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ISSCC42613.2021.9365766"},{"article-title":"Qserve: W4a8kv4 quantization and system co-design for efficient llm serving","year":"2024","author":"Lin","key":"ref37"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.172"},{"year":"2023","key":"ref39","article-title":"Llmtest needle in a haystack - pressure testing llms"}],"event":{"name":"2025 IEEE\/ACM International Conference On Computer Aided Design (ICCAD)","start":{"date-parts":[[2025,10,26]]},"location":"Munich, Germany","end":{"date-parts":[[2025,10,30]]}},"container-title":["2025 IEEE\/ACM International Conference On Computer Aided Design (ICCAD)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11240608\/11240621\/11240713.pdf?arnumber=11240713","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,21]],"date-time":"2025-11-21T05:43:19Z","timestamp":1763703799000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11240713\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,26]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/iccad66269.2025.11240713","relation":{},"subject":[],"published":{"date-parts":[[2025,10,26]]}}}