{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T00:29:33Z","timestamp":1770337773081,"version":"3.49.0"},"reference-count":32,"publisher":"IEEE","license":[{"start":{"date-parts":[[2026,1,9]],"date-time":"2026-01-09T00:00:00Z","timestamp":1767916800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,1,9]],"date-time":"2026-01-09T00:00:00Z","timestamp":1767916800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100004358","name":"Samsung","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100004358","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026,1,9]]},"DOI":"10.1109\/ccnc65079.2026.11366259","type":"proceedings-article","created":{"date-parts":[[2026,2,4]],"date-time":"2026-02-04T20:45:15Z","timestamp":1770237915000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["Toward Cost-Efficient LLM Serving: A System-Level Memory Optimization Approach"],"prefix":"10.1109","author":[{"given":"Geunsik","family":"Lim","sequence":"first","affiliation":[{"name":"Samsung Electronics,Seoul,South Korea"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Megatron-lm: Training multi-billion parameter language models using model parallelism","author":"Mohammad","year":"2019"},{"key":"ref2","article-title":"Beyond malloc efficiency to fleet efficiency: a hugepage-aware memory allocator","volume-title":"Proc. OSDI","author":"Hunter"},{"key":"ref3","article-title":"Numalloc: A faster numa memory allocator","volume-title":"Proc. ISMM","author":"Hanmei"},{"key":"ref4","article-title":"LLM inference serving: Survey of recent advances and opportunities","volume-title":"Proc. HPEC","author":"Baolin"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3754448"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/AIIoT61789.2024.10579033"},{"key":"ref7","article-title":"mCAP: Memory-centric partitioning for large-scale pipeline-parallel DNN training","volume-title":"Proc. Euro-Par","author":"Henk"},{"key":"ref8","article-title":"Optimal GPU-CPU offloading strategies for deep neural network training","volume-title":"Proc. Euro-Par","author":"Olivier"},{"issue":"6","key":"ref9","first-page":"11 152","article-title":"CPU\u2013GPU heterogeneous computation offloading and resource allocation scheme for industrial internet of things","volume":"11","author":"Zixuan","year":"2023","journal-title":"IEEE Internet of Things Journal"},{"key":"ref10","article-title":"FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness","volume-title":"Proc. NeurIPS","author":"Tri"},{"key":"ref11","article-title":"FlexGen: high-throughput generative inference of large language models with a single GPU","volume-title":"Proc. ICML","author":"Ying"},{"key":"ref12","article-title":"GPipe: Efficient training of giant neural networks using pipeline parallelism","volume-title":"Proc. NeurIPS","author":"Yanpin"},{"key":"ref13","article-title":"eLLM: Elastic memory management framework for efficient LLM serving","author":"Jiale","year":"2025"},{"key":"ref14","article-title":"Pie: Pooling CPU memory for LLM inference","author":"Yi","year":"2024"},{"key":"ref15","article-title":"Neo: Saving GPU memory crisis with CPU offloading for online llm inference","author":"Jiang","year":"2024"},{"key":"ref16","article-title":"SwapAdvisor: Pushing deep learning beyond the GPU memory limit via smart swapping","volume-title":"Proc. ASPLOS","author":"Chien-Chin"},{"key":"ref17","article-title":"Edge-moe: Memory-efficient multi-task vision transformer architecture with task-level sparsity via mixture-of-experts","volume-title":"Proc. ICCAD","author":"Rishov"},{"key":"ref18","article-title":"Prefillonly: An inference engine for prefill-only workloads in large language model applications","volume-title":"Proc. SOSP","author":"Kuntai"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TCCN.2025.3612760"},{"key":"ref20","article-title":"Beyond the socket: NUMA-aware GPUs","volume-title":"Proc. MICRO","author":"Ugljesa"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/tmc.2024.3355764"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2025.3526606"},{"key":"ref23","article-title":"Llama release: a meta-llama\u2019s collections"},{"key":"ref24","article-title":"SpecOffload: Unlocking latent GPU capacity for LLM inference on resource-constrained devices","author":"Zhuge","year":"2025"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/SC41404.2022.00051"},{"key":"ref26","article-title":"P\/d-serve: Serving disaggregated large language model at scale","author":"Yibo","year":"2024"},{"key":"ref27","article-title":"Orca: A distributed serving system for transformer-based generative models","volume-title":"Proc. OSDI","author":"Gyeong-In"},{"key":"ref28","article-title":"Dynamollm: Designing LLM inference clusters for performance and energy efficiency","volume-title":"Proc. HPCA","author":"Jovan"},{"key":"ref29","article-title":"Sparsegpt: Massive language models can be accurately pruned in one-shot","volume-title":"Proc. ICML","author":"Elias"},{"key":"ref30","first-page":"341","article-title":"Reducing activation recomputation in large transformer models","volume-title":"Proc. MLSys","volume":"5","author":"Anand"},{"key":"ref31","article-title":"Efficient memory management for large language model serving with pagedattention","volume-title":"Proc. SOSP","author":"Woosuk"},{"key":"ref32","article-title":"Revisiting memory swapping for big-memory applications","volume-title":"Proc. HPCASIA","author":"Shun"}],"event":{"name":"2026 IEEE 23rd Consumer Communications &amp; Networking Conference (CCNC)","location":"Las Vegas, NV, USA","start":{"date-parts":[[2026,1,9]]},"end":{"date-parts":[[2026,1,12]]}},"container-title":["2026 IEEE 23rd Consumer Communications &amp; Networking Conference (CCNC)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11366253\/11366254\/11366259.pdf?arnumber=11366259","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,5]],"date-time":"2026-02-05T06:16:36Z","timestamp":1770272196000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11366259\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1,9]]},"references-count":32,"URL":"https:\/\/doi.org\/10.1109\/ccnc65079.2026.11366259","relation":{},"subject":[],"published":{"date-parts":[[2026,1,9]]}}}