{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,8]],"date-time":"2026-05-08T16:20:25Z","timestamp":1778257225704,"version":"3.51.4"},"reference-count":15,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"9","license":[{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100002920","name":"Research Grants Council of Hong Kong","doi-asserted-by":"publisher","award":["27213824"],"award-info":[{"award-number":["27213824"]}],"id":[{"id":"10.13039\/501100002920","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100009326","name":"CRS","doi-asserted-by":"publisher","award":["HKU702\/24"],"award-info":[{"award-number":["HKU702\/24"]}],"id":[{"id":"10.13039\/100009326","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003803","name":"HKU","doi-asserted-by":"publisher","award":["RFS2122-7S04"],"award-info":[{"award-number":["RFS2122-7S04"]}],"id":[{"id":"10.13039\/501100003803","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Commun. Mag."],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1109\/mcom.001.2400764","type":"journal-article","created":{"date-parts":[[2025,9,5]],"date-time":"2025-09-05T18:20:18Z","timestamp":1757096418000},"page":"52-59","source":"Crossref","is-referenced-by-count":33,"title":["Pushing Large Language Models to the 6G Edge: Vision, Challenges, and Opportunities"],"prefix":"10.1109","volume":"63","author":[{"given":"Zheng","family":"Lin","sequence":"first","affiliation":[{"name":"University of Hong Kong,China"}]},{"given":"Guanqiao","family":"Qu","sequence":"additional","affiliation":[{"name":"University of Hong Kong,China"}]},{"given":"Qiyuan","family":"Chen","sequence":"additional","affiliation":[{"name":"University of Hong Kong,China"}]},{"given":"Xianhao","family":"Chen","sequence":"additional","affiliation":[{"name":"University of Hong Kong,China"}]},{"given":"Zhe","family":"Chen","sequence":"additional","affiliation":[{"name":"Fudan University,China"}]},{"given":"Kaibin","family":"Huang","sequence":"additional","affiliation":[{"name":"University of Hong Kong,China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Gpt-4 Technical Report","author":"Achiam","year":"2023","journal-title":"arXiv preprint"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/MCOM.001.2300550"},{"key":"ref3","article-title":"Palm-e: An Embodied Multimodal Language Model","volume-title":"Proc. ICML","author":"Driess","year":"2023"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.678"},{"key":"ref5","article-title":"MobileLLM: Optimizing Sub-Billion Parameter Language Models for On-Device Use Cases","author":"Liu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref6","volume-title":"IMT-2030 Framework and Overall Objectives of the Future Development of IMT for 2030 and Beyond","year":"2023"},{"key":"ref7","article-title":"Split Learning for Health: Distributed Deep Learning Without Sharing Raw Patient Data","author":"Vepakomma","year":"2018","journal-title":"arXiv preprint"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/MWC.014.2300319"},{"key":"ref9","article-title":"Splitlora: A Split Parameter-Efficient Fine-tuning Framework for Large Language Models","author":"Lin","year":"2024","journal-title":"arXiv preprint"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3458817.3476209"},{"key":"ref11","article-title":"Gear: An Efficient KV Cache Compression Recipe for Near-lossless Generative Inference of LLM","author":"Kang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref12","volume-title":"Fastertransformer","year":"2023"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/mwc.003.2400046"},{"key":"ref14","article-title":"Fast Inference From Transformers via Speculative Decoding","volume-title":"Proc. ICML","author":"Leviathan","year":"2023"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3662006.3662067"}],"container-title":["IEEE Communications Magazine"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/35\/11152644\/11152695.pdf?arnumber=11152695","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,8]],"date-time":"2025-09-08T17:45:34Z","timestamp":1757353534000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11152695\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9]]},"references-count":15,"journal-issue":{"issue":"9"},"URL":"https:\/\/doi.org\/10.1109\/mcom.001.2400764","relation":{},"ISSN":["0163-6804","1558-1896"],"issn-type":[{"value":"0163-6804","type":"print"},{"value":"1558-1896","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9]]}}}