{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,8]],"date-time":"2025-11-08T05:30:22Z","timestamp":1762579822262,"version":"build-2065373602"},"reference-count":45,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2023YFC3306002"],"award-info":[{"award-number":["2023YFC3306002"]}]},{"DOI":"10.13039\/501100001809","name":"Joint Funds of the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["U2336211","92467206","U23A20318"],"award-info":[{"award-number":["U2336211","92467206","U23A20318"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Comput."],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1109\/tc.2025.3604473","type":"journal-article","created":{"date-parts":[[2025,9,2]],"date-time":"2025-09-02T17:30:46Z","timestamp":1756834246000},"page":"4010-4024","source":"Crossref","is-referenced-by-count":0,"title":["CoFormer: Collaborating With Heterogeneous Edge Devices for Scalable Transformer Inference"],"prefix":"10.1109","volume":"74","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2771-9272","authenticated-orcid":false,"given":"Guanyu","family":"Xu","sequence":"first","affiliation":[{"name":"School of Information and Electronics, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6237-7028","authenticated-orcid":false,"given":"Zhiwei","family":"Hao","sequence":"additional","affiliation":[{"name":"School of Information and Electronics, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5659-3464","authenticated-orcid":false,"given":"Li","family":"Shen","sequence":"additional","affiliation":[{"name":"School of Cyber Science and Technology, Shenzhen Campus of Sun Yat-sen University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2296-6370","authenticated-orcid":false,"given":"Yong","family":"Luo","sequence":"additional","affiliation":[{"name":"School of Computer Science, National Engineering Research Center for Multimedia Software, Wuhan University, Wuhan, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-5341-0158","authenticated-orcid":false,"given":"Fuhui","family":"Sun","sequence":"additional","affiliation":[{"name":"Information Technology Service Center of People’s Court, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-3972-1575","authenticated-orcid":false,"given":"Xiaoyan","family":"Wang","sequence":"additional","affiliation":[{"name":"Information Technology Service Center of People’s Court, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7532-0496","authenticated-orcid":false,"given":"Han","family":"Hu","sequence":"additional","affiliation":[{"name":"School of Information and Electronics, Beijing Institute of Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2751-5114","authenticated-orcid":false,"given":"Yonggang","family":"Wen","sequence":"additional","affiliation":[{"name":"College of Computing and Data Science, Nanyang Technological University, Singapore"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1","article-title":"SplatFormer: Point transformer for robust 3D Gaussian splatting","volume-title":"Proc. 13th Int. Conf. Learn. Representations (ICLR)","author":"Chen","year":"2025"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3200245"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00858"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"issue":"70","key":"ref5","first-page":"1","article-title":"Scaling instruction-finetuned language models","volume":"25","author":"Chung","year":"2024","journal-title":"J. Mach. Learn. Res."},{"issue":"8","key":"ref6","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2022.3178211"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2022.3177782"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2022.3183098"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1145\/3495243.3560551"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s11633-022-1391-7"},{"key":"ref12","article-title":"3rd generation partnership project; Technical specification group services and system aspects; Study on traffic characteristics and performance requirements for AI\/ML model transfer in 5GS; (Release 18)","volume":"874","author":"3GPP","year":"2021","journal-title":"3rd Gener. Partnership Project (3GPP), Tech. Specification (TS) 22"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TCAD.2018.2858384"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TNET.2020.3042320"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/INFOCOM52122.2024.10621342"},{"key":"ref16","article-title":"Mobilevit: Light-weight, general-purpose, and mobile-friendly vision transformer","volume-title":"Proc. Tenth Int. Conf. Learn. Representations (ICLR)","author":"Mehta","year":"2022"},{"key":"ref17","first-page":"1","article-title":"Separable self-attention for mobile vision transformers","author":"Mehta","year":"2023","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref18","first-page":"10347","article-title":"Training data-efficient image transformers & distillation through attention","volume-title":"Proc. 38th Int. Conf. Mach. Learn., (ICML)","volume":"139","author":"Touvron","year":"2021"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02333"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02170"},{"key":"ref21","first-page":"8714","article-title":"Searching the search space of vision transformer","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","author":"Chen","year":"2021"},{"key":"ref22","first-page":"1","article-title":"Once-for-all: Train one network and specialize it for efficient deployment","volume-title":"Proc. 8th Int. Conf. Learn. Representations (ICLR)","author":"Cai","year":"2020"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2022.3222509"},{"key":"ref26","article-title":"EfficientFormer: Vision transformers at MobileNet speed","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","author":"Li","year":"2022"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1580"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/W19-4828"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01332"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-022-10283-5"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1093\/comjnl\/bxz120"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.322"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01055"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2023.3315138"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TPDS.2024.3521582"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2024.3524255"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/3487552.3487863"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/s41095-022-0274-8"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20077-9_17"},{"key":"ref41","first-page":"14200","article-title":"Attention bottlenecks for multimodal fusion","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","author":"Nagrani","year":"2021"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2913372"},{"key":"ref43","first-page":"79570","article-title":"One-for-all: Bridge the gap between heterogeneous architectures in knowledge distillation","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","author":"Hao","year":"2023"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2022.3177569"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.23919\/DATE58400.2024.10546617"}],"container-title":["IEEE Transactions on Computers"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/12\/11234919\/11146814.pdf?arnumber=11146814","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,8]],"date-time":"2025-11-08T05:26:23Z","timestamp":1762579583000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11146814\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12]]},"references-count":45,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tc.2025.3604473","relation":{},"ISSN":["0018-9340","1557-9956","2326-3814"],"issn-type":[{"type":"print","value":"0018-9340"},{"type":"electronic","value":"1557-9956"},{"type":"electronic","value":"2326-3814"}],"subject":[],"published":{"date-parts":[[2025,12]]}}}