{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:19:21Z","timestamp":1775229561247,"version":"3.50.1"},"reference-count":70,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Japan Society for the Promotion of Science (JSPS) KAKENHI","award":["23K28070"],"award-info":[{"award-number":["23K28070"]}]},{"name":"National Research Foundation, Singapore and Infocomm Media Development Authority","award":["FCP-NTU-RG-2022-010"],"award-info":[{"award-number":["FCP-NTU-RG-2022-010"]}]},{"name":"National Research Foundation, Singapore and Infocomm Media Development Authority","award":["FCP-ASTAR-TG-2022-003"],"award-info":[{"award-number":["FCP-ASTAR-TG-2022-003"]}]},{"name":"Singapore Ministry of Education (MOE) Tier 1","award":["RG87\/22"],"award-info":[{"award-number":["RG87\/22"]}]},{"name":"Singapore Ministry of Education (MOE) Tier 1","award":["RG24\/24"],"award-info":[{"award-number":["RG24\/24"]}]},{"name":"NTU Centre for Computational Technologies in Finance","award":["RIE2025"],"award-info":[{"award-number":["RIE2025"]}]},{"name":"RIE2025 Industry Alignment Fund - Industry Collaboration Projects","award":["I2301E0026"],"award-info":[{"award-number":["I2301E0026"]}]},{"name":"Alibaba Group"},{"DOI":"10.13039\/501100001475","name":"Nanyang Technological University","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001475","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Alibaba-NTU Global e-Sustainability CorpLab"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
on Mobile Comput."],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1109\/tmc.2025.3564543","type":"journal-article","created":{"date-parts":[[2025,4,25]],"date-time":"2025-04-25T13:42:34Z","timestamp":1745588554000},"page":"9822-9836","source":"Crossref","is-referenced-by-count":12,"title":["Task-Oriented Semantic Communication in Large Multimodal Models-Based Vehicle Networks"],"prefix":"10.1109","volume":"24","author":[{"given":"Baoxia","family":"Du","sequence":"first","affiliation":[{"name":"Institute of Science and Engineering, Kanazawa University, Kanazawa, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8220-6525","authenticated-orcid":false,"given":"Hongyang","family":"Du","sequence":"additional","affiliation":[{"name":"Department of Electrical and Electronic Engineering, University of Hong Kong, Pok Fu Lam, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7442-7416","authenticated-orcid":false,"given":"Dusit","family":"Niyato","sequence":"additional","affiliation":[{"name":"College of Computing and Data Science, Nanyang Technological University, Singapore"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9905-8952","authenticated-orcid":false,"given":"Ruidong","family":"Li","sequence":"additional","affiliation":[{"name":"Institute of Science and Engineering, Kanazawa University, Kanazawa, Japan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s11432-024-4235-6"},{"key":"ref2","article-title":"MathVista: Evaluating math reasoning in visual contexts with GPT-4V, bard, and other large multimodal models","author":"Lu","year":"2023"},{"key":"ref3","article-title":"Improved baselines with visual instruction tuning","author":"Liu","year":"2023"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1561\/0600000110"},{"key":"ref5","article-title":"GPT-4V in wonderland: Large multimodal models for zero-shot smartphone GUI navigation","author":"Yan","year":"2023"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2024.3418881"},{"key":"ref7","article-title":"GPT-4 Technical Report","year":"2023"},{"key":"ref8","article-title":"ChatGPT","year":"2024"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/WACVW60836.2024.00106"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/tiv.2024.3402136"},{"key":"ref11","article-title":"HiLM-D: Towards high-resolution understanding in multimodal large language models for autonomous driving","author":"Ding","year":"2023"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/WACVW60836.2024.00101"},{"key":"ref13","article-title":"SurrealDriver: Designing generative driver agent simulation framework in urban contexts based on large language model","author":"Jin","year":"2023"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.3390\/s24134113"},{"key":"ref15","article-title":"Efficient multimodal large language models: A survey","author":"Jin","year":"2024"},{"key":"ref16","doi-asserted-by":"crossref","DOI":"10.1109\/VTC2024-Spring62846.2024.10683673","article-title":"Resource allocation in large language model integrated 6G vehicular networks","author":"Liu","year":"2024"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/IV55156.2024.10588403"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1093\/nsr\/nwae403"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/tmm.2025.3557680"},{"key":"ref20","article-title":"TinyGPT-V: Efficient multimodal large language model via small 
backbones","author":"Yuan","year":"2023"},{"key":"ref21","article-title":"TinyLLaVA: A framework of small-scale large multimodal models","author":"Zhou","year":"2024"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00704"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2024.3415661"},{"key":"ref24","article-title":"PerLLM: Personalized inference scheduling with edge-cloud collaboration for diverse LLM services","author":"Yang","year":"2024"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2023.3249835"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.3390\/sym12040676"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TCOMM.2024.3364990"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TSP.2021.3071210"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/JIOT.2023.3293154"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2022.3221990"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2023.3287547"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/MWC.004.2200050"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TMC.2024.3406375"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCT62411.2024.10946488"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/LWC.2021.3136045"},{"key":"ref36","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref37","article-title":"SQ-LLaVA: Self-questioning for large vision-language assistant","author":"Sun","year":"2024"},{"key":"ref38","article-title":"Vicuna: An open-source chatbot impressing GPT-4 with 90% ChatGPT quality","author":"Chiang","year":"2023"},{"key":"ref39","article-title":"Mistral 7B","author":"Jiang","year":"2023"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/D14-1162"},{"key":"ref41","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref42","article-title":"LLaVA-UHD: An LMM perceiving any aspect ratio and high-resolution images","author":"Xu","year":"2024"},{"key":"ref43","article-title":"Visual instruction tuning","author":"Liu","year":"2023"},{"key":"ref44","article-title":"MiniGPT-4: Enhancing vision-language understanding with advanced large language models","author":"Zhu","year":"2023"},{"key":"ref45","article-title":"MobileVLM: A fast, strong and open vision language assistant for mobile devices","author":"Chu","year":"2023"},{"key":"ref46","article-title":"LLM.int8(): 8-bit matrix multiplication for transformers at scale","author":"Dettmers","year":"2022"},{"key":"ref47","article-title":"PB-LLM: Partially binarized large language models","author":"Shang","year":"2023"},{"key":"ref48","article-title":"LLaVA-PruMerge: Adaptive token reduction for efficient large multimodal models","author":"Shang","year":"2024"},{"key":"ref49","article-title":"ConvLLaVA: Hierarchical backbones as visual encoder for large multimodal 
models","author":"Ge","year":"2024"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/COMST.2022.3223224"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/TCCN.2024.3435524"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/JSAC.2023.3288236"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1016\/S0960-9822(03)00135-0"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.3390\/app12052629"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR48806.2021.9412931"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2710620"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.80"},{"key":"ref59","article-title":"SalGAN: Visual saliency prediction with generative adversarial networks","author":"Pan","year":"2017"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2022.3152189"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2866563"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/iccv51070.2023.01100"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.268"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-76941-7_63"},{"key":"ref65","article-title":"YOLO by Ultralytics","author":"Jocher","year":"2023"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/MNET.128.2200338"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1016\/j.visres.2015.03.005"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2009.5459462"},{"key":"ref69","article-title":"LLaVA-Med: Training a large language-and-vision assistant for biomedicine in one day","author":"Li","year":"2023"},{"key":"ref70","article-title":"[CLS] attention is all you need for training-free visual token pruning: Make VLM inference faster","author":"Zhang","year":"2024"}],"container-title":["IEEE Transactions on Mobile Computing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/7755\/11154819\/10976624.pdf?arnumber=10976624","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,10]],"date-time":"2025-09-10T19:54:22Z","timestamp":1757534062000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10976624\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10]]},"references-count":70,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tmc.2025.3564543","relation":{},"ISSN":["1536-1233","1558-0660","2161-9875"],"issn-type":[{"value":"1536-1233","type":"print"},{"value":"1558-0660","type":"electronic"},{"value":"2161-9875","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,10]]}}}