{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,4]],"date-time":"2026-04-04T17:55:51Z","timestamp":1775325351031,"version":"3.50.1"},"reference-count":45,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key Research and Development Program of China","award":["2022YFB3103500"],"award-info":[{"award-number":["2022YFB3103500"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62473033"],"award-info":[{"award-number":["62473033"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62463002"],"award-info":[{"award-number":["62463002"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62062021"],"award-info":[{"award-number":["62062021"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of 
China","doi-asserted-by":"publisher","award":["62202499"],"award-info":[{"award-number":["62202499"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Beijing Municipal Natural Science Foundation","award":["L231012"],"award-info":[{"award-number":["L231012"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Multimedia"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tmm.2025.3557703","type":"journal-article","created":{"date-parts":[[2025,4,3]],"date-time":"2025-04-03T19:59:13Z","timestamp":1743710353000},"page":"2935-2948","source":"Crossref","is-referenced-by-count":4,"title":["Multi-Modal Self-Perception Enhanced Large Language Model for 3D Region-of-Interest Captioning With Limited Data"],"prefix":"10.1109","volume":"27","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3203-630X","authenticated-orcid":false,"given":"Lu","family":"Shi","sequence":"first","affiliation":[{"name":"State Key Laboratory of Advanced Rail Autonomous Operation, the School of Computer Science and Technology, and Visual Intelligence +X International Cooperation Joint Laboratory of MOE, Beijing Jiaotong University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0097-6196","authenticated-orcid":false,"given":"Shichao","family":"Kan","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Central South University, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8408-3816","authenticated-orcid":false,"given":"Yi","family":"Jin","sequence":"additional","affiliation":[{"name":"Key Laboratory of Big Data and Artificial Intelligence in Transportation, Ministry of Education and the School of Computer Science and Technology, Beijing Jiaotong University, Beijing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6255-9422","authenticated-orcid":false,"given":"Linna","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Mechanical Engineering, Guizhou University, Guiyang, China"}]},{"given":"Yigang","family":"Cen","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Advanced Rail Autonomous Operation, the School of Computer Science and Technology, and Visual Intelligence +X International Cooperation Joint Laboratory of MOE, Beijing Jiaotong University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3331583"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3304892"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3304054"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3318073"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/JSEN.2024.3405079"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00931"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/2480741.2480751"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3275366"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00272"},{"key":"ref10","article-title":"When LLMs step into the 3D world: A survey and meta-analysis of 3D tasks via multi-modal large language models","author":"Ma","year":"2024"},{"key":"ref11","article-title":"Regionblip: A unified multi-modal pre-training framework for holistic and regional comprehension","author":"Zhou","year":"2023"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02496"},{"key":"ref13","first-page":"20 482","article-title":"3D-LLM: Injecting the 3D world into large language models","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"36","author":"Hong","year":"2024"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72698-9_8"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICMEW63481.2024.10645462"},{"key":"ref16","first-page":"26 650","article-title":"LAMM: Language-assisted multi-modal instruction-tuning dataset, framework, and benchmark","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Yin","year":"2023"},{"key":"ref17","article-title":"Chat-3D: Data-efficiently tuning large language model for universal dialogue of 3D scenes","author":"Wang","year":"2023"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3428317"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58565-5_13"},{"key":"ref20","first-page":"34 892","article-title":"Visual instruction tuning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Liu","year":"2023"},{"key":"ref21","first-page":"25278","article-title":"Laion-5b: An open large-scale dataset for training next generation image-text models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Schuhmann","year":"2022"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr46437.2021.00356"},{"key":"ref23","first-page":"19 730","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. 40th Int. Conf. Mach. 
Learn.","volume":"202","author":"Li","year":"2023"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01070"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3387838"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_25"},{"key":"ref27","article-title":"Chatgpt","year":"2022"},{"key":"ref28","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref29","article-title":"Vicuna: An open-source chatbot impressing Gpt-4 with 90 Chatgpt quality","author":"Chiang","year":"2023"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i3.27999"},{"key":"ref31","article-title":"Shikra: Unleashing multimodal LLM\u2019s referential dialogue magic","author":"Chen","year":"2023"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-91813-1_4"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01309"},{"issue":"2","key":"ref34","first-page":"1","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Representations","volume":"1","author":"Hu","year":"2022"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.9"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01871"},{"key":"ref38","article-title":"OPT: Open pre-trained transformer language models","author":"Zhang","year":"2022"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.261"},{"key":"ref40","first-page":"1","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Loshchilov","year":"2018"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1410"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.552"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01508"},{"key":"ref45","first-page":"1","article-title":"Octavius: Mitigating task interference in MLLMs via LoRA-MoE","volume-title":"Proc. 12th Int. Conf. Learn. Representations","author":"Chen","year":"2024"}],"container-title":["IEEE Transactions on Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6046\/10844992\/10948342.pdf?arnumber=10948342","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,29]],"date-time":"2025-05-29T17:30:53Z","timestamp":1748539853000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10948342\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":45,"URL":"https:\/\/doi.org\/10.1109\/tmm.2025.3557703","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"value":"1520-9210","type":"print"},{"value":"1941-0077","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}