{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,18]],"date-time":"2026-02-18T22:19:00Z","timestamp":1771453140565,"version":"3.50.1"},"reference-count":23,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,9,14]],"date-time":"2025-09-14T00:00:00Z","timestamp":1757808000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,9,14]],"date-time":"2025-09-14T00:00:00Z","timestamp":1757808000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,9,14]]},"DOI":"10.1109\/icipw68931.2025.11386418","type":"proceedings-article","created":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T21:05:43Z","timestamp":1771362343000},"page":"528-533","source":"Crossref","is-referenced-by-count":0,"title":["Closing the Modality Gap: Integrating LLMs With Lidar For 3D Object Detection and Object-Level Understanding"],"prefix":"10.1109","author":[{"given":"Youngchae","family":"Chee","sequence":"first","affiliation":[{"name":"KAIST,School of Electrical Engineering,South Korea"}]},{"given":"Taeheon","family":"Kim","sequence":"additional","affiliation":[{"name":"KAIST,School of Electrical Engineering,South Korea"}]},{"given":"Youngjoon","family":"Yu","sequence":"additional","affiliation":[{"name":"KAIST,School of Electrical Engineering,South Korea"}]},{"given":"Hyun Wook","family":"Park","sequence":"additional","affiliation":[{"name":"KAIST,School of Electrical Engineering,South Korea"}]},{"given":"Yong Man","family":"Ro","sequence":"additional","affiliation":[{"name":"KAIST,School of Electrical Engineering,South Korea"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Visual instruction tuning","volume-title":"Thirty-seventh Conference on Neural Information Processing Systems","author":"Liu"},{"key":"ref2","article-title":"Gpt-4o system card","author":"Hurst","year":"2024","journal-title":"arXiv preprint arXiv:2410.21276"},{"key":"ref3","article-title":"Gemini: a family of highly capable multimodal models","author":"Team","year":"2023","journal-title":"arXiv preprint arXiv:2312.11805"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.175"},{"key":"ref5","article-title":"Vila2: Vila augmented vila","author":"Fang","year":"2024","journal-title":"arXiv preprint arXiv:2407.17453"},{"key":"ref6","article-title":"Cambrian-1: A fully open, vision-centric exploration of multimodal 11 ms","author":"Tong","year":"2024","journal-title":"arXiv preprint arXiv:2406.16860"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i9.33001"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01164"},{"key":"ref9","article-title":"Llava-next: Improved reasoning, ocr, and world knowledge","author":"Liu","year":"2024"},{"key":"ref10","article-title":"Mm1: Methods, analysis & insights from multimodal 11 m pretraining","author":"McKinzie","year":"2024","journal-title":"arXiv preprint arXiv:2403.09611"},{"key":"ref11","article-title":"Yi: Open foundation models by 01. ai","author":"Young","year":"2024","journal-title":"arXiv preprint arXiv:2403.04652"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2025.3637265"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72640-8_7"},{"key":"ref14","article-title":"Deepseek-vl: towards realworld vision-language understanding","author":"Lu","year":"2024","journal-title":"arXiv preprint arXiv:2403.05525"},{"key":"ref15","article-title":"Omnifusion technical report","volume-title":"arXiv preprint arXiv:2404.06212","author":"Goncharova","year":"2024"},{"key":"ref16","article-title":"Am-radio: Agglomerative model-reduce all domains into one","author":"Ranzinger","year":"2023","journal-title":"arXiv preprint arXiv:2312.06709"},{"key":"ref17","article-title":"Cobra: Extending mamba to multi-modal large language model for efficient inference","author":"Zhao","year":"2024","journal-title":"arXiv preprint arXiv:2403.14520"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.15607\/rss.2014.x.007"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01298"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.52202\/079017-0435"},{"key":"ref21","first-page":"10421","article-title":"Bevfusion: A simple and robust lidarcamera fusion framework","volume":"35","author":"Liang","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72658-3_24"},{"key":"ref23","article-title":"Chatrex: Taming multimodal llm for joint perception and understanding","author":"Jiang","year":"2024","journal-title":"arXiv preprint arXiv:2411.18363"}],"event":{"name":"2025 IEEE International Conference on Image Processing Workshops (ICIPW)","location":"Anchorage, AK, USA","start":{"date-parts":[[2025,9,14]]},"end":{"date-parts":[[2025,9,17]]}},"container-title":["2025 IEEE International Conference on Image Processing Workshops (ICIPW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11385856\/11385840\/11386418.pdf?arnumber=11386418","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,18]],"date-time":"2026-02-18T21:14:51Z","timestamp":1771449291000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11386418\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,14]]},"references-count":23,"URL":"https:\/\/doi.org\/10.1109\/icipw68931.2025.11386418","relation":{},"subject":[],"published":{"date-parts":[[2025,9,14]]}}}