{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,19]],"date-time":"2025-12-19T10:13:48Z","timestamp":1766139228966,"version":"3.46.0"},"reference-count":61,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Neural Netw. Learning Syst."],
"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1109\/tnnls.2025.3581411","type":"journal-article","created":{"date-parts":[[2025,6,25]],"date-time":"2025-06-25T14:24:09Z","timestamp":1750861449000},"page":"17679-17692","source":"Crossref","is-referenced-by-count":1,"title":["Argus: Leveraging Multiview Images for Improved 3-D Scene Understanding With Large Language Models"],"prefix":"10.1109","volume":"36","author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-5136-9083","authenticated-orcid":false,"given":"Yifan","family":"Xu","sequence":"first","affiliation":[{"name":"School of Computer Science and Engineering, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-3780-7453","authenticated-orcid":false,"given":"Chao","family":"Zhang","sequence":"additional","affiliation":[{"name":"Beijing Digital Native Digital City Research Center, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-2778-959X","authenticated-orcid":false,"given":"Hanqi","family":"Jiang","sequence":"additional","affiliation":[{"name":"School of Computing, University of Georgia, Athens, GA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-3711-3477","authenticated-orcid":false,"given":"Xiaoyan","family":"Wang","sequence":"additional","affiliation":[{"name":"Beijing Digital Native Digital City Research Center, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-8481-2906","authenticated-orcid":false,"given":"Ruifei","family":"Ma","sequence":"additional","affiliation":[{"name":"School of Computer and Communication Engineering, University of Science and Technology Beijing, Beijing, China"}]},{"given":"Yiwei","family":"Li","sequence":"additional","affiliation":[{"name":"School of Computing, University of Georgia, Athens, GA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7483-6570","authenticated-orcid":false,"given":"Zihao","family":"Wu","sequence":"additional","affiliation":[{"name":"School of Computing, University of Georgia, Athens, GA, USA"}]},
{"ORCID":"https:\/\/orcid.org\/0009-0009-5453-4394","authenticated-orcid":false,"given":"Zeju","family":"Li","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0008-0689-8401","authenticated-orcid":false,"given":"Xiangde","family":"Liu","sequence":"additional","affiliation":[{"name":"Beijing Digital Native Digital City Research Center, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref2","first-page":"1","article-title":"Uni3D: Exploring unified 3D representation at scale","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhou"},{"key":"ref3","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv:2302.13971"},{"key":"ref4","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023","journal-title":"arXiv:2307.09288"},{"article-title":"Vicuna: An open-source chatbot impressing GPT-4 with 90%* ChatGPT quality","year":"2023","author":"Chiang","key":"ref5"},{"issue":"70","key":"ref6","first-page":"1","article-title":"Scaling instruction-finetuned language models","volume":"25","author":"Chung","year":"2024","journal-title":"J. Mach. Learn. Res."},
{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3330926"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2021.3094987"},{"key":"ref9","article-title":"Flamingo: A visual language model for few-shot learning","author":"Alayrac","year":"2022","journal-title":"arXiv:2204.14198"},{"key":"ref10","article-title":"Visual instruction tuning","author":"Liu","year":"2023","journal-title":"arXiv:2304.08485"},{"key":"ref11","article-title":"MultiModal-GPT: A vision and language model for dialogue with humans","author":"Gong","year":"2023","journal-title":"arXiv:2305.04790"},{"article-title":"Octavius: Mitigating task interference in MLLMs via MoE","volume-title":"arXiv:2311.02684","author":"Chen","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.542"},{"key":"ref15","article-title":"Microsoft COCO captions: Data collection and evaluation server","author":"Chen","year":"2015","journal-title":"arXiv:1504.00325"},{"key":"ref16","article-title":"Kosmos-2: Grounding multimodal large language models to the world","author":"Peng","year":"2023","journal-title":"arXiv:2306.14824"},{"key":"ref17","first-page":"19730","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},
{"key":"ref18","article-title":"InstructBLIP: Towards general-purpose vision-language models with instruction tuning","author":"Dai","year":"2023","journal-title":"arXiv:2305.06500"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3335859"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3331841"},{"key":"ref22","article-title":"Point-bind & point-LLM: Aligning point cloud with multi-modality for 3D understanding, generation, and instruction following","author":"Guo","year":"2023","journal-title":"arXiv:2309.00615"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72698-9_8"},{"key":"ref24","article-title":"3D-LLM: Injecting the 3D world into large language models","author":"Hong","year":"2023","journal-title":"arXiv:2307.12981"},{"key":"ref25","article-title":"LAMM: Language-assisted multi-modal instruction-tuning dataset, framework, and benchmark","author":"Yin","year":"2023","journal-title":"arXiv:2306.06687"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICMEW63481.2024.10645462"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02496"},{"key":"ref28","article-title":"Chat-scene: Bridging 3D scene and large language models with object identifiers","author":"Huang","year":"2023","journal-title":"arXiv:2312.08168"},{"key":"ref29","article-title":"SceneGPT: A language model for 3D scene understanding","author":"Chandhok","year":"2024","journal-title":"arXiv:2408.06926"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00677"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00272"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01854"},{"key":"ref33","first-page":"1","article-title":"SQA3D: Situated question answering in 3D scenes","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Ma"},
{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2022.3198163"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3229081"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_25"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58565-5_13"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01843"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00321"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.261"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00775"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/3DV.2017.00081"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3275156"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1093\/nsr\/nwae403"},{"article-title":"LLaMA-adapter: Efficient fine-tuning of language models with zero-init attention","volume-title":"arXiv:2303.16199","author":"Zhang","key":"ref45"},{"key":"ref46","article-title":"LLaMA-adapter v2: Parameter-efficient visual instruction model","author":"Gao","year":"2023","journal-title":"arXiv:2304.15010"},{"key":"ref47","article-title":"Shikra: Unleashing multimodal LLM\u2019s referential dialogue magic","author":"Chen","year":"2023","journal-title":"arXiv:2306.15195"},{"key":"ref48","article-title":"Attention is all you need","author":"Vaswani","year":"2017","journal-title":"arXiv:1706.03762"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i3.28013"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"ref51","article-title":"LAVIS: A library for language-vision intelligence","author":"Li","year":"2022","journal-title":"arXiv:2209.09019"},{"key":"ref52","first-page":"1","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Loshchilov"},
{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-32248-9_23"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00644"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01293"},{"key":"ref56","first-page":"394","article-title":"Vision-and-dialog navigation","volume-title":"Proc. Conf. Robot Learn.","author":"Thomason"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01250"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00387"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01000"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00492"},{"key":"ref61","article-title":"LLM-grounder: Open-vocabulary 3D visual grounding with large language model as an agent","author":"Yang","year":"2023","journal-title":"arXiv:2309.12311"}],"container-title":["IEEE Transactions on Neural Networks and Learning Systems"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/5962385\/11195929\/11049948.pdf?arnumber=11049948","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,8]],"date-time":"2025-12-08T18:41:34Z","timestamp":1765219294000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11049948\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10]]},"references-count":61,"journal-issue":{"issue":"10"},"URL":"https:\/\/doi.org\/10.1109\/tnnls.2025.3581411","relation":{},"ISSN":["2162-237X","2162-2388"],"issn-type":[{"type":"print","value":"2162-237X"},{"type":"electronic","value":"2162-2388"}],"subject":[],"published":{"date-parts":[[2025,10]]}}}