{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T17:40:23Z","timestamp":1771954823070,"version":"3.50.1"},"reference-count":34,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iccvw69036.2025.00560","type":"proceedings-article","created":{"date-parts":[[2026,2,23]],"date-time":"2026-02-23T20:44:02Z","timestamp":1771879442000},"page":"5366-5373","source":"Crossref","is-referenced-by-count":1,"title":["TrafficVILA: Scaling Vision-Language Models to High-Resolution Video Understanding for Traffic Safety Analysis"],"prefix":"10.1109","author":[{"given":"Zaid 
Pervaiz","family":"Bhat","sequence":"first","affiliation":[{"name":"NVIDIA"}]},{"given":"Seunghwan","family":"Cha","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Rohan","family":"Gulati","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Monika","family":"Jhuria","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Varun","family":"Praveen","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Tomasz","family":"Kornuta","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Yao","family":"Lu","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Vidya","family":"Murali","sequence":"additional","affiliation":[{"name":"NVIDIA"}]}],"member":"263","reference":[{"key":"ref1","author":"Bai","year":"2025","journal-title":"Qwen2.5-v1 technical report"},{"key":"ref2","first-page":"65","article-title":"Meteor: An automatic metric for mt evaluation with improved correlation with human judgments","volume-title":"Proceedings of the ACL workshop on intrinsic and extrinsic evaluation measures for machine translation and\/or summarization","author":"Banerjee","year":"2005"},{"key":"ref3","article-title":"Paligemma: A versatile 3b vlm for transfer","author":"Beyer","year":"2024","journal-title":"arXiv preprint"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01227"},{"key":"ref5","article-title":"Nvila: Efficient video language models","author":"Cheng","year":"2024","journal-title":"arXiv preprint"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW63382.2024.00708"},{"key":"ref7","article-title":"The Llama 3 Herd of Models","author":"Dubey","year":"2024","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Moa: Mixture of sparse attention for automatic large language model compression","author":"Fan","year":"2024","journal-title":"arXiv 
preprint"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02245"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.563"},{"key":"ref11","article-title":"Lita: Language instructed temporal-localization assistant","author":"De-An","year":"2024","journal-title":"arXiv preprint"},{"key":"ref12","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proceedings of the 38th International Conference on Machine Learning","author":"Jia","year":"2021"},{"key":"ref13","article-title":"Mistral 7B","author":"Jiang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.290"},{"key":"ref15","article-title":"Llava-onevision: Easy visual task transfer","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref16","article-title":"BLIP-2: Bootstrapping Language-Image Pretraining with Frozen Image Encoders and Large Language Models","author":"Li","year":"2023","journal-title":"arXiv preprint"},{"key":"ref17","first-page":"74","article-title":"Rouge: A package for automatic evaluation of summaries","volume-title":"Text summarization branches out","author":"Lin","year":"2004"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02520"},{"key":"ref19","article-title":"Bridging the gap between human and machine understanding in traffic safety analysis","author":"Malla","year":"2024","journal-title":"arXiv preprint"},{"key":"ref20","year":"2024","journal-title":"Gpt-4o system card"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"},{"key":"ref22","article-title":"Perception test: A diagnostic benchmark for multimodal video models","volume-title":"Advances in Neural Information Processing Systems","author":"P\u0103tr\u0103ucean","year":"2023"},{"key":"ref23","article-title":"Trafficdomain video question 
answering with automatic captioning (trivia)","author":"Qasemi","year":"2023","journal-title":"arXiv preprint"},{"key":"ref24","author":"Qwen","year":"2025","journal-title":"Qwen2.5 technical report"},{"key":"ref25","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proceedings of the 38th International Conference on Machine Learning","author":"Radford","year":"2021"},{"key":"ref26","article-title":"Sam 2: Segment anything in images and videos","author":"Ravi","year":"2024","journal-title":"arXiv preprint"},{"key":"ref27","doi-asserted-by":"crossref","DOI":"10.1109\/ICCVW69036.2025.00579","article-title":"The 9th AI City Challenge","volume-title":"In Proc. ICCV Workshops","author":"Tang"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref29","article-title":"Mitigating hallucination in large multimodal models via robust instruction tuning","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref30","article-title":"Cityllava: Efficient fine-tuning for vlms in city scenarios","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref31","article-title":"Set-of-mark prompting unleashes extraordinary visual grounding in gpt-4v","author":"Yang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr42600.2020.00271"},{"key":"ref33","article-title":"Llava-video: Video instruction tuning with large language models","author":"Zhang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref34","article-title":"Internvl3: Exploring advanced training and test-time recipes for open-source multimodal models","author":"Zhu","year":"2025","journal-title":"arXiv preprint"}],"event":{"name":"2025 IEEE\/CVF International Conference on Computer Vision Workshops (ICCVW)","location":"Honolulu, HI, 
USA","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,20]]}},"container-title":["2025 IEEE\/CVF International Conference on Computer Vision Workshops (ICCVW)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11373940\/11374285\/11375657.pdf?arnumber=11375657","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T07:46:20Z","timestamp":1771919180000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11375657\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/iccvw69036.2025.00560","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}