{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,9]],"date-time":"2026-03-09T22:59:54Z","timestamp":1773097194661,"version":"3.50.1"},"reference-count":56,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"9","license":[{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100020950","name":"National Science and Technology Council","doi-asserted-by":"publisher","award":["112-2221-E-011-099-MY2"],"award-info":[{"award-number":["112-2221-E-011-099-MY2"]}],"id":[{"id":"10.13039\/501100020950","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100020950","name":"National Science and Technology Council","doi-asserted-by":"publisher","award":["113-2221-E-011-132-MY2"],"award-info":[{"award-number":["113-2221-E-011-132-MY2"]}],"id":[{"id":"10.13039\/501100020950","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1109\/tcsvt.2025.3552895","type":"journal-article","created":{"date-parts":[[2025,3,19]],"date-time":"2025-03-19T16:01:32Z","timestamp":1742400092000},"page":"8989-9002","source":"Crossref","is-referenced-by-count":2,"title":["A Multi-Modal Architecture With Spatio-Temporal-Text Adaptation for Video-Based Traffic Accident Anticipation"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-7597-1923","authenticated-orcid":false,"given":"Patrik","family":"Patera","sequence":"first","affiliation":[{"name":"Department of Electronic and Computer Engineering, National Taiwan University of Science and Technology, Taipei, Taiwan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7221-1603","authenticated-orcid":false,"given":"Yie-Tarng","family":"Chen","sequence":"additional","affiliation":[{"name":"Department of Electronic and Computer Engineering, National Taiwan University of Science and Technology, Taipei, Taiwan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6402-2688","authenticated-orcid":false,"given":"Wen-Hsien","family":"Fang","sequence":"additional","affiliation":[{"name":"Department of Electronic and Computer Engineering, National Taiwan University of Science and Technology, Taipei, 
Taiwan"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-54190-7_9"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICPR48806.2021.9412338"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00371"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413827"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.146"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00752"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3155613"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIV.2023.3257169"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.110071"},{"key":"ref10","first-page":"1","article-title":"A note on over-smoothing for graph neural networks","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Cai"},{"key":"ref11","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref12","first-page":"1","article-title":"AIM: Adapting image models for efficient video action recognition","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Yang"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i6.28423"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00207"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP51287.2024.10647316"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2020.3031984"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3338743"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3157254"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3147826"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/AVSS56176.2022.9959545"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2778563"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20047-2_9"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3237328"},{"key":"ref25","first-page":"2790","article-title":"Parameter-efficient transfer learning for NLP","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Houlsby"},{"key":"ref26","first-page":"1","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Represen.","author":"Hu"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acllong.353"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref29","first-page":"26462","article-title":"ST-adapter: Parameter-efficient image-to-video transfer learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Pan"},{"key":"ref30","first-page":"16664","article-title":"AdaptFormer: Adapting vision transformers for scalable visual recognition","volume-title":"Proc. 
NIPS","author":"Chen"},{"key":"ref31","article-title":"Towards efficient visual adaption via structural re-parameterization","author":"Luo","year":"2023","journal-title":"arXiv:2302.08106"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2018.2835308"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3077512"},{"key":"ref34","first-page":"1","article-title":"FROSTER: Frozen CLIP is a strong teacher for open-vocabulary action recognition","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Huang"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00121"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3402952"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-024-02224-2"},{"key":"ref38","first-page":"14200","article-title":"Attention bottlenecks for multimodal fusion","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Nagrani"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/tcsvt.2024.3482007"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3491176"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3254530"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3284979"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3301933"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"issue":"140","key":"ref46","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/ICRA.2016.7487478"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00680"},{"key":"ref49","article-title":"What makes CLIP more robust to long-tailed pre-training data? 
A controlled study for transferable insights","author":"Wen","year":"2024","journal-title":"arXiv:2405.21070"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00241"},{"key":"ref51","volume-title":"Deep Learning","author":"Goodfellow","year":"2016"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00157"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/76\/11154820\/10933925.pdf?arnumber=10933925","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,10]],"date-time":"2025-09-10T17:49:13Z","timestamp":1757526553000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10933925\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9]]},"references-count":56,"journal-issue":{"issue":"9"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2025.3552895","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9]]}}}
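The record above is the standard payload returned by Crossref's public REST API for a single work (`https://api.crossref.org/works/{doi}`). As a minimal sketch of how to retrieve and read such a record, the Python below fetches the same DOI and pulls out a few of the fields shown above; it assumes network access and the third-party `requests` package, and everything else mirrors keys present in the record itself.

```python
# Minimal sketch: fetch this Crossref work record and read a few fields.
# Assumes the `requests` package is installed and the network is reachable.
import requests

DOI = "10.1109/tcsvt.2025.3552895"

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # payload structure mirrors the record above

print(work["title"][0])                              # article title
print(work["container-title"][0])                    # journal name
print(work["volume"], work["issue"], work["page"])   # 35 9 8989-9002
print(work["references-count"], "references")

# Reference entries carry either a resolved DOI or unstructured fields
# such as "article-title"; handle both cases when iterating.
for ref in work["reference"][:5]:
    print(ref["key"], ref.get("DOI") or ref.get("article-title", "n/a"))
```

One practical note: Crossref asks API clients to identify themselves (for example via a `mailto=` query parameter or a descriptive User-Agent) to be routed to its "polite" pool; the anonymous call above works but may be rate-limited more aggressively.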