{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,10]],"date-time":"2026-02-10T16:09:09Z","timestamp":1770739749388,"version":"3.49.0"},"reference-count":105,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Kyung Hee University in 2024","award":["KHU-20241094"],"award-info":[{"award-number":["KHU-20241094"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1109\/tpami.2025.3628653","type":"journal-article","created":{"date-parts":[[2025,11,4]],"date-time":"2025-11-04T18:36:00Z","timestamp":1762281360000},"page":"2803-2819","source":"Crossref","is-referenced-by-count":1,"title":["${\\text{CA}^{2}\\text{ST}}$: Cross-Attention in Audio, Space, and Time for Holistic Video Recognition"],"prefix":"10.1109","volume":"48","author":[{"given":"Jongseo","family":"Lee","sequence":"first","affiliation":[{"name":"Kyung Hee University, Yongin, Republic of Korea"}]},{"given":"Joohyun","family":"Chang","sequence":"additional","affiliation":[{"name":"Kyung Hee University, Yongin, Republic of Korea"}]},{"given":"Dongho","family":"Lee","sequence":"additional","affiliation":[{"name":"Kyung Hee University, Yongin, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7043-0610","authenticated-orcid":false,"given":"Jinwoo","family":"Choi","sequence":"additional","affiliation":[{"name":"Kyung Hee University, Yongin, Republic of Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref2","article-title":"An image is worth 16 x 16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Dosovitskiy","year":"2021"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2102.05095"},{"key":"ref4","first-page":"10078","article-title":"VideoMAE: Masked autoencoders are data-efficient learners for self-supervised video pre-training","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Tong","year":"2022"},{"issue":"5","key":"ref5","first-page":"7","article-title":"Aim: Adapting image models for efficient video understanding","volume-title":"Proc. Int. Conf. Learn. Representations","volume":"3","author":"Yang","year":"2023"},{"key":"ref6","first-page":"26462","article-title":"St-adapter: Parameter-efficient image-to-video transfer learning","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"35","author":"Pan","year":"2022"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_23"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01432"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01231-1_32"},{"key":"ref10","first-page":"853","article-title":"Why can\u2019t i dance in the mall? learning to mitigate scene bias in action recognition","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Choi","year":"2019"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00058"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01361"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-021-01531-2"},{"key":"ref14","first-page":"568","article-title":"Two-stream convolutional networks for action recognition in videos","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"27","author":"Simonyan","year":"2014"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.213"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/s00221-003-1603-5"},{"key":"ref17","first-page":"79399","article-title":"Cast: Cross-attention in space and time for video action recognition","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Lee","year":"2024"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.622"},{"key":"ref19","article-title":"The kinetics human action video dataset","author":"Kay","year":"2017"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02226"},{"key":"ref22","article-title":"A dataset of 101 human action classes from videos in the wild","author":"Soomro","year":"2012"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053174"},{"key":"ref24","article-title":"Audiovisual slowfast networks for video recognition","author":"Xiao","year":"2020"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2025.3590390"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2868668"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01246-5_49"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00718"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00675"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2890749"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3367412"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_19"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00315"},{"key":"ref38","first-page":"12493","article-title":"Keeping your eye on the ball: Trajectory attention in video transformers","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"34","author":"Patrick","year":"2021"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01322"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00333"},{"key":"ref42","first-page":"34892","article-title":"Visual instruction tuning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Liu","year":"2024"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01681"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00637"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3168137"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00228"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02283"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73013-9_23"},{"key":"ref49","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Alayrac","year":"2022"},{"key":"ref50","first-page":"19730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li","year":"2023"},{"key":"ref51","first-page":"14200","article-title":"Attention bottlenecks for multimodal fusion","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Nagrani","year":"2021"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3371220"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00465"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-27066-6_23"},{"key":"ref55","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Brown","year":"2020"},{"key":"ref56","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72848-8_1"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"ref59","article-title":"Internvideo: General video foundation models via generative and discriminative learning","author":"Wang","year":"2022"},{"key":"ref60","article-title":"Bloom: A 176b-parameter open-access multilingual language model","author":"Scao","year":"2022"},{"key":"ref61","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3399607"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19772-7_1"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01742"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.679"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.342"},{"key":"ref68","first-page":"1022","article-title":"Compacter: Efficient low-rank hypercomplex adapter layers","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"34","author":"Mahabadi","year":"2021"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00024"},{"key":"ref71","first-page":"506","article-title":"Learning multiple visual domains with residual adapters","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"30","author":"Rebuffi","year":"2017"},{"key":"ref72","first-page":"20371","article-title":"Mavil: Masked audio-video learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Huang","year":"2024"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01719"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02531"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02497"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-698"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952261"},{"key":"ref78","article-title":"Gaussian error linear units (gelus)","author":"Hendrycks","year":"2016"},{"key":"ref79","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov","year":"2019"},{"key":"ref80","first-page":"1691","article-title":"Generative pretraining from pixels","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Chen","year":"2020"},{"key":"ref81","article-title":"SGDR: Stochastic gradient descent with warm restarts","author":"Loshchilov","year":"2016"},{"key":"ref82","article-title":"BEiT: BERT pre-training of image transformers","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bao","year":"2022"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21315"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01563"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413376"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02573"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i2.27837"},{"key":"ref90","first-page":"9758","article-title":"Self-supervised learning by cross-modal audio-video clustering","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Alwassel","year":"2020"},{"key":"ref91","first-page":"25","article-title":"Self-supervised multimodal versatile networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Alayrac","year":"2020"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01229"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00944"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i13.29407"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446627"},{"key":"ref96","article-title":"Efficient multiscale multimodal bottleneck transformer for audio-video classification","author":"Zhu","year":"2024"},{"key":"ref97","first-page":"24327","article-title":"EquiAV: Leveraging equivariance for audio-visual contrastive learning","volume-title":"Proc. Int. Conf. Mach. 
Learn.","volume":"235","author":"Kim","year":"2024"},{"key":"ref98","article-title":"Contrastive audio-visual masked autoencoder","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gong","year":"2023"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01479"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02523"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.3007826"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i2.16235"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00632"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19830-4_42"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19830-4_40"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/11372200\/11224949.pdf?arnumber=11224949","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,9]],"date-time":"2026-02-09T21:06:11Z","timestamp":1770671171000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11224949\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":105,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2025.3628653","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3]]}}}