{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,10]],"date-time":"2026-02-10T16:07:27Z","timestamp":1770739647605,"version":"3.49.0"},"reference-count":50,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"Institute of Information and Communications Technology Planning and Evaluation","award":["2710017875"],"award-info":[{"award-number":["2710017875"]}]},{"name":"Institute of Information and Communications Technology Planning and Evaluation","award":["IITP-2025-RS-2023-00254529"],"award-info":[{"award-number":["IITP-2025-RS-2023-00254529"]}]},{"DOI":"10.13039\/501100003725","name":"National Research Foundation of Korea","doi-asserted-by":"publisher","award":["RS-2025-00553785"],"award-info":[{"award-number":["RS-2025-00553785"]}],"id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2026]]},"DOI":"10.1109\/access.2026.3658103","type":"journal-article","created":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T05:59:23Z","timestamp":1769493563000},"page":"16188-16203","source":"Crossref","is-referenced-by-count":0,"title":["TAME: Temporal-Aware Mixture-of-Experts for Text\u2013Video Retrieval"],"prefix":"10.1109","volume":"14","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-6638-3044","authenticated-orcid":false,"given":"Uicheol","family":"Jung","sequence":"first","affiliation":[{"name":"Sejong University, Gwangjin-gu, Seoul, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8614-8775","authenticated-orcid":false,"given":"Juyoung","family":"Hong","sequence":"additional","affiliation":[{"name":"Sejong University, Gwangjin-gu, Seoul, Republic of Korea"}]},{"given":"Hojung","family":"Kwon","sequence":"additional","affiliation":[{"name":"Wisenut, Bundang-gu, Seongnam-si, Gyeonggi-do, Republic of Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9970-0132","authenticated-orcid":false,"given":"Yukyung","family":"Choi","sequence":"additional","affiliation":[{"name":"Sejong University, Gwangjin-gu, Seoul, Republic of Korea"}]}],"member":"263","reference":[{"key":"ref1","first-page":"22895","article-title":"Contrastive language-image pre-training with knowledge graphs","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Pan"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2022.07.028"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547910"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00495"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19781-9_19"},{"key":"ref6","article-title":"Clip-vip: Adapting pre-trained image-text model to video-language alignment","author":"Xue","year":"2022","journal-title":"arXiv:2209.06430"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3531950"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01622"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02563"},{"key":"ref10","article-title":"CLIP2 Video: Mastering video-text retrieval via image CLIP","author":"Fang","year":"2021","journal-title":"arXiv:2106.11097"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01434"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.01031"},{"key":"ref13","article-title":"Deep learning scaling is predictable, empirically","author":"Hestness","year":"2017","journal-title":"arXiv:1712.00409"},{"key":"ref14","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Brown"},{"key":"ref15","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Alayrac"},{"key":"ref16","article-title":"Scaling laws for neural language models","author":"Kaplan","year":"2020","journal-title":"arXiv:2001.08361"},{"key":"ref17","first-page":"30016","article-title":"Training compute-optimal large language models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Hoffmann"},{"key":"ref18","article-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer","author":"Shazeer","year":"2017","journal-title":"arXiv:1701.06538"},{"key":"ref19","first-page":"8583","article-title":"Scaling vision with sparse mix-ture of experts","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Riquelme"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1863"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.emnlp-main.275"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-emnlp.1156"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.35378\/gujs.710730"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2021.102601"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.3906\/elk-2105-242"},{"issue":"120","key":"ref26","first-page":"1","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"Fedus","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref27","article-title":"Improving video-text retrieval by multi-stream corpus alignment and dual softmax loss","author":"Cheng","year":"2021","journal-title":"arXiv:2109.04290"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00513"},{"key":"ref29","article-title":"Disentangled representation learning for text-video retrieval","author":"Wang","year":"2022","journal-title":"arXiv:2203.07111"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00634"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01262"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00379"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i6.28327"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2025.3527369"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2025.3574925"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73013-9_23"},{"key":"ref37","article-title":"Representation learning with contrastive predictive coding","author":"van den Oord","year":"2018","journal-title":"arXiv:1807.03748"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1703.07737"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2921562"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_13"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.618"},{"key":"ref43","first-page":"190","article-title":"Collecting highly parallel data for paraphrase evaluation","volume-title":"Proc. 49th Annu. Meeting Assoc. Comput. Linguistics, Human Lang. Technol.","author":"Chen"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298940"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"ref46","first-page":"374","article-title":"Cross-modal and hierarchical modeling of video and text","volume-title":"Proc. Eur. Conf. Comput. Vis. (ECCV)","author":"Hua"},{"key":"ref47","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref48","article-title":"SGDR: Stochastic gradient descent with warm restarts","author":"Loshchilov","year":"2016","journal-title":"arXiv:1608.03983"},{"key":"ref49","article-title":"All in one: Exploring unified video-language pre-training","author":"Jinpeng Wang","year":"2022","journal-title":"arXiv:2203.07303"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00490"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/11323511\/11364210.pdf?arnumber=11364210","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,9]],"date-time":"2026-02-09T21:08:43Z","timestamp":1770671323000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11364210\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":50,"URL":"https:\/\/doi.org\/10.1109\/access.2026.3658103","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]}}}