{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T21:01:32Z","timestamp":1775682092170,"version":"3.50.1"},"reference-count":46,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100001321","name":"National Research Foundation","doi-asserted-by":"publisher","award":["RS-2026-25498346"],"award-info":[{"award-number":["RS-2026-25498346"]}],"id":[{"id":"10.13039\/501100001321","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100010418","name":"Institute for Information and Communications Technology Promotion","doi-asserted-by":"publisher","award":["RS-2021-II211341"],"award-info":[{"award-number":["RS-2021-II211341"]}],"id":[{"id":"10.13039\/501100010418","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2026]]},"DOI":"10.1109\/access.2026.3680314","type":"journal-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:51:30Z","timestamp":1775159490000},"page":"51576-51588","source":"Crossref","is-referenced-by-count":0,"title":["SLICE: An Efficient and Tuning-Free Keyframe Sampling Framework for Long-Form Video Understanding"],"prefix":"10.1109","volume":"14","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-6432-1633","authenticated-orcid":false,"given":"Sungjin","family":"Han","sequence":"first","affiliation":[{"name":"Department of Artificial Intelligence, Chung-Ang University, Seoul, Republic of Korea"}]},{"given":"Thang","family":"Vu","sequence":"additional","affiliation":[{"name":"Woven by Toyota, Palo Alto, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7871-9627","authenticated-orcid":false,"given":"Junyeong","family":"Kim","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence, Chung-Ang University, Seoul, Republic of Korea"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Qwen2.5-VL technical report","volume-title":"arXiv:2502.13923","author":"Bai","year":"2025"},{"key":"ref2","article-title":"LLaVA-onevision: Easy visual task transfer","author":"Li","year":"2024","journal-title":"arXiv:2408.03326"},{"key":"ref3","article-title":"LLaVA-video: Video instruction tuning with synthetic data","author":"Zhang","year":"2025","journal-title":"arXiv:2410.02713"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1093\/nsr\/nwae403"},{"key":"ref5","article-title":"Lost in the middle: How language models use long contexts","author":"Liu","year":"2023","journal-title":"arXiv:2307.03172"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00495"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02711"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.00802"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01012"},{"key":"ref10","article-title":"DrVideo: Document retrieval based long video understanding","author":"Ma","year":"2024","journal-title":"arXiv:2406.12846"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02709"},{"key":"ref12","article-title":"Self-chained image-language model for video localization and question answering","author":"Yu","year":"2023","journal-title":"arXiv:2305.06988"},{"key":"ref13","article-title":"Qwen2-VL: Enhancing vision-language model\u2019s perception of the world at any resolution","author":"Wang","year":"2024","journal-title":"arXiv:2409.12191"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10599-4_35"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46478-7_47"},{"key":"ref16","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021","journal-title":"arXiv:2103.00020"},{"key":"ref17","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","author":"Li","year":"2022","journal-title":"arXiv:2201.12086"},{"key":"ref18","article-title":"ActionCLIP: A new paradigm for video action recognition","author":"Wang","year":"2021","journal-title":"arXiv:2109.08472"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2025.3590936"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00305"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02707"},{"key":"ref22","article-title":"ReFoCUS: Reinforcement-guided frame optimization for contextual understanding","author":"Lee","year":"2025","journal-title":"arXiv:2506.01274"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12255"},{"key":"ref24","article-title":"Logic-in-frames: Dynamic keyframe search via visual semantic-logical verification for long video understanding","author":"Guo","year":"2025","journal-title":"arXiv:2503.13139"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01599"},{"key":"ref26","article-title":"VSI: Visual subtitle integration for keyframe selection to enhance long video understanding","author":"He","year":"2025","journal-title":"arXiv:2508.06869"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"ref28","article-title":"Two-stream convolutional networks for action recognition in videos","author":"Simonyan","year":"2014","journal-title":"arXiv:1406.2199"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/icme59968.2025.11209469"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/290941.291025"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1561\/2200000044"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72670-5_5"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.naacl-long.166"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02245"},{"key":"ref35","article-title":"LongVideoBench: A benchmark for long-context interleaved video-language understanding","author":"Wu","year":"2024","journal-title":"arXiv:2407.15754"},{"key":"ref36","article-title":"Video-LLaVA: Learning united visual representation by alignment before projection","author":"Lin","year":"2023","journal-title":"arXiv:2311.10122"},{"key":"ref37","article-title":"PLLaVA: Parameter-free LLaVA extension from images to videos for video dense captioning","author":"Xu","year":"2024","journal-title":"arXiv:2404.16994"},{"key":"ref38","article-title":"MiniCPM-V: A GPT-4V level MLLM on your phone","author":"Yao","year":"2024","journal-title":"arXiv:2408.01800"},{"key":"ref39","article-title":"VILA: On pre-training for visual language models","author":"Lin","year":"2023","journal-title":"arXiv:2312.07533"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.01279"},{"key":"ref41","article-title":"Q-frame: Query-aware frame selection and multi-resolution adaptation for video-LLMs","author":"Zhang","year":"2025","journal-title":"arXiv:2506.22139"},{"key":"ref42","article-title":"MaxInfo: A training-free key-frame selection method using maximum volume for enhanced video understanding","author":"Li","year":"2025","journal-title":"arXiv:2502.03183"},{"key":"ref43","article-title":"LongVU: Spatiotemporal adaptive compression for long video-language understanding","author":"Shen","year":"2024","journal-title":"arXiv:2410.17434"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/d19-1410"},{"key":"ref45","article-title":"LAION-5B: An open large-scale dataset for training next generation image-text models","author":"Schuhmann","year":"2022","journal-title":"arXiv:2210.08402"},{"key":"ref46","article-title":"Understanding guided image captioning performance across domains","author":"Ng","year":"2020","journal-title":"arXiv:2012.02339"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/11323511\/11471756.pdf?arnumber=11471756","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,8]],"date-time":"2026-04-08T20:07:42Z","timestamp":1775678862000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11471756\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/access.2026.3680314","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]}}}