{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T07:18:54Z","timestamp":1761895134161,"version":"build-2065373602"},"reference-count":38,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/icme59968.2025.11210126","type":"proceedings-article","created":{"date-parts":[[2025,10,30]],"date-time":"2025-10-30T17:57:42Z","timestamp":1761847062000},"page":"1-6","source":"Crossref","is-referenced-by-count":0,"title":["Quality-Guided Dynamic Memory for LLMs-based Long-Term Video Understanding"],"prefix":"10.1109","author":[{"given":"Bimei","family":"Wang","sequence":"first","affiliation":[{"name":"Jinan University,College of Cyber Security,China"}]},{"given":"Jingmei","family":"Jiao","sequence":"additional","affiliation":[{"name":"Lanzhou Jiaotong University,School of Electronic Information Engineering,China"}]},{"given":"Jisheng","family":"Dang","sequence":"additional","affiliation":[{"name":"Lanzhou University,School of Information Science & Engineering,China"}]},{"given":"Qingrun","family":"Jiang","sequence":"additional","affiliation":[{"name":"Sun Yat-sen University,School of Electronics and Communication Engineering,China"}]},{"given":"Jiyuan","family":"Lin","sequence":"additional","affiliation":[{"name":"Sun Yat-sen University,School of Computer Science and Engineering,China"}]},{"given":"Zhixuan","family":"Chen","sequence":"additional","affiliation":[{"name":"Sun Yat-sen University,School of Computer Science and Engineering,China"}]},{"given":"Teng","family":"Wang","sequence":"additional","affiliation":[{"name":"The University of Hong Kong,Faculty of Engineering,China"}]},{"given":"Jun","family":"Yang","sequence":"additional","affiliation":[{"name":"Lanzhou Jiaotong University,School of Electronic Information Engineering,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCA59364.2023.10401518"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2023.3341457"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3072171"},{"article-title":"Gpt-4 technical report","year":"2023","author":"Josh Achiam","key":"ref4"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/COMPSAC61105.2024.00253"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73113-6_11"},{"key":"ref7","first-page":"42071","article-title":"Ow-viscaptor: Abstractors for open-world video instance segmentation and captioning","volume":"37","author":"Choudhuri","year":"2024","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01930"},{"key":"ref9","first-page":"19730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"ICML.","author":"Li","year":"2023"},{"author":"Dai","key":"ref10","article-title":"Instructblip: Towards general-purpose vision-language models with instruction tuning"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.679"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"article-title":"Opt: Open pre-trained transformer language models","year":"2022","author":"Zhang","key":"ref13"},{"article-title":"Llama 2: Open foundation and fine-tuned chat models","year":"2023","author":"Touvron","key":"ref14"},{"key":"ref15","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume":"35","author":"Ouyang","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref16","first-page":"23716","article-title":"Flamingo: a visual language model for few-shot learning","volume":"35","author":"Alayrac","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"article-title":"Minigpt-4: Enhancing vision-language understanding with advanced large language models","year":"2023","author":"Zhu","key":"ref17"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00037"},{"article-title":"Maximizing spatio-temporal entropy of deep 3d cnns for efficient video recognition","year":"2023","author":"Wang","key":"ref19"},{"article-title":"Point-bind & point-llm: Aligning point cloud with multi-modality for 3d understanding, generation, and instruction following","year":"2023","author":"Guo","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2024.3357118"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3141886"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TCDS.2024.3375620"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2022.3167910"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413879"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-69525-5_2"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58517-4_10"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3423390"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TITS.2024.3421623"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2023.3280389"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01282"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00192"},{"article-title":"Eva-clip: Improved training techniques for clip at scale","year":"2023","author":"Sun","key":"ref33"},{"article-title":"Vicuna: An open-source chatbot impressing gpt-4 with 90% chatgpt quality","year":"2023","author":"Chiang","key":"ref34"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_6"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"key":"ref37","first-page":"12493","article-title":"Keeping your eye on the ball: Trajectory attention in video transformers","author":"Patrick","year":"2021","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.2307\/j.ctv36zrf8.5"}],"event":{"name":"2025 IEEE International Conference on Multimedia and Expo (ICME)","start":{"date-parts":[[2025,6,30]]},"location":"Nantes, France","end":{"date-parts":[[2025,7,4]]}},"container-title":["2025 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11208895\/11208897\/11210126.pdf?arnumber=11210126","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T05:43:47Z","timestamp":1761889427000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11210126\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":38,"URL":"https:\/\/doi.org\/10.1109\/icme59968.2025.11210126","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}