{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,4]],"date-time":"2026-04-04T17:48:36Z","timestamp":1775324916016,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":87,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T00:00:00Z","timestamp":1730073600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-sa\/4.0\/"}],"funder":[{"DOI":"10.13039\/https:\/\/doi.org\/10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62372314"],"award-info":[{"award-number":["62372314"]}],"id":[{"id":"10.13039\/https:\/\/doi.org\/10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"HK RGC Theme-based Research Scheme","award":["T43-513\/23-N"],"award-info":[{"award-number":["T43-513\/23-N"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,28]]},"DOI":"10.1145\/3664647.3681115","type":"proceedings-article","created":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T06:59:49Z","timestamp":1729925989000},"page":"7249-7258","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":16,"title":["Prior Knowledge Integration via LLM Encoding and Pseudo Event Regulation for Video Moment Retrieval"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0007-1169-4465","authenticated-orcid":false,"given":"Yiyang","family":"Jiang","sequence":"first","affiliation":[{"name":"The Hong Kong Polytechnic University, Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-2347-4183","authenticated-orcid":false,"given":"Wengyu","family":"Zhang","sequence":"additional","affiliation":[{"name":"The Hong Kong Polytechnic University, Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2473-460X","authenticated-orcid":false,"given":"Xulu","family":"Zhang","sequence":"additional","affiliation":[{"name":"The Hong Kong Polytechnic University, Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5706-5177","authenticated-orcid":false,"given":"Xiao-Yong","family":"Wei","sequence":"additional","affiliation":[{"name":"The Hong Kong Polytechnic University &amp; Sichuan University, Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6720-234X","authenticated-orcid":false,"given":"Chang Wen","family":"Chen","sequence":"additional","affiliation":[{"name":"The Hong Kong Polytechnic University, Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3370-471X","authenticated-orcid":false,"given":"Qing","family":"Li","sequence":"additional","affiliation":[{"name":"The Hong Kong Polytechnic University, Hong Kong, Hong Kong"}]}],"member":"320","published-online":{"date-parts":[[2024,10,28]]},"reference":[{"key":"e_1_3_2_2_1_1","unstructured":"2023. GPT-4V(ision) System Card. https:\/\/api.semanticscholar.org\/CorpusID: 263218031"},{"key":"e_1_3_2_2_2_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00802"},{"key":"e_1_3_2_2_3_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01365"},{"key":"e_1_3_2_2_4_1","volume-title":"Localization, Text Reading, and Beyond. 
arXiv preprint arXiv:2308.12966","author":"Bai Jinze","year":"2023","unstructured":"Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. 2023. Qwen-VL: A Versatile Vision-Language Model for Understanding, Localization, Text Reading, and Beyond. arXiv preprint arXiv:2308.12966 (2023)."},{"key":"e_1_3_2_2_5_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01264-9_12"},{"key":"e_1_3_2_2_6_1","doi-asserted-by":"crossref","unstructured":"Nicolas Carion Francisco Massa Gabriel Synnaeve Nicolas Usunier Alexander Kirillov and Sergey Zagoruyko. 2020. End-to-End Object Detection with Transformers. arXiv:2005.12872 [cs.CV]","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"e_1_3_2_2_7_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.502"},{"key":"e_1_3_2_2_8_1","unstructured":"Jun Chen Deyao Zhu Xiaoqian Shen Xiang Li Zechun Liu Pengchuan Zhang Raghuraman Krishnamoorthi Vikas Chandra Yunyang Xiong and Mohamed Elhoseiny. 2023. MiniGPT-v2: large language model as a unified interface for vision-language multi-task learning. arXiv:2310.09478 [cs.CV]"},{"key":"e_1_3_2_2_9_1","volume-title":"Xing","author":"Chiang Wei-Lin","year":"2023","unstructured":"Wei-Lin Chiang, Zhuohan Li, Zi Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E. Gonzalez, Ion Stoica, and Eric P. Xing. 2023. Vicuna: An Open-Source Chatbot Impressing GPT-4 with 90%* ChatGPT Quality. https:\/\/lmsys.org\/blog\/2023-03-30-vicuna\/"},{"key":"e_1_3_2_2_10_1","unstructured":"HyungWon Chung Le Hou Shayne Longpre Barret Zoph Yi Tay William Fedus Yunxuan Li Xuezhi Wang Mostafa Dehghani Siddhartha Brahma Albert Webson Shixiang Shane Gu Zhuyun Dai Mirac Suzgun Xinyun Chen Aakanksha Chowdhery Alex Castro-Ros Marie Pellat Kevin Robinson Dasha Valter Sharan Narang Gaurav Mishra Adams Yu Vincent Zhao Yanping Huang Andrew Dai Hongkun Yu Slav Petrov Ed H. Chi Jeff Dean Jacob Devlin Adam Roberts Denny Zhou Quoc V. Le and Jason Wei. 2022. Scaling Instruction-Finetuned Language Models. arXiv:2210.11416 [cs.LG]"},{"key":"e_1_3_2_2_11_1","volume-title":"Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi.","author":"Dai Wenliang","year":"2023","unstructured":"Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. 2023. InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning. arXiv:2305.06500 [cs.CV]"},{"key":"e_1_3_2_2_12_1","unstructured":"Victor Escorcia Mattia Soldan Josef Sivic Bernard Ghanem and Bryan Russell. 2019. Temporal Localization of Moments in Video Collections with Natural Language."},{"key":"e_1_3_2_2_13_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"key":"e_1_3_2_2_14_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.563"},{"key":"e_1_3_2_2_15_1","volume-title":"Beyond Visual Cues: Synchronously Exploring Target-Centric Semantics for Vision-Language Tracking. arXiv preprint arXiv:2311.17085","author":"Ge Jiawei","year":"2023","unstructured":"Jiawei Ge, Xiangmei Chen, Jiuxin Cao, Xuelin Zhu, Weijia Liu, and Bo Liu. 2023. Beyond Visual Cues: Synchronously Exploring Target-Centric Semantics for Vision-Language Tracking. 
arXiv preprint arXiv:2311.17085 (2023)."},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.114"},{"key":"e_1_3_2_2_17_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.618"},{"key":"e_1_3_2_2_18_1","doi-asserted-by":"crossref","unstructured":"Fa-Ting Hong Xuanteng Huang Wei-Hong Li and Wei-Shi Zheng. 2020. MINI-Net: Multiple Instance Ranking Network for Video Highlight Detection. arXiv:2007.09833 [cs.CV]","DOI":"10.1007\/978-3-030-58601-0_21"},{"key":"e_1_3_2_2_19_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01273"},{"key":"e_1_3_2_2_20_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01944"},{"key":"e_1_3_2_2_21_1","unstructured":"Jing Yu Koh Daniel Fried and Ruslan Salakhutdinov. 2023. Generating Images with Multimodal Language Models. arXiv:2305.17216 [cs.CL]"},{"key":"e_1_3_2_2_22_1","unstructured":"Jing Yu Koh Ruslan Salakhutdinov and Daniel Fried. 2023. Grounding Language Models to Images for Multimodal Inputs and Outputs. arXiv:2301.13823 [cs.CL]"},{"key":"e_1_3_2_2_23_1","unstructured":"Jie Lei Tamara L. Berg and Mohit Bansal. [n. d.]. QVhighlights test split. https: \/\/codalab.lisn.upsaclay.fr\/competitions\/6937#results"},{"key":"e_1_3_2_2_24_1","unstructured":"Jie Lei Tamara L. Berg and Mohit Bansal. 2021. QVHighlights: Detecting Moments and Highlights in Videos via Natural Language Queries. arXiv:2107.09609 [cs.CV]"},{"key":"e_1_3_2_2_25_1","volume-title":"TVR: A Large-Scale Dataset for Video-Subtitle Moment Retrieval. arXiv:2001.09099 [cs.CV]","author":"Lei Jie","year":"2020","unstructured":"Jie Lei, Licheng Yu, Tamara L. Berg, and Mohit Bansal. 2020. TVR: A Large-Scale Dataset for Video-Subtitle Moment Retrieval. arXiv:2001.09099 [cs.CV]"},{"key":"e_1_3_2_2_26_1","doi-asserted-by":"crossref","unstructured":"Chenliang Li Haiyang Xu Junfeng Tian Wei Wang Ming Yan Bin Bi Jiabo Ye Hehong Chen Guohai Xu Zheng Cao et al. 2022. mPLUG: Effective and Efficient Vision-Language Learning by Cross-modal Skip-connections. arXiv preprint arXiv:2205.12005 (2022).","DOI":"10.18653\/v1\/2022.emnlp-main.488"},{"key":"e_1_3_2_2_27_1","unstructured":"Junnan Li Dongxu Li Silvio Savarese and Steven Hoi. 2023. BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models. arXiv:2301.12597 [cs.CV]"},{"key":"e_1_3_2_2_28_1","volume-title":"BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation. arXiv:2201.12086 [cs.CV]","author":"Li Junnan","year":"2022","unstructured":"Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. 2022. BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation. arXiv:2201.12086 [cs.CV]"},{"key":"e_1_3_2_2_29_1","unstructured":"Kun Chang Li Yinan He Yi Wang Yizhuo Li Wenhai Wang Ping Luo Yali Wang Limin Wang and Yu Qiao. 2024. VideoChat: Chat-Centric Video Understanding. arXiv:2305.06355 [cs.CV]"},{"key":"e_1_3_2_2_30_1","unstructured":"Yanwei Li Chengyao Wang and Jiaya Jia. 2023. LLaMA-VID: An Image is Worth 2 Tokens in Large Language Models. arXiv:2311.17043 [cs.CV]"},{"key":"e_1_3_2_2_31_1","volume-title":"Rui Yan, and Mike Zheng Shou.","author":"Lin Kevin Qinghong","year":"2023","unstructured":"Kevin Qinghong Lin, Pengchuan Zhang, Joya Chen, Shraman Pramanick, Difei Gao, Alex Jinpeng Wang, Rui Yan, and Mike Zheng Shou. 2023. UniVTG: Towards Unified Video-Language Temporal Grounding. 
arXiv:2307.16715 [cs.CV]"},{"key":"e_1_3_2_2_32_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01108"},{"key":"e_1_3_2_2_33_1","unstructured":"Haotian Liu Chunyuan Li Qingyang Wu and Yong Jae Lee. 2023. Visual Instruction Tuning. arXiv:2304.08485 [cs.CV]"},{"key":"e_1_3_2_2_34_1","doi-asserted-by":"publisher","DOI":"10.1145\/3209978.3210003"},{"key":"e_1_3_2_2_35_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298994"},{"key":"e_1_3_2_2_36_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00305"},{"key":"e_1_3_2_2_37_1","volume-title":"Fixing Weight Decay Regularization in Adam. CoRR abs\/1711.05101","author":"Loshchilov Ilya","year":"2017","unstructured":"Ilya Loshchilov and Frank Hutter. 2017. Fixing Weight Decay Regularization in Adam. CoRR abs\/1711.05101 (2017). arXiv:1711.05101 http:\/\/arxiv.org\/abs\/1711. 05101"},{"key":"e_1_3_2_2_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW60793.2023.00297"},{"key":"e_1_3_2_2_39_1","doi-asserted-by":"crossref","unstructured":"Muhammad Maaz Hanoona Rasheed Salman Khan and Fahad Shahbaz Khan. 2023. Video-ChatGPT: Towards Detailed Video Understanding via Large Vision and Language Models. arXiv:2306.05424 [cs.CV]","DOI":"10.18653\/v1\/2024.acl-long.679"},{"key":"e_1_3_2_2_40_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.318"},{"key":"e_1_3_2_2_41_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.318"},{"key":"e_1_3_2_2_42_1","volume-title":"Correlation guided Query-Dependency Calibration in Video Representation Learning for Temporal Grounding. arXiv preprint arXiv:2311.08835","author":"Moon WonJun","year":"2023","unstructured":"WonJun Moon, Sangeek Hyun, SuBeen Lee, and Jae-Pil Heo. 2023. Correlation guided Query-Dependency Calibration in Video Representation Learning for Temporal Grounding. arXiv preprint arXiv:2311.08835 (2023)."},{"key":"e_1_3_2_2_43_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02205"},{"key":"e_1_3_2_2_44_1","unstructured":"OpenAI. 2023. Chatgpt. https:\/\/chat.openai.com\/"},{"key":"e_1_3_2_2_45_1","unstructured":"Long Ouyang Jeff Wu Xu Jiang Diogo Almeida Carroll L. Wainwright Pamela Mishkin Chong Zhang Sandhini Agarwal Katarina Slama Alex Ray John Schulman Jacob Hilton Fraser Kelton Luke Miller Maddie Simens Amanda Askell Peter Welinder Paul Christiano Jan Leike and Ryan Lowe. 2022. Training language models to follow instructions with human feedback. arXiv:2203.02155 [cs.CL]"},{"key":"e_1_3_2_2_46_1","unstructured":"Ziqi Pang Ziyang Xie Yunze Man and Yu-Xiong Wang. 2023. Frozen Transformers in Language Models Are Effective Visual Encoder Layers. arXiv:2310.12973 [cs.CV]"},{"key":"e_1_3_2_2_47_1","unstructured":"Zhiliang Peng Wenhui Wang Li Dong Yaru Hao Shaohan Huang Shuming Ma and Furu Wei. 2023. Kosmos-2: Grounding Multimodal Large Language Models to the World. arXiv:2306.14824 [cs.CL]"},{"key":"e_1_3_2_2_48_1","volume-title":"Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever.","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning Transferable Visual Models From Natural Language Supervision. 
arXiv:2103.00020 [cs.CV]"},{"key":"e_1_3_2_2_49_1","first-page":"1","article-title":"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer","volume":"21","author":"Raffel Colin","year":"2020","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. Journal of Machine Learning Research 21, 140 (2020), 1--67. http:\/\/jmlr.org\/papers\/v21\/20-074.html","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_2_50_1","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00207"},{"key":"e_1_3_2_2_51_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00075"},{"key":"e_1_3_2_2_52_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58589-1_16"},{"key":"e_1_3_2_2_53_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-11752-2_15"},{"key":"e_1_3_2_2_54_1","unstructured":"Karen Simonyan and Andrew Zisserman. 2015. Very Deep Convolutional Networks for Large-Scale Image Recognition. arXiv:1409.1556 [cs.CV]"},{"key":"e_1_3_2_2_55_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00361"},{"key":"e_1_3_2_2_56_1","doi-asserted-by":"crossref","unstructured":"Yale Song Miriam Redi Jordi Vallmitjana and Alejandro Jaimes. 2016. To Click or Not To Click: Automatic Selection of Beautiful Thumbnails from Videos. arXiv:1609.01388 [cs.MM]","DOI":"10.1145\/2983323.2983349"},{"key":"e_1_3_2_2_57_1","volume-title":"Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR).","author":"Song Yale","year":"2015","unstructured":"Yale Song, Jordi Vallmitjana, Amanda Stent, and Alejandro Jaimes. 2015. TVSum: Summarizing Web Videos Using Titles. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)."},{"key":"e_1_3_2_2_58_1","doi-asserted-by":"crossref","unstructured":"Min Sun Ali Farhadi and Steve Seitz. 2014. Ranking Domain-specific Highlights by Analyzing Edited Videos. In ECCV.","DOI":"10.1007\/978-3-319-10590-1_51"},{"key":"e_1_3_2_2_59_1","unstructured":"Quan Sun Qiying Yu Yufeng Cui Fan Zhang Xiaosong Zhang Yueze Wang Hongcheng Gao Jingjing Liu Tiejun Huang and XinlongWang. 2023. Generative Pretraining in Multimodality. arXiv:2307.05222 [cs.CV]"},{"key":"e_1_3_2_2_60_1","volume-title":"Gemini: A Family of Highly Capable Multimodal Models. arXiv:2312.11805 [cs.CL]","author":"Team Gemini","year":"2024","unstructured":"Gemini Team. 2024. Gemini: A Family of Highly Capable Multimodal Models. arXiv:2312.11805 [cs.CL]"},{"key":"e_1_3_2_2_61_1","unstructured":"InternLM Team. 2023. InternLM: A Multilingual Language Model with Progressively Enhanced Capabilities. https:\/\/github.com\/InternLM\/InternLMtechreport."},{"key":"e_1_3_2_2_62_1","unstructured":"Hugo Touvron Thibaut Lavril Gautier Izacard Xavier Martinet Marie-Anne Lachaux Timoth\u00e9e Lacroix Baptiste Rozi\u00e8re Naman Goyal Eric Hambro Faisal Azhar Aurelien Rodriguez Armand Joulin Edouard Grave and Guillaume Lample. 2023. LLaMA: Open and Efficient Foundation Language Models. 
arXiv:2302.13971 [cs.CL] https:\/\/arxiv.org\/abs\/2302.13971"},{"key":"e_1_3_2_2_63_1","unstructured":"Hugo Touvron Louis Martin Kevin Stone Peter Albert Amjad Almahairi Yasmine Babaei Nikolay Bashlykov Soumya Batra Prajjwal Bhargava Shruti Bhosale Dan Bikel Lukas Blecher Cristian Canton Ferrer Moya Chen Guillem Cucurull David Esiobu Jude Fernandes Jeremy Fu Wenyin Fu Brian Fuller Cynthia Gao Vedanuj Goswami Naman Goyal Anthony Hartshorn Saghar Hosseini Rui Hou Hakan Inan Marcin Kardas Viktor Kerkez Madian Khabsa Isabel Kloumann Artem Korenev Punit Singh Koura Marie-Anne Lachaux Thibaut Lavril Jenya Lee Diana Liskovich Yinghai Lu Yuning Mao Xavier Martinet Todor Mihaylov Pushkar Mishra Igor Molybog Yixin Nie Andrew Poulton Jeremy Reizenstein Rashi Rungta Kalyan Saladi Alan Schelten Ruan Silva Eric Michael Smith Ranjan Subramanian Xiaoqing Ellen Tan Binh Tang Ross Taylor Adina Williams Jian Xiang Kuan Puxin Xu Zheng Yan Iliyan Zarov Yuchen Zhang Angela Fan Melanie Kambadur Sharan Narang Aurelien Rodriguez Robert Stojnic Sergey Edunov and Thomas Scialom. 2023. Llama 2: Open Foundation and Fine-Tuned Chat Models. arXiv:2307.09288 [cs.CL]"},{"key":"e_1_3_2_2_64_1","volume-title":"Metaxas","author":"Wang Lezi","year":"2020","unstructured":"Lezi Wang, Dong Liu, Rohit Puri, and Dimitris N. Metaxas. 2020. Learning Trailer Moments in Full-Length Movies. arXiv:2008.08502 [cs.CV]"},{"key":"e_1_3_2_2_65_1","unstructured":"Wenhai Wang Zhe Chen Xiaokang Chen Jiannan Wu Xizhou Zhu Gang Zeng Ping Luo Tong Lu Jie Zhou Yu Qiao and Jifeng Dai. 2023. VisionLLM: Large Language Model is also an Open-Ended Decoder for Vision-Centric Tasks. arXiv:2305.11175 [cs.CV]"},{"key":"e_1_3_2_2_66_1","unstructured":"Weihan Wang Qingsong Lv Wenmeng Yu Wenyi Hong Ji Qi Yan Wang Junhui Ji Zhuoyi Yang Lei Zhao Xixuan Song Jiazheng Xu Bin Xu Juanzi Li Yuxiao Dong Ming Ding and Jie Tang. 2023. CogVLM: Visual Expert for Pretrained Language Models. arXiv:2311.03079 [cs.CV]"},{"key":"e_1_3_2_2_67_1","doi-asserted-by":"publisher","DOI":"10.1145\/1459359.1459371"},{"key":"e_1_3_2_2_68_1","doi-asserted-by":"publisher","DOI":"10.1145\/2072298.2072356"},{"key":"e_1_3_2_2_69_1","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2222902"},{"key":"e_1_3_2_2_70_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i4.16406"},{"key":"e_1_3_2_2_71_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00135"},{"key":"e_1_3_2_2_72_1","doi-asserted-by":"crossref","unstructured":"Bo Xiong Yannis Kalantidis Deepti Ghadiyaram and Kristen Grauman. 2019. Less is More: Learning Highlight Detection from Video Duration. arXiv:1903.00859 [cs.CV]","DOI":"10.1109\/CVPR.2019.00135"},{"key":"e_1_3_2_2_73_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00787"},{"key":"e_1_3_2_2_74_1","unstructured":"Minghao Xu HangWang Bingbing Ni Riheng Zhu Zhenbang Sun and Changhu Wang. 2021. Cross-category Video Highlight Detection via Set-based Learning. arXiv:2108.11770 [cs.CV]"},{"key":"e_1_3_2_2_75_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01253"},{"key":"e_1_3_2_2_76_1","doi-asserted-by":"crossref","unstructured":"Huan Yang Baoyuan Wang Stephen Lin David Wipf Minyi Guo and Baining Guo. 2015. Unsupervised Extraction of Video Highlights Via Robust Recurrent Auto-encoders. 
arXiv:1510.01442 [cs.CV]","DOI":"10.1109\/ICCV.2015.526"},{"key":"e_1_3_2_2_77_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00785"},{"key":"e_1_3_2_2_78_1","volume-title":"Semantic conditioned dynamic modulation for temporal sentence grounding in videos. Advances in Neural Information Processing Systems 32","author":"Yuan Yitian","year":"2019","unstructured":"Yitian Yuan, Lin Ma, Jingwen Wang, Wei Liu, and Wenwu Zhu. 2019. Semantic conditioned dynamic modulation for temporal sentence grounding in videos. Advances in Neural Information Processing Systems 32 (2019)."},{"key":"e_1_3_2_2_79_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00134"},{"key":"e_1_3_2_2_80_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.acl-main.585"},{"key":"e_1_3_2_2_81_1","doi-asserted-by":"crossref","unstructured":"Ke Zhang Wei-Lun Chao Fei Sha and Kristen Grauman. 2016. Video Summarization with Long Short-term Memory. arXiv:1605.08110 [cs.CV]","DOI":"10.1007\/978-3-319-46478-7_47"},{"key":"e_1_3_2_2_82_1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6984"},{"key":"e_1_3_2_2_83_1","doi-asserted-by":"publisher","DOI":"10.1145\/3343031.3350879"},{"key":"e_1_3_2_2_84_1","doi-asserted-by":"publisher","DOI":"10.1145\/3331184.3331235"},{"key":"e_1_3_2_2_85_1","unstructured":"Kaizhi Zheng Xuehai He and Xin Eric Wang. 2024. MiniGPT-5: Interleaved Vision-and-Language Generation via Generative Vokens. arXiv:2310.02239 [cs.CV]"},{"key":"e_1_3_2_2_86_1","volume-title":"Proceedings of the 29th ACM International Conference on Multimedia.","author":"Zhijian Hou","year":"2021","unstructured":"Hou Zhijian, Ngo Chong-Wah, and Chan Wing-Kwong. 2021. Conquer: Contextual query-aware ranking for video corpus moment retrieval. In Proceedings of the 29th ACM International Conference on Multimedia."},{"key":"e_1_3_2_2_87_1","unstructured":"Deyao Zhu Jun Chen Xiaoqian Shen Xiang Li and Mohamed Elhoseiny. 2023. MiniGPT-4: Enhancing Vision-Language Understanding with Advanced Large Language Models. arXiv:2304.10592 [cs.CV]"}],"event":{"name":"MM '24: The 32nd ACM International Conference on Multimedia","location":"Melbourne VIC Australia","acronym":"MM '24","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 32nd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3681115","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3664647.3681115","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T00:57:53Z","timestamp":1750294673000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3681115"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,28]]},"references-count":87,"alternative-id":["10.1145\/3664647.3681115","10.1145\/3664647"],"URL":"https:\/\/doi.org\/10.1145\/3664647.3681115","relation":{},"subject":[],"published":{"date-parts":[[2024,10,28]]},"assertion":[{"value":"2024-10-28","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}