{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,7]],"date-time":"2026-02-07T13:06:30Z","timestamp":1770469590320,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":21,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819620739","type":"print"},{"value":"9789819620746","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-96-2074-6_36","type":"book-chapter","created":{"date-parts":[[2024,12,31]],"date-time":"2024-12-31T16:07:40Z","timestamp":1735661260000},"page":"302-309","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Interactive Video Search with Multi-modal LLM Video Captioning"],"prefix":"10.1007","author":[{"given":"Yu-Tong","family":"Cheng","sequence":"first","affiliation":[]},{"given":"Jiaxin","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Zhixin","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Jiangshan","family":"He","sequence":"additional","affiliation":[]},{"given":"Xiao-Yong","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Chong-Wah","family":"Ngo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,1,1]]},"reference":[{"key":"36_CR1","unstructured":"Amato, G., et al.: VISIONE 5.0: enhanced user 
interface and ai models for VBS2024. In: Rudinac, S., Hanjalic, A., Liem, C., Worring, M., J\u00f3nsson, B.\u00de, Liu, B., Yamakata, Y. (eds.) MultiMedia Modeling, pp. 332\u2013339. Springer Nature Switzerland, Cham (2024)"},{"key":"36_CR2","doi-asserted-by":"crossref","unstructured":"Bain, M., Nagrani, A., Varol, G., Zisserman, A.: Frozen in time: a joint video and image encoder for end-to-end retrieval. In: IEEE International Conference on Computer Vision, pp. 1\u201315 (2021)","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"36_CR3","doi-asserted-by":"publisher","unstructured":"Berns, F., Rossetto, L., Schoeffmann, K., Beecks, C., Awad, G.: V3c1 dataset: an evaluation of content characteristics. In: Proceedings of the 2019 on International Conference on Multimedia Retrieval, p. 334\u2013338. ICMR 2019, Association for Computing Machinery, New York, NY, USA (2019). https:\/\/doi.org\/10.1145\/3323873.3325051, https:\/\/doi.org\/10.1145\/3323873.3325051","DOI":"10.1145\/3323873.3325051"},{"key":"36_CR4","doi-asserted-by":"publisher","unstructured":"Heller, S., et\u00a0al.: Interactive video retrieval evaluation at a distance: comparing sixteen interactive video search systems in a remote setting at the 10th video browser showdown. Multimedia Tools Appl. 81(2), 2887\u20132906 (2022). https:\/\/doi.org\/10.1007\/s13735-021-00225-2","DOI":"10.1007\/s13735-021-00225-2"},{"key":"36_CR5","doi-asserted-by":"crossref","unstructured":"Hezel, N., Schall, K., Jung, K., Barthel, K.U.: Efficient search and browsing of large-scale video collections with vibro. In: \u00de\u00f3r J\u00f3nsson, B., et al. (eds.) MultiMedia Modeling, pp. 487\u2013492. Springer International Publishing, Cham (2022)","DOI":"10.1007\/978-3-030-98355-0_43"},{"key":"36_CR6","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In: ICML, pp. 
19730\u201319742 (2023)"},{"key":"36_CR7","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: TGIF: a new dataset and benchmark on animated GIF description. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4641\u20134650 (2016)","DOI":"10.1109\/CVPR.2016.502"},{"key":"36_CR8","doi-asserted-by":"publisher","unstructured":"Loko\u010d, J., et al.: Interactive video retrieval in the age of effective joint embedding deep models: lessons from the 11th VBS. Multimedia Syst. 29, 3481\u20133504 (2023). https:\/\/doi.org\/10.1007\/s00530-023-01143-5","DOI":"10.1007\/s00530-023-01143-5"},{"key":"36_CR9","doi-asserted-by":"crossref","unstructured":"Ma, Z., Wu, J., Loo, W., Ngo, C.W.: Reinforcement learning enhanced picHunter for interactive search. In: MultiMedia Modeling: 29th International Conference, MMM 2023, Proceedings, Part I, pp. 690\u2013696 (2023)","DOI":"10.1007\/978-3-031-27077-2_60"},{"key":"36_CR10","doi-asserted-by":"crossref","unstructured":"Ma, Z., Wu, J., Ngo, C.W.: Leveraging LLMs and generative models for interactive known-item video search. In: Rudinac, S., et al. (eds.) MultiMedia Modeling, pp. 380\u2013386. Springer Nature Switzerland, Cham (2024)","DOI":"10.1007\/978-3-031-53302-0_35"},{"key":"36_CR11","doi-asserted-by":"crossref","unstructured":"Miech, A., Zhukov, D., Alayrac, J.B., Tapaswi, M., Laptev, I., Sivic, J.: HowTo100M: Learning a Text-Video Embedding by Watching Hundred Million Narrated Video Clips. In: ICCV, pp. 1\u201311 (2019)","DOI":"10.1109\/ICCV.2019.00272"},{"key":"36_CR12","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision (2021). https:\/\/arxiv.org\/abs\/2103.00020"},{"key":"36_CR13","doi-asserted-by":"crossref","unstructured":"Reimers, N., Gurevych, I.: Sentence-BERT: Sentence embeddings using siamese BERT-networks. In: Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing. 
Association for Computational Linguistics (2019). https:\/\/arxiv.org\/abs\/1908.10084","DOI":"10.18653\/v1\/D19-1410"},{"key":"36_CR14","doi-asserted-by":"crossref","unstructured":"Schall, K., Hezel, N., Jung, K., Barthel, K.U.: Vibro: Video browsing with semantic and visual image embeddings. In: Dang-Nguyen, D.T., et al. (eds.) MultiMedia Modeling, pp. 665\u2013670. Springer International Publishing, Cham (2023)","DOI":"10.1007\/978-3-031-27077-2_56"},{"key":"36_CR15","unstructured":"Schuhmann, C., et al.: LAION-5b: an open large-scale dataset for training next generation image-text models. In: Thirty-sixth Conference on Neural Information Processing Systems, pp. 1\u201350 (2022)"},{"key":"36_CR16","doi-asserted-by":"publisher","unstructured":"Vadicamo, L., et al.: Evaluating performance and trends in interactive video retrieval: Insights from the 12th VBS competition. IEEE Access 12, 79342\u201379366 (2024). https:\/\/doi.org\/10.1109\/ACCESS.2024.3405638","DOI":"10.1109\/ACCESS.2024.3405638"},{"key":"36_CR17","doi-asserted-by":"crossref","unstructured":"Wang, X., Wu, J., Chen, J., Li, L., Wang, Y.F., Wang, W.Y.: VATEX: a large-scale, high-quality multilingual dataset for video-and-language research. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4580\u20134590 (2019)","DOI":"10.1109\/ICCV.2019.00468"},{"key":"36_CR18","doi-asserted-by":"publisher","unstructured":"Wu, J., Ngo, C.W.: Interpretable embedding for ad-hoc video search. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 3357\u20133366. MM 2020, Association for Computing Machinery, New York, NY, USA (2020). https:\/\/doi.org\/10.1145\/3394171.3413916, https:\/\/doi.org\/10.1145\/3394171.3413916","DOI":"10.1145\/3394171.3413916"},{"key":"36_CR19","doi-asserted-by":"publisher","unstructured":"Wu, J., Ngo, C.W., Chan, W.K.: Improving interpretable embeddings for ad-hoc video search with generative captions and multi-word concept bank. 
In: Proceedings of the 2024 International Conference on Multimedia Retrieval, pp. 73\u201382. ICMR 2024, Association for Computing Machinery, New York, NY, USA (2024). https:\/\/doi.org\/10.1145\/3652583.3658052, https:\/\/doi.org\/10.1145\/3652583.3658052","DOI":"10.1145\/3652583.3658052"},{"key":"36_CR20","doi-asserted-by":"crossref","unstructured":"Xu, J., Mei, T., Yao, T., Rui, Y.: MSR-VTT: A large video description dataset for bridging video and language. In: Proceedings of the IEEE International Conference on Computer Vision and Pattern Recognition, pp. 5288\u20135296 (2016)","DOI":"10.1109\/CVPR.2016.571"},{"key":"36_CR21","unstructured":"Zhang, Y., et al.: LLaVA-NeXT: A strong zero-shot video understanding model (2024). https:\/\/llava-vl.github.io\/blog\/2024-04-30-llava-next-video\/"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-2074-6_36","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,31]],"date-time":"2024-12-31T17:09:32Z","timestamp":1735664972000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-2074-6_36"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9789819620739","9789819620746"],"references-count":21,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-2074-6_36","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"1 January 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Nara","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Japan","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"9 January 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 January 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/mmm2025.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}