{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,17]],"date-time":"2025-09-17T16:00:09Z","timestamp":1758124809204,"version":"3.41.0"},"publisher-location":"Cham","reference-count":49,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031918551","type":"print"},{"value":"9783031918568","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-91856-8_22","type":"book-chapter","created":{"date-parts":[[2025,5,23]],"date-time":"2025-05-23T10:28:09Z","timestamp":1747996089000},"page":"378-394","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["LSVOS Challenge Report: Large-Scale Complex and\u00a0Long Video Object Segmentation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4868-6526","authenticated-orcid":false,"given":"Henghui","family":"Ding","sequence":"first","affiliation":[]},{"given":"Lingyi","family":"Hong","sequence":"additional","affiliation":[]},{"given":"Chang","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Ning","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Linjie","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Yuchen","family":"Fan","sequence":"additional","affiliation":[]},{"given":"Deshui","family":"Miao","sequence":"additional","affiliation":[]},{"given":"Yameng","family":"Gu","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Zhenyu","family":"He","sequence":"additional","affiliation":[]},{"given":"Yaowei","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Ming-Hsuan","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Jinming","family":"Chai","sequence":"additional","affiliation":[]},{"given":"Qin","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Junpei","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Licheng","family":"Jiao","sequence":"additional","affiliation":[]},{"given":"Fang","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Xinyu","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Jing","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Kexin","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xu","family":"Liu","sequence":"additional","affiliation":[]},{"given":"LingLing","family":"Li","sequence":"additional","affiliation":[]},{"given":"Hao","family":"Fang","sequence":"additional","affiliation":[]},{"given":"Feiyu","family":"Pan","sequence":"additional","affiliation":[]},{"given":"Xiankai","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Runmin","family":"Cong","sequence":"additional","affiliation":[]},{"given":"Tuyen","family":"Tran","sequence":"additional","affiliation":[]},{"given":"Bin","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Yisi","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Hanyi","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xingjian","family":"He","sequence":"additional","affiliation":[]},{"given":"Jing","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,12]]},"reference":[{"key":"22_CR1","unstructured":"Cao, L., et al.: The second place solution for the 4th large-scale video object segmentation challenge\u2013track 3: referring video object segmentation. arXiv preprint arXiv:2206.12035 (2022)"},{"key":"22_CR2","doi-asserted-by":"crossref","unstructured":"Chen, X., Li, Z., Yuan, Y., Yu, G., Shen, J., Qi, D.: State-aware tracker for real-time video object segmentation. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00940"},{"key":"22_CR3","doi-asserted-by":"crossref","unstructured":"Cheng, H.K., Oh, S.W., Price, B., Lee, J.Y., Schwing, A.: Putting the object back into video object segmentation. arXiv (2023)","DOI":"10.1109\/CVPR52733.2024.00304"},{"key":"22_CR4","doi-asserted-by":"crossref","unstructured":"Cheng, J., Tsai, Y.H., Hung, W.C., Wang, S., Yang, M.H.: Fast and accurate online video object segmentation via tracking parts. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00774"},{"key":"22_CR5","doi-asserted-by":"crossref","unstructured":"Ding, H., Cohen, S., Price, B., Jiang, X.: Phraseclick: toward achieving flexible interactive segmentation by phrase and click. In: ECCV. Springer (2020)","DOI":"10.1007\/978-3-030-58580-8_25"},{"key":"22_CR6","doi-asserted-by":"crossref","unstructured":"Ding, H., Liu, C., He, S., Jiang, X., Loy, C.C.: MeViS: a large-scale benchmark for video segmentation with motion expressions. In: ICCV, pp. 2694\u20132703 (2023)","DOI":"10.1109\/ICCV51070.2023.00254"},{"key":"22_CR7","doi-asserted-by":"crossref","unstructured":"Ding, H., Liu, C., He, S., Jiang, X., Torr, P.H., Bai, S.: MOSE: a new dataset for video object segmentation in complex scenes. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 20224\u201320234 (2023)","DOI":"10.1109\/ICCV51070.2023.01850"},{"key":"22_CR8","doi-asserted-by":"crossref","unstructured":"Ding, H., Liu, C., Wang, S., Jiang, X.: Vision-language transformer and query generation for referring segmentation. In: ICCV, pp. 16321\u201316330 (2021)","DOI":"10.1109\/ICCV48922.2021.01601"},{"issue":"6","key":"22_CR9","doi-asserted-by":"publisher","first-page":"7900","DOI":"10.1109\/TPAMI.2022.3217852","volume":"45","author":"H Ding","year":"2023","unstructured":"Ding, H., Liu, C., Wang, S., Jiang, X.: VLT: vision-language transformer and query generation for referring segmentation. IEEE Trans. Pattern Anal. Mach. Intell. 45(6), 7900\u20137916 (2023)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"22_CR10","unstructured":"Ding, H., et al.: PVUW 2024 challenge on complex video understanding: methods and results. In: ECCV Workshop (2024)"},{"key":"22_CR11","doi-asserted-by":"crossref","unstructured":"Han, J., Yang, L., Zhang, D., Chang, X., Liang, X.: Reinforcement cutting-agent learning for video object segmentation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00946"},{"key":"22_CR12","doi-asserted-by":"crossref","unstructured":"He, S., Ding, H.: Decoupling static and hierarchical motion perception for referring video segmentation. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01266"},{"key":"22_CR13","unstructured":"He, S., Ding, H., Liu, C., Jiang, X.: GREC: generalized referring expression comprehension. arXiv preprint arXiv:2308.16182 (2023)"},{"key":"22_CR14","doi-asserted-by":"crossref","unstructured":"Hong, L., et al.: LVOS: a benchmark for long-term video object segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 13480\u201313492 (2023)","DOI":"10.1109\/ICCV51070.2023.01240"},{"key":"22_CR15","doi-asserted-by":"crossref","unstructured":"Hong, L., et al.: LVOS: a benchmark for large-scale long-term video object segmentation. arXiv preprint arXiv:2404.19326 (2024)","DOI":"10.1109\/ICCV51070.2023.01240"},{"key":"22_CR16","doi-asserted-by":"crossref","unstructured":"Hu, P., Wang, G., Kong, X., Kuen, J., Tan, Y.P.: Motion-guided cascaded refinement network for video object segmentation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00152"},{"key":"22_CR17","unstructured":"Hu, Z., Chen, B., Gao, Y., Ji, Z., Bai, J.: 1st place solution for youtubevos challenge 2022: referring video object segmentation. arXiv preprint arXiv:2212.14679 (2022)"},{"key":"22_CR18","doi-asserted-by":"crossref","unstructured":"Huang, X., Xu, J., Tai, Y.W., Tang, C.K.: Fast video object segmentation with temporal aggregation network and dynamic template matching. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00890"},{"key":"22_CR19","doi-asserted-by":"crossref","unstructured":"Jampani, V., Gadde, R., Gehler, P.V.: Video propagation networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.336"},{"key":"22_CR20","doi-asserted-by":"crossref","unstructured":"Jang, W.D., Kim, C.S.: Online video object segmentation via convolutional trident network. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.790"},{"key":"22_CR21","unstructured":"Ke, L., et al.: Segment anything in high quality. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"22_CR22","doi-asserted-by":"crossref","unstructured":"Khoreva, A., Rohrbach, A., Schiele, B.: Video object segmentation with language referring expressions. In: ACCV (2018)","DOI":"10.1007\/978-3-030-20870-7_8"},{"key":"22_CR23","doi-asserted-by":"publisher","unstructured":"Kitaev, N., Cao, S., Klein, D.: Multilingual constituency parsing with self-attention and pre-training. In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, Florence, Italy, pp. 3499\u20133505. Association for Computational Linguistics (2019). https:\/\/doi.org\/10.18653\/v1\/P19-1340. https:\/\/www.aclweb.org\/anthology\/P19-1340","DOI":"10.18653\/v1\/P19-1340"},{"key":"22_CR24","doi-asserted-by":"crossref","unstructured":"Li, X., et al.: Transformer-based visual segmentation: a survey. IEEE TPAMI (2024)","DOI":"10.1109\/TPAMI.2024.3434373"},{"key":"22_CR25","unstructured":"Li, X., Miao, D., He, Z., Wang, Y., Lu, H., Yang, M.H.: Learning spatial-semantic features for robust video object segmentation (2024). https:\/\/arxiv.org\/abs\/2407.07760"},{"key":"22_CR26","doi-asserted-by":"crossref","unstructured":"Lin, H., Qi, X., Jia, J.: AGSS-VOS: attention guided single-shot video object segmentation. In: CVPR (2019)","DOI":"10.1109\/ICCV.2019.00405"},{"key":"22_CR27","doi-asserted-by":"crossref","unstructured":"Liu, C., Ding, H., Jiang, X.: GRES: generalized referring expression segmentation. In: CVPR, pp. 23592\u201323601 (2023)","DOI":"10.1109\/CVPR52729.2023.02259"},{"key":"22_CR28","doi-asserted-by":"publisher","first-page":"3054","DOI":"10.1109\/TIP.2023.3277791","volume":"32","author":"C Liu","year":"2023","unstructured":"Liu, C., Ding, H., Zhang, Y., Jiang, X.: Multi-modal mutual attention and iterative interaction for referring image segmentation. IEEE Trans. Image Process. 32, 3054\u20133065 (2023)","journal-title":"IEEE Trans. Image Process."},{"key":"22_CR29","doi-asserted-by":"crossref","unstructured":"Liu, C., Jiang, X., Ding, H.: Instance-specific feature propagation for referring segmentation. IEEE TMM (2023)","DOI":"10.1109\/TMM.2022.3163578"},{"issue":"1","key":"22_CR30","doi-asserted-by":"publisher","first-page":"16","DOI":"10.1007\/s44267-024-00049-8","volume":"2","author":"C Liu","year":"2024","unstructured":"Liu, C., Jiang, X., Ding, H.: PrimitiveNet: decomposing the global constraints for referring segmentation. Vis. Intell. 2(1), 16 (2024)","journal-title":"Vis. Intell."},{"key":"22_CR31","doi-asserted-by":"crossref","unstructured":"Liu, S., et al.: Grounding dino: marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499 (2023)","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"22_CR32","unstructured":"Luo, Z., et al.: 1st place solution for 5th LSVOS challenge: referring video object segmentation. arXiv preprint arXiv:2401.00663 (2024)"},{"key":"22_CR33","doi-asserted-by":"crossref","unstructured":"Perazzi, F., Khoreva, A., Benenson, R., Schiele, B., Sorkine-Hornung, A.: Learning video object segmentation from static images. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.372"},{"key":"22_CR34","doi-asserted-by":"crossref","unstructured":"Perazzi, F., Pont-Tuset, J., McWilliams, B., Van\u00a0Gool, L., Gross, M., Sorkine-Hornung, A.: A benchmark dataset and evaluation methodology for video object segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 724\u2013732 (2016)","DOI":"10.1109\/CVPR.2016.85"},{"key":"22_CR35","unstructured":"Ravi, N., et al.: Sam 2: segment anything in images and videos. arXiv preprint arXiv:2408.00714 (2024). https:\/\/arxiv.org\/abs\/2408.00714"},{"key":"22_CR36","doi-asserted-by":"crossref","unstructured":"Seo, S., Lee, J.Y., Han, B.: URVOS: unified referring video object segmentation network with a large-scale benchmark. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58555-6_13"},{"key":"22_CR37","doi-asserted-by":"crossref","unstructured":"Wang, H., Deng, C., Yan, J., Tao, D.: Asymmetric cross-guided attention network for actor and action video segmentation from natural language query. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00404"},{"key":"22_CR38","doi-asserted-by":"crossref","unstructured":"Wu, J., et al.: Towards open vocabulary learning: a survey. IEEE TPAMI (2024)","DOI":"10.1109\/TPAMI.2024.3361862"},{"key":"22_CR39","doi-asserted-by":"crossref","unstructured":"Wug\u00a0Oh, S., Lee, J.Y., Sunkavalli, K., Joo\u00a0Kim, S.: Fast video object segmentation by reference-guided mask propagation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00770"},{"key":"22_CR40","doi-asserted-by":"crossref","unstructured":"Xiao, H., Feng, J., Lin, G., Liu, Y., Zhang, M.: Monet: deep motion exploitation for video object segmentation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00125"},{"key":"22_CR41","doi-asserted-by":"crossref","unstructured":"Xu, N., et al.: Youtube-vos: sequence-to-sequence video object segmentation. In: ECCV (2018)","DOI":"10.1007\/978-3-030-01228-1_36"},{"key":"22_CR42","unstructured":"Xu, N., et al.: Youtube-vos: a large-scale video object segmentation benchmark. arXiv preprint arXiv:1809.03327 (2018)"},{"key":"22_CR43","doi-asserted-by":"crossref","unstructured":"Xu, S., Liu, D., Bao, L., Liu, W., Zhou, P.: MHP-VOS: multiple hypotheses propagation for video object segmentation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00040"},{"key":"22_CR44","doi-asserted-by":"crossref","unstructured":"Yan, B., et al.: Universal instance perception as object discovery and retrieval. In: CVPR, pp. 15325\u201315336 (2023)","DOI":"10.1109\/CVPR52729.2023.01471"},{"key":"22_CR45","doi-asserted-by":"crossref","unstructured":"Yan, S., et al.: Referred by multi-modality: a unified temporal transformer for video object segmentation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a038, pp. 6449\u20136457 (2024)","DOI":"10.1609\/aaai.v38i6.28465"},{"key":"22_CR46","doi-asserted-by":"crossref","unstructured":"Yang, L., Fan, Y., Xu, N.: Video instance segmentation. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00529"},{"key":"22_CR47","doi-asserted-by":"crossref","unstructured":"Ye, L., Rochan, M., Liu, Z., Zhang, X., Wang, Y.: Referring segmentation in images and videos with cross-modal self-attention network. IEEE TPAMI (2021)","DOI":"10.1109\/TPAMI.2021.3054384"},{"key":"22_CR48","doi-asserted-by":"crossref","unstructured":"Zhang, L., Lin, Z., Zhang, J., Lu, H., He, Y.: Fast video object segmentation via dynamic targeting network. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00568"},{"key":"22_CR49","doi-asserted-by":"crossref","unstructured":"Zhang, T., et al.: DVIS: decoupled video instance segmentation framework. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1282\u20131291 (2023)","DOI":"10.1109\/ICCV51070.2023.00124"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-91856-8_22","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,23]],"date-time":"2025-05-23T10:28:31Z","timestamp":1747996111000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-91856-8_22"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031918551","9783031918568"],"references-count":49,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-91856-8_22","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"12 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}