{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T01:58:32Z","timestamp":1773799112501,"version":"3.50.1"},"publisher-location":"Cham","reference-count":61,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031727603","type":"print"},{"value":"9783031727610","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72761-0_16","type":"book-chapter","created":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T07:01:50Z","timestamp":1727593310000},"page":"276-293","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["Beyond MOT: Semantic Multi-object 
Tracking"],"prefix":"10.1007","author":[{"given":"Yunhao","family":"Li","sequence":"first","affiliation":[]},{"given":"Qin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Hao","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xue","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Jiali","family":"Yao","sequence":"additional","affiliation":[]},{"given":"Shaohua","family":"Dong","sequence":"additional","affiliation":[]},{"given":"Heng","family":"Fan","sequence":"additional","affiliation":[]},{"given":"Libo","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,30]]},"reference":[{"key":"16_CR1","doi-asserted-by":"crossref","unstructured":"Bai, H., Cheng, W., Chu, P., Liu, J., Zhang, K., Ling, H.: GMOT-40: a benchmark for generic multiple object tracking. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00665"},{"key":"16_CR2","unstructured":"Banerjee, S., Lavie, A.: METEOR: an automatic metric for MT evaluation with improved correlation with human judgments. In: ACL Workshop (2005)"},{"key":"16_CR3","doi-asserted-by":"crossref","unstructured":"Bernardin, K., Stiefelhagen, R.: Evaluating multiple object tracking performance: the clear mot metrics. JIVP (2008)","DOI":"10.1155\/2008\/246309"},{"key":"16_CR4","doi-asserted-by":"crossref","unstructured":"Bewley, A., Ge, Z., Ott, L., Ramos, F., Upcroft, B.: Simple online and realtime tracking. In: ICIP (2016)","DOI":"10.1109\/ICIP.2016.7533003"},{"key":"16_CR5","doi-asserted-by":"crossref","unstructured":"Cao, J., Pang, J., Weng, X., Khirodkar, R., Kitani, K.: Observation-centric sort: rethinking sort for robust multi-object tracking. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00934"},{"key":"16_CR6","doi-asserted-by":"crossref","unstructured":"Chen, S., Shi, Z., Mettes, P., Snoek, C.G.: Social fabric: tubelet compositions for video relation detection. 
In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01323"},{"key":"16_CR7","doi-asserted-by":"crossref","unstructured":"Chu, P., Wang, J., You, Q., Ling, H., Liu, Z.: TransMOT: spatial-temporal graph transformer for multiple object tracking. In: WACV (2023)","DOI":"10.1109\/WACV56688.2023.00485"},{"key":"16_CR8","doi-asserted-by":"crossref","unstructured":"Cui, Y., Zeng, C., Zhao, X., Yang, Y., Wu, G., Wang, L.: SportsMOT: a large multi-object tracking dataset in multiple sports scenes. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00910"},{"key":"16_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1007\/978-3-030-58558-7_26","volume-title":"Computer Vision \u2013 ECCV 2020","author":"A Dave","year":"2020","unstructured":"Dave, A., Khurana, T., Tokmakov, P., Schmid, C., Ramanan, D.: TAO: a large-scale benchmark for tracking any object. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12350, pp. 436\u2013454. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58558-7_26"},{"key":"16_CR10","unstructured":"Dendorfer, P., et al.: MOT20: a benchmark for multi object tracking in crowded scenes. arXiv (2020)"},{"key":"16_CR11","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: CVPR (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"16_CR12","doi-asserted-by":"crossref","unstructured":"Du, D., et al.: The unmanned aerial vehicle benchmark: object detection and tracking. In: ECCV (2018)","DOI":"10.1007\/978-3-030-01249-6_23"},{"key":"16_CR13","first-page":"8725","volume":"25","author":"Y Du","year":"2023","unstructured":"Du, Y., et al.: StrongSORT: make DeepSORT great again. 
TMM 25, 8725\u20138737 (2023)","journal-title":"TMM"},{"key":"16_CR14","doi-asserted-by":"crossref","unstructured":"Duan, K., Bai, S., Xie, L., Qi, H., Huang, Q., Tian, Q.: CenterNet: keypoint triplets for object detection. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00667"},{"key":"16_CR15","doi-asserted-by":"crossref","unstructured":"Fellbaum, C.: WordNet: an electronic lexical database (1998)","DOI":"10.7551\/mitpress\/7287.001.0001"},{"key":"16_CR16","doi-asserted-by":"crossref","unstructured":"Ferryman, J., Shahrokni, A.: PETS2009: dataset and challenge. In: PET Workshop (2009)","DOI":"10.1109\/PETS-WINTER.2009.5399556"},{"key":"16_CR17","doi-asserted-by":"crossref","unstructured":"Gao, R., Wang, L.: MeMOTR: long-term memory-augmented transformer for multi-object tracking. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00908"},{"key":"16_CR18","doi-asserted-by":"crossref","unstructured":"Geiger, A., Lenz, P., Stiller, C., Urtasun, R.: Vision meets robotics: the KITTI dataset (2013)","DOI":"10.1177\/0278364913491297"},{"key":"16_CR19","doi-asserted-by":"crossref","unstructured":"Girshick, R.: Fast R-CNN. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.169"},{"key":"16_CR20","doi-asserted-by":"crossref","unstructured":"Han, X., Pasquier, T., Bates, A., Mickens, J., Seltzer, M.: Unicorn: runtime provenance-based detector for advanced persistent threats. In: NDSS (2020)","DOI":"10.14722\/ndss.2020.24046"},{"key":"16_CR21","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"16_CR22","doi-asserted-by":"crossref","unstructured":"Heilbron, F.C., Escorcia, V., Ghanem, B., Niebles, J.C.: ActivityNet: a large-scale video benchmark for human activity understanding. 
In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"16_CR23","doi-asserted-by":"crossref","unstructured":"Krishna, R., Hata, K., Ren, F., Fei-Fei, L., Carlos\u00a0Niebles, J.: Dense-captioning events in videos. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.83"},{"key":"16_CR24","unstructured":"Leal-Taix\u00e9, L., Milan, A., Reid, I., Roth, S., Schindler, K.: MOTChallenge 2015: towards a benchmark for multi-target tracking. arXiv (2015)"},{"key":"16_CR25","doi-asserted-by":"crossref","unstructured":"Liang, X., Lee, L., Xing, E.P.: Deep variation-structured reinforcement learning for visual relationship and attribute detection. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.469"},{"key":"16_CR26","unstructured":"Lin, C.Y.: ROUGE: a package for automatic evaluation of summaries. In: ACL (2004)"},{"key":"16_CR27","doi-asserted-by":"crossref","unstructured":"Lin, K., et al.: SwinBERT: end-to-end transformers with sparse attention for video captioning. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01742"},{"key":"16_CR28","doi-asserted-by":"crossref","unstructured":"Liu, C., Jin, Y., Xu, K., Gong, G., Mu, Y.: Beyond short-term snippet: video relation detection with spatio-temporal global context. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01085"},{"key":"16_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"852","DOI":"10.1007\/978-3-319-46448-0_51","volume-title":"Computer Vision \u2013 ECCV 2016","author":"C Lu","year":"2016","unstructured":"Lu, C., Krishna, R., Bernstein, M., Fei-Fei, L.: Visual relationship detection with language priors. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9905, pp. 852\u2013869. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46448-0_51"},{"key":"16_CR30","doi-asserted-by":"crossref","unstructured":"Luiten, J., et al.: HOTA: a higher order metric for evaluating multi-object tracking. 
IJCV (2021)","DOI":"10.1007\/s11263-020-01375-2"},{"key":"16_CR31","doi-asserted-by":"crossref","unstructured":"Maggiolino, G., Ahmad, A., Cao, J., Kitani, K.: Deep OC-SORT: multi-pedestrian tracking by adaptive re-identification. In: ICIP (2023)","DOI":"10.1109\/ICIP49359.2023.10222576"},{"key":"16_CR32","doi-asserted-by":"crossref","unstructured":"Meinhardt, T., Kirillov, A., Leal-Taixe, L., Feichtenhofer, C.: TrackFormer: multi-object tracking with transformers. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00864"},{"key":"16_CR33","unstructured":"Milan, A., Leal-Taix\u00e9, L., Reid, I., Roth, S., Schindler, K.: MOT16: a benchmark for multi-object tracking. arXiv (2016)"},{"key":"16_CR34","doi-asserted-by":"crossref","unstructured":"Papineni, K., Roukos, S., Ward, T., Zhu, W.J.: BLEU: a method for automatic evaluation of machine translation. In: ACL (2002)","DOI":"10.3115\/1073083.1073135"},{"key":"16_CR35","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"17","DOI":"10.1007\/978-3-319-48881-3_2","volume-title":"Computer Vision \u2013 ECCV 2016 Workshops","author":"E Ristani","year":"2016","unstructured":"Ristani, E., Solera, F., Zou, R., Cucchiara, R., Tomasi, C.: Performance measures and a data set for\u00a0multi-target, multi-camera tracking. In: Hua, G., J\u00e9gou, H. (eds.) ECCV 2016. LNCS, vol. 9914, pp. 17\u201335. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-48881-3_2"},{"key":"16_CR36","doi-asserted-by":"crossref","unstructured":"Seo, P.H., Nagrani, A., Arnab, A., Schmid, C.: End-to-end generative pretraining for multimodal video captioning. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01743"},{"key":"16_CR37","doi-asserted-by":"crossref","unstructured":"Shen, Y., Gu, X., Xu, K., Fan, H., Wen, L., Zhang, L.: Accurate and fast compressed video captioning. 
In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01426"},{"key":"16_CR38","doi-asserted-by":"crossref","unstructured":"Sun, P., Cao, J., Jiang, Y., Yuan, Z., Bai, S., Kitani, K., Luo, P.: DanceTrack: multi-object tracking in uniform appearance and diverse motion. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.02032"},{"key":"16_CR39","unstructured":"Sun, P., et al.: TransTrack: multiple object tracking with transformer. arXiv (2020)"},{"key":"16_CR40","doi-asserted-by":"publisher","unstructured":"Taud, H., Mas, J.: Multilayer perceptron (MLP). In: Camacho Olmedo, M., Paegelow, M., Mas, J.F., Escobar, F. (eds.) Geomatic Approaches for Modeling Land Change Scenarios. Lecture Notes in Geoinformation and Cartography, pp. 451\u2013455. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-319-60801-3_27","DOI":"10.1007\/978-3-319-60801-3_27"},{"key":"16_CR41","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NIPS (2017)"},{"key":"16_CR42","doi-asserted-by":"crossref","unstructured":"Vedantam, R., Lawrence\u00a0Zitnick, C., Parikh, D.: CIDEr: consensus-based image description evaluation. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"16_CR43","doi-asserted-by":"crossref","unstructured":"Wojke, N., Bewley, A., Paulus, D.: Simple online and realtime tracking with a deep association metric. In: ICIP (2017)","DOI":"10.1109\/ICIP.2017.8296962"},{"key":"16_CR44","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"733","DOI":"10.1007\/978-3-031-19803-8_43","volume-title":"Computer Vision \u2013 ECCV 2022","author":"B Yan","year":"2022","unstructured":"Yan, B., et al.: Towards grand unification of object tracking. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13681, pp. 733\u2013751. Springer, Cham (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-19803-8_43"},{"key":"16_CR45","doi-asserted-by":"crossref","unstructured":"Yan, B., et al.: Universal instance perception as object discovery and retrieval. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01471"},{"key":"16_CR46","doi-asserted-by":"crossref","unstructured":"Yang, A., et al.: Vid2Seq: large-scale pretraining of a visual language model for dense video captioning. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01032"},{"key":"16_CR47","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"341","DOI":"10.1007\/978-3-031-20047-2_20","volume-title":"Computer Vision - ECCV 2022","author":"B Ye","year":"2022","unstructured":"Ye, B., Chang, H., Ma, B., Shan, S., Chen, X.: Joint feature learning and relation modeling for tracking: a one-stream framework. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13682, pp. 341\u2013357. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20047-2_20"},{"key":"16_CR48","doi-asserted-by":"crossref","unstructured":"Yu, F., et al.: BDD100K: a diverse driving dataset for heterogeneous multitask learning. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00271"},{"key":"16_CR49","doi-asserted-by":"crossref","unstructured":"Yu, F., Wang, D., Shelhamer, E., Darrell, T.: Deep layer aggregation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00255"},{"key":"16_CR50","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"659","DOI":"10.1007\/978-3-031-19812-0_38","volume-title":"Computer Vision \u2013 ECCV 2022","author":"F Zeng","year":"2022","unstructured":"Zeng, F., Dong, B., Zhang, Y., Wang, T., Zhang, X., Wei, Y.: MOTR: end-to-end multiple-object tracking with transformer. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13687, pp. 659\u2013675. Springer, Cham (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-19812-0_38"},{"key":"16_CR51","doi-asserted-by":"crossref","unstructured":"Zhang, H., Kyaw, Z., Chang, S.F., Chua, T.S.: Visual translation embedding network for visual relation detection. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.331"},{"key":"16_CR52","doi-asserted-by":"publisher","first-page":"496","DOI":"10.1007\/s11263-022-01711-8","volume":"131","author":"L Zhang","year":"2023","unstructured":"Zhang, L., Gao, J., Xiao, Z., Fan, H.: AnimalTrack: a benchmark for multi-animal tracking in the wild. IJCV 131, 496\u2013513 (2023)","journal-title":"IJCV"},{"key":"16_CR53","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/978-3-031-20047-2_1","volume-title":"Computer Vision - ECCV 2022","author":"Y Zhang","year":"2022","unstructured":"Zhang, Y., et al.: ByteTrack: multi-object tracking by associating every detection box. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13682, pp. 1\u201321. Springer, Cham (2022)"},{"key":"16_CR54","doi-asserted-by":"publisher","first-page":"3069","DOI":"10.1007\/s11263-021-01513-4","volume":"129","author":"Y Zhang","year":"2021","unstructured":"Zhang, Y., Wang, C., Wang, X., Zeng, W., Liu, W.: FairMOT: on the fairness of detection and re-identification in multiple object tracking. IJCV 129, 3069\u20133087 (2021)","journal-title":"IJCV"},{"key":"16_CR55","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Wang, T., Zhang, X.: MOTRv2: bootstrapping end-to-end multi-object tracking by pretrained object detectors. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02112"},{"key":"16_CR56","doi-asserted-by":"crossref","unstructured":"Zheng, S., Chen, S., Jin, Q.: VRDFormer: end-to-end video visual relation detection with transformers. 
In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01827"},{"key":"16_CR57","doi-asserted-by":"crossref","unstructured":"Zhou, L., Zhou, Y., Corso, J.J., Socher, R., Xiong, C.: End-to-end dense video captioning with masked transformer. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00911"},{"key":"16_CR58","doi-asserted-by":"crossref","unstructured":"Zhou, X., Arnab, A., Sun, C., Schmid, C.: Dense video object captioning from disjoint supervision. arXiv (2023)","DOI":"10.1109\/CVPR52733.2024.01727"},{"key":"16_CR59","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"474","DOI":"10.1007\/978-3-030-58548-8_28","volume-title":"Computer Vision \u2013 ECCV 2020","author":"X Zhou","year":"2020","unstructured":"Zhou, X., Koltun, V., Kr\u00e4henb\u00fchl, P.: Tracking objects as points. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12349, pp. 474\u2013490. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58548-8_28"},{"key":"16_CR60","doi-asserted-by":"crossref","unstructured":"Zhou, X., Yin, T., Koltun, V., Kr\u00e4henb\u00fchl, P.: Global tracking transformers. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00857"},{"key":"16_CR61","doi-asserted-by":"publisher","first-page":"7380","DOI":"10.1109\/TPAMI.2021.3119563","volume":"44","author":"P Zhu","year":"2021","unstructured":"Zhu, P., Wen, L., Du, D., Bian, X., Fan, H., Hu, Q., Ling, H.: Detection and tracking meet drones challenge. 
TPAMI 44, 7380\u20137399 (2021)","journal-title":"TPAMI"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72761-0_16","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T07:32:05Z","timestamp":1727595125000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72761-0_16"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,30]]},"ISBN":["9783031727603","9783031727610"],"references-count":61,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72761-0_16","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,30]]},"assertion":[{"value":"30 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}