{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:16:59Z","timestamp":1775578619270,"version":"3.50.1"},"publisher-location":"Cham","reference-count":58,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031730122","type":"print"},{"value":"9783031730139","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73013-9_6","type":"book-chapter","created":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T07:47:15Z","timestamp":1732607235000},"page":"89-106","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["Beyond Prompt Learning: Continual Adapter for\u00a0Efficient Rehearsal-Free Continual Learning"],"prefix":"10.1007","author":[{"given":"Xinyuan","family":"Gao","sequence":"first","affiliation":[]},{"given":"Songlin","family":"Dong","sequence":"additional","affiliation":[]},{"given":"Yuhang","family":"He","sequence":"additional","affiliation":[]},{"given":"Qiang","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yihong","family":"Gong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,27]]},"reference":[{"key":"6_CR1","doi-asserted-by":"crossref","unstructured":"Aljundi, R., Babiloni, F., Elhoseiny, M., Rohrbach, M., 
Tuytelaars, T.: Memory aware synapses: learning what (not) to forget. In: Proceedings of the European conference on computer vision (ECCV), pp. 139\u2013154 (2018)","DOI":"10.1007\/978-3-030-01219-9_9"},{"key":"6_CR2","first-page":"15920","volume":"33","author":"P Buzzega","year":"2020","unstructured":"Buzzega, P., Boschini, M., Porrello, A., Abati, D., Calderara, S.: Dark experience for general continual learning: a strong, simple baseline. Adv. Neural. Inf. Process. Syst. 33, 15920\u201315930 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"6_CR3","doi-asserted-by":"crossref","unstructured":"Cha, H., Lee, J., Shin, J.: CO2L: contrastive continual learning. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9516\u20139525 (2021)","DOI":"10.1109\/ICCV48922.2021.00938"},{"key":"6_CR4","unstructured":"Chaudhry, A., et al.: On tiny episodic memories in continual learning. arXiv preprint arXiv:1902.10486 (2019)"},{"key":"6_CR5","unstructured":"Chen, S., et al.: AdaptFormer: adapting vision transformers for scalable visual recognition. Adv. Neural. Inf. Process. Syst. 35, 16664\u201316678 (2022)"},{"key":"6_CR6","doi-asserted-by":"crossref","unstructured":"Choi, Y., El-Khamy, M., Lee, J.: Dual-teacher class-incremental learning with data-free generative replay. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3543\u20133552 (2021)","DOI":"10.1109\/CVPRW53098.2021.00393"},{"key":"6_CR7","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"6_CR8","doi-asserted-by":"crossref","unstructured":"Dong, S., Hong, X., Tao, X., Chang, X., Wei, X., Gong, Y.: Few-shot class-incremental learning via relation knowledge distillation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a035, pp. 
1255\u20131263 (2021)","DOI":"10.1609\/aaai.v35i2.16213"},{"key":"6_CR9","unstructured":"Dong, Y., Cordonnier, J.B., Loukas, A.: Attention is not all you need: pure attention loses rank doubly exponentially with depth. In: International Conference on Machine Learning, pp. 2793\u20132803. PMLR (2021)"},{"key":"6_CR10","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. In: ICLR (2021)"},{"key":"6_CR11","doi-asserted-by":"crossref","unstructured":"Douillard, A., Ram\u00e9, A., Couairon, G., Cord, M.: DyTox: transformers for continual learning with dynamic token expansion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9285\u20139295 (2022)","DOI":"10.1109\/CVPR52688.2022.00907"},{"key":"6_CR12","doi-asserted-by":"crossref","unstructured":"Fan, H., Xiong, B., Mangalam, K., Li, Y., Yan, Z., Malik, J., Feichtenhofer, C.: Multiscale vision transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6824\u20136835 (2021)","DOI":"10.1109\/ICCV48922.2021.00675"},{"issue":"4","key":"6_CR13","doi-asserted-by":"publisher","first-page":"128","DOI":"10.1016\/S1364-6613(99)01294-2","volume":"3","author":"RM French","year":"1999","unstructured":"French, R.M.: Catastrophic forgetting in connectionist networks. Trends Cogn. Sci. 3(4), 128\u2013135 (1999)","journal-title":"Trends Cogn. Sci."},{"key":"6_CR14","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"423","DOI":"10.1007\/978-3-031-20050-2_25","volume-title":"Computer Vision \u2013 ECCV 2022","author":"Q Gao","year":"2022","unstructured":"Gao, Q., Zhao, C., Ghanem, B., Zhang, J.: R-DFCIL: relation-guided representation learning for data-free class incremental learning. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13683, pp. 423\u2013439. Springer, Cham (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-20050-2_25"},{"key":"6_CR15","doi-asserted-by":"crossref","unstructured":"Gao, X., He, Y., Dong, S., Cheng, J., Wei, X., Gong, Y.: DKT: diverse knowledge transfer transformer for class incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 24236\u201324245 (2023)","DOI":"10.1109\/CVPR52729.2023.02321"},{"key":"6_CR16","doi-asserted-by":"crossref","unstructured":"Gao, Z., Cen, J., Chang, X.: Consistent prompting for rehearsal-free continual learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 28463\u201328473 (2024)","DOI":"10.1109\/CVPR52733.2024.02689"},{"key":"6_CR17","doi-asserted-by":"crossref","unstructured":"Hendrycks, D., et\u00a0al.: The many faces of robustness: a critical analysis of out-of-distribution generalization. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8340\u20138349 (2021)","DOI":"10.1109\/ICCV48922.2021.00823"},{"key":"6_CR18","doi-asserted-by":"crossref","unstructured":"Hou, S., Pan, X., Loy, C.C., Wang, Z., Lin, D.: Learning a unified classifier incrementally via rebalancing. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 831\u2013839 (2019)","DOI":"10.1109\/CVPR.2019.00092"},{"key":"6_CR19","unstructured":"Houlsby, N., et al.: Parameter-efficient transfer learning for NLP. In: International Conference on Machine Learning, pp. 2790\u20132799. PMLR (2019)"},{"key":"6_CR20","unstructured":"Hu, E.J., et al.: LoRA: low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)"},{"key":"6_CR21","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"709","DOI":"10.1007\/978-3-031-19827-4_41","volume-title":"Computer Vision \u2013 ECCV 2022","author":"M Jia","year":"2022","unstructured":"Jia, M., et al.: Visual prompt tuning. 
In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13693, pp. 709\u2013727. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19827-4_41"},{"key":"6_CR22","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"6_CR23","doi-asserted-by":"crossref","unstructured":"Kirkpatrick, J., Pascanu, R., et al.: Overcoming catastrophic forgetting in neural networks. Proc. Natl. Acad. Sci. 114(13), 3521\u20133526 (2017)","DOI":"10.1073\/pnas.1611835114"},{"key":"6_CR24","unstructured":"Krizhevsky, A., Hinton, G.: Learning multiple layers of features from tiny images. Technical report, Citeseer (2009)"},{"issue":"12","key":"6_CR25","doi-asserted-by":"publisher","first-page":"2935","DOI":"10.1109\/TPAMI.2017.2773081","volume":"40","author":"Z Li","year":"2018","unstructured":"Li, Z., Hoiem, D.: Learning without forgetting. T-PAMI 40(12), 2935\u20132947 (2018)","journal-title":"T-PAMI"},{"key":"6_CR26","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"6_CR27","unstructured":"Lomonaco, V., Maltoni, D.: Core50: a new dataset and benchmark for continuous object recognition. In: Conference on Robot Learning, pp. 17\u201326. PMLR (2017)"},{"key":"6_CR28","doi-asserted-by":"crossref","unstructured":"Mao, X., et al.: Towards robust vision transformer. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
12042\u201312051 (2022)","DOI":"10.1109\/CVPR52688.2022.01173"},{"key":"6_CR29","unstructured":"McDonnell, M.D., Gong, D., Parvaneh, A., Abbasnejad, E., van\u00a0den Hengel, A.: RanPAC: random projections and pre-trained models for continual learning. Adv. Neural Inf. Process. Syst. 36 (2024)"},{"key":"6_CR30","doi-asserted-by":"crossref","unstructured":"Peng, X., Bai, Q., Xia, X., Huang, Z., Saenko, K., Wang, B.: Moment matching for multi-source domain adaptation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 1406\u20131415 (2019)","DOI":"10.1109\/ICCV.2019.00149"},{"key":"6_CR31","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"524","DOI":"10.1007\/978-3-030-58536-5_31","volume-title":"Computer Vision \u2013 ECCV 2020","author":"A Prabhu","year":"2020","unstructured":"Prabhu, A., Torr, P.H.S., Dokania, P.K.: GDumb: a simple approach that questions our progress in continual learning. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12347, pp. 524\u2013540. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58536-5_31"},{"key":"6_CR32","doi-asserted-by":"crossref","unstructured":"Qin, Y., Chen, Y., Peng, D., Peng, X., Zhou, J.T., Hu, P.: Noisy-correspondence learning for text-to-image person re-identification. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 27197\u201327206 (2024)","DOI":"10.1109\/CVPR52733.2024.02568"},{"key":"6_CR33","doi-asserted-by":"crossref","unstructured":"Qin, Y., Peng, D., Peng, X., Wang, X., Hu, P.: Deep evidential learning with noisy correspondence for cross-modal retrieval. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 4948\u20134956 (2022)","DOI":"10.1145\/3503161.3547922"},{"key":"6_CR34","unstructured":"Qin, Y., Sun, Y., Peng, D., Zhou, J.T., Peng, X., Hu, P.: Cross-modal active complementary learning with self-refining correspondence. 
Adv. Neural Inf. Process. Syst. 36 (2024)"},{"key":"6_CR35","doi-asserted-by":"crossref","unstructured":"Rebuffi, S.A., Kolesnikov, A., Sperl, G., Lampert, C.H.: iCaRL: incremental classifier and representation learning. In: CVPR, pp. 2001\u20132010 (2017)","DOI":"10.1109\/CVPR.2017.587"},{"issue":"23","key":"6_CR36","doi-asserted-by":"publisher","first-page":"6777","DOI":"10.3390\/s20236777","volume":"20","author":"JL Shieh","year":"2020","unstructured":"Shieh, J.L., et al.: Continual learning strategy in one-stage object detection framework based on experience replay for autonomous driving vehicle. Sensors 20(23), 6777 (2020)","journal-title":"Sensors"},{"key":"6_CR37","doi-asserted-by":"crossref","unstructured":"Smith, J., Hsu, Y.C., Balloch, J., Shen, Y., Jin, H., Kira, Z.: Always be dreaming: a new approach for data-free class-incremental learning. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 9374\u20139384 (2021)","DOI":"10.1109\/ICCV48922.2021.00924"},{"key":"6_CR38","doi-asserted-by":"crossref","unstructured":"Smith, J.S., Karlinsky, L., et\u00a0al.: CODA-prompt: continual decomposed attention-based prompting for rehearsal-free continual learning. In: CVPR, pp. 11909\u201311919 (2023)","DOI":"10.1109\/CVPR52729.2023.01146"},{"key":"6_CR39","doi-asserted-by":"crossref","unstructured":"Song, X., He, Y., Dong, S., Gong, Y.: Non-exemplar domain incremental object detection via learning domain bias. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a038, pp. 15056\u201315065 (2024)","DOI":"10.1609\/aaai.v38i13.29427"},{"key":"6_CR40","doi-asserted-by":"crossref","unstructured":"Tang, Y.M., et\u00a0al.: When prompt-based incremental learning does not meet strong pretraining. 
In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00164"},{"key":"6_CR41","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"254","DOI":"10.1007\/978-3-030-58529-7_16","volume-title":"Computer Vision \u2013 ECCV 2020","author":"X Tao","year":"2020","unstructured":"Tao, X., Chang, X., Hong, X., Wei, X., Gong, Y.: Topology-preserving class-incremental learning. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12364, pp. 254\u2013270. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58529-7_16"},{"key":"6_CR42","doi-asserted-by":"crossref","unstructured":"Tu, C.H., Mai, Z., Chao, W.L.: Visual query tuning: towards effective usage of intermediate representations for parameter and memory efficient transfer learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7725\u20137735 (2023)","DOI":"10.1109\/CVPR52729.2023.00746"},{"key":"6_CR43","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"6_CR44","doi-asserted-by":"publisher","first-page":"659","DOI":"10.1016\/j.neunet.2023.02.001","volume":"161","author":"E Verwimp","year":"2023","unstructured":"Verwimp, E., et al.: CLAD: a realistic continual learning benchmark for autonomous driving. Neural Netw. 161, 659\u2013669 (2023)","journal-title":"Neural Netw."},{"key":"6_CR45","doi-asserted-by":"crossref","unstructured":"Wang, F.Y., Zhou, D.W., Ye, H.J., Zhan, D.C.: FOSTER: feature boosting and compression for class-incremental learning. arXiv preprint arXiv:2204.04662 (2022)","DOI":"10.1007\/978-3-031-19806-9_23"},{"key":"6_CR46","doi-asserted-by":"crossref","unstructured":"Wang, L., et\u00a0al.: Hierarchical decomposition of prompt-based continual learning: Rethinking obscured sub-optimality. 
In: NeurIPS (2024)"},{"issue":"10","key":"6_CR47","doi-asserted-by":"publisher","first-page":"5921","DOI":"10.1109\/TCSVT.2023.3262739","volume":"33","author":"S Wang","year":"2023","unstructured":"Wang, S., Shi, W., Dong, S., Gao, X., Song, X., Gong, Y.: Semantic knowledge guided class-incremental learning. IEEE Trans. Circuits Syst. Video Technol. 33(10), 5921\u20135931 (2023)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"6_CR48","doi-asserted-by":"crossref","unstructured":"Wang, S., Shi, W., He, Y., Yu, Y., Gong, Y.: Non-exemplar class-incremental learning via adaptive old class reconstruction. In: Proceedings of the 31st ACM International Conference on Multimedia, pp. 4524\u20134534 (2023)","DOI":"10.1145\/3581783.3611926"},{"key":"6_CR49","first-page":"5682","volume":"35","author":"Y Wang","year":"2022","unstructured":"Wang, Y., Huang, Z., Hong, X.: S-prompts learning with pre-trained transformers: An Occam\u2019s razor for domain incremental learning. Adv. Neural. Inf. Process. Syst. 35, 5682\u20135695 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"6_CR50","doi-asserted-by":"crossref","unstructured":"Wang, Z., et\u00a0al.: DualPrompt: complementary prompting for rehearsal-free continual learning. arXiv preprint arXiv:2204.04799 (2022)","DOI":"10.1007\/978-3-031-19809-0_36"},{"key":"6_CR51","doi-asserted-by":"crossref","unstructured":"Wang, Z., et al.: Learning to prompt for continual learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 139\u2013149 (2022)","DOI":"10.1109\/CVPR52688.2022.00024"},{"key":"6_CR52","unstructured":"Wu, Y., et al.: Large scale incremental learning. arXiv preprint arXiv:1905.13260 (2019)"},{"key":"6_CR53","doi-asserted-by":"crossref","unstructured":"Yan, S., Xie, J., He, X.: DER: dynamically expandable representation for class incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
3014\u20133023 (2021)","DOI":"10.1109\/CVPR46437.2021.00303"},{"key":"6_CR54","unstructured":"Zenke, F., Poole, B., Ganguli, S.: Continual learning through synaptic intelligence. In: International Conference on Machine Learning, pp. 3987\u20133995. PMLR (2017)"},{"key":"6_CR55","doi-asserted-by":"crossref","unstructured":"Zhang, G., Wang, L., Kang, G., Chen, L., Wei, Y.: SLCA: slow learner with classifier alignment for continual learning on a pre-trained model. arXiv preprint arXiv:2303.05118 (2023)","DOI":"10.1109\/ICCV51070.2023.01754"},{"key":"6_CR56","doi-asserted-by":"crossref","unstructured":"Zhou, D.W., et\u00a0al.: Revisiting class-incremental learning with pre-trained models: generalizability and adaptivity are all you need. arXiv (2023)","DOI":"10.1007\/s11263-024-02218-0"},{"key":"6_CR57","doi-asserted-by":"crossref","unstructured":"Zhu, F., Zhang, X.Y., Wang, C., Yin, F., Liu, C.L.: Prototype augmentation and self-supervision for incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5871\u20135880 (2021)","DOI":"10.1109\/CVPR46437.2021.00581"},{"key":"6_CR58","doi-asserted-by":"crossref","unstructured":"Zhu, K., Zhai, W., Cao, Y., Luo, J., Zha, Z.J.: Self-sustaining representation expansion for non-exemplar class-incremental learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
9296\u20139305 (2022)","DOI":"10.1109\/CVPR52688.2022.00908"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73013-9_6","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T08:27:01Z","timestamp":1732609621000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73013-9_6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,27]]},"ISBN":["9783031730122","9783031730139"],"references-count":58,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73013-9_6","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,27]]},"assertion":[{"value":"27 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}