{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:33:00Z","timestamp":1777656780303,"version":"3.51.4"},"publisher-location":"Cham","reference-count":56,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726835","type":"print"},{"value":"9783031726842","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72684-2_22","type":"book-chapter","created":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:10:15Z","timestamp":1730574615000},"page":"383-400","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["ActionSwitch: Class-Agnostic Detection of\u00a0Simultaneous Actions in\u00a0Streaming 
Videos"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1571-4359","authenticated-orcid":false,"given":"Hyolim","family":"Kang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8629-3929","authenticated-orcid":false,"given":"Jeongseok","family":"Hyun","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0418-900X","authenticated-orcid":false,"given":"Joungbin","family":"An","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5867-0782","authenticated-orcid":false,"given":"Youngjae","family":"Yu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8512-216X","authenticated-orcid":false,"given":"Seon Joo","family":"Kim","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,3]]},"reference":[{"key":"22_CR1","doi-asserted-by":"crossref","unstructured":"An, J., Kang, H., Han, S.H., Yang, M.H., Kim, S.J.: Miniroad: minimal rnn framework for online action detection. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2023)","DOI":"10.1109\/ICCV51070.2023.00949"},{"key":"22_CR2","doi-asserted-by":"crossref","unstructured":"Bar, A., et al.: Detreg: unsupervised pretraining with region priors for object detection. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.01420"},{"key":"22_CR3","doi-asserted-by":"crossref","unstructured":"Bodla, N., Singh, B., Chellappa, R., Davis, L.S.: Soft-nms \u2013 improving object detection with one line of code. 
In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2017)","DOI":"10.1109\/ICCV.2017.593"},{"key":"22_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"22_CR5","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? A new model and the kinetics dataset. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"22_CR6","doi-asserted-by":"crossref","unstructured":"Chen, J., Mittal, G., Yu, Y., Kong, Y., Chen, M.: Gatehub: gated history unit with background suppression for online action detection. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.01930"},{"key":"22_CR7","unstructured":"Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. In: NIPS 2014 Workshop on Deep Learning, December 2014"},{"key":"22_CR8","unstructured":"Dai, J., Li, Y., He, K., Sun, J.: R-fcn: object detection via region-based fully convolutional networks. Advances in Neural Information Processing Systems (NeurIPS) (2016)"},{"key":"22_CR9","unstructured":"Damen, D., et\u00a0al.: Rescaling egocentric vision. 
arXiv preprint arXiv:2006.13256 (2020)"},{"key":"22_CR10","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"269","DOI":"10.1007\/978-3-319-46454-1_17","volume-title":"Computer Vision \u2013 ECCV 2016","author":"R De Geest","year":"2016","unstructured":"De Geest, R., Gavves, E., Ghodrati, A., Li, Z., Snoek, C., Tuytelaars, T.: Online action detection. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9909, pp. 269\u2013284. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46454-1_17"},{"key":"22_CR11","doi-asserted-by":"crossref","unstructured":"Eun, H., Moon, J., Park, J., Jung, C., Kim, C.: Learning to discriminate information for online action detection. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.00089"},{"key":"22_CR12","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Fan, H., Malik, J., He, K.: Slowfast networks for video recognition. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/ICCV.2019.00630"},{"key":"22_CR13","doi-asserted-by":"crossref","unstructured":"Gao, J., Yang, Z., Nevatia, R.: Red: reinforced encoder-decoder networks for action anticipation. In: The British Machine Vision Conference (BMVC) (2017)","DOI":"10.5244\/C.31.92"},{"key":"22_CR14","doi-asserted-by":"crossref","unstructured":"Gao, M., Xu, M., Davis, L.S., Socher, R., Xiong, C.: Startnet: online detection of action start in untrimmed videos. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/ICCV.2019.00564"},{"key":"22_CR15","doi-asserted-by":"crossref","unstructured":"Gao, M., Zhou, Y., Xu, R., Socher, R., Xiong, C.: Woad: weakly supervised online action detection in untrimmed videos. 
In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.00195"},{"key":"22_CR16","doi-asserted-by":"crossref","unstructured":"Girshick, R.: Fast r-cnn. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2015)","DOI":"10.1109\/ICCV.2015.169"},{"key":"22_CR17","unstructured":"Gu, X., Lin, T.Y., Kuo, W., Cui, Y.: Open-vocabulary object detection via vision and language knowledge distillation. In: ICLR (2021)"},{"key":"22_CR18","doi-asserted-by":"crossref","unstructured":"Jaiswal, A., Wu, Y., Natarajan, P., Natarajan, P.: Class-agnostic object detection. In: IEEE Winter Conference on Applications of Computer Vision (WACV) (2021)","DOI":"10.1109\/WACV48630.2021.00096"},{"key":"22_CR19","unstructured":"Jiang, Y.G., et al.: THUMOS challenge: action recognition with a large number of classes (2014). http:\/\/crcv.ucf.edu\/THUMOS14\/"},{"key":"22_CR20","doi-asserted-by":"crossref","unstructured":"Kang, H., Kim, K., Ko, Y., Kim, S.J.: Cag-qil: context-aware actionness grouping via q imitation learning for online temporal action localization. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.01347"},{"key":"22_CR21","doi-asserted-by":"crossref","unstructured":"Kim, D., Lin, T.Y., Angelova, A., Kweon, I.S., Kuo, W.: Learning open-world object proposals without learning to classify. IEEE Robot. Autom. Lett. (2022)","DOI":"10.1109\/LRA.2022.3146922"},{"key":"22_CR22","doi-asserted-by":"crossref","unstructured":"Kim, J., Misu, T., Chen, Y.T., Tawari, A., Canny, J.: Grounding human-to-vehicle advice for self-driving vehicles. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2019)","DOI":"10.1109\/CVPR.2019.01084"},{"key":"22_CR23","doi-asserted-by":"publisher","unstructured":"Kim, Y.H., Kang, H., Kim, S.J.: A sliding window scheme for online temporal action localization. 
In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022. ECCV 2022. LNCS, vol. 13694, pp. 653\u2013669. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19830-4_37","DOI":"10.1007\/978-3-031-19830-4_37"},{"key":"22_CR24","doi-asserted-by":"crossref","unstructured":"Kim, Y.H., Nam, S., Kim, S.J.: Temporally smooth online action detection using cycle-consistent future anticipation. Pattern Recognit. (2021)","DOI":"10.1016\/j.patcog.2021.107954"},{"key":"22_CR25","doi-asserted-by":"crossref","unstructured":"Kim, Y.H., Nam, S., Kim, S.J.: 2PESNET: towards online processing of temporal action localization. Pattern Recognit. (2022)","DOI":"10.1016\/j.patcog.2022.108871"},{"key":"22_CR26","unstructured":"Kuhn, H.W.: The Hungarian method for the assignment problem. Nav. Res. Logist. Q. (1955)"},{"key":"22_CR27","unstructured":"Lee, Y.: Scaling Robot Learning with Skills. Ph.D. thesis, University of Southern California, Viterbi School of Engineering (2022)"},{"key":"22_CR28","doi-asserted-by":"crossref","unstructured":"Lin, T., Liu, X., Li, X., Ding, E., Wen, S.: Bmn: boundary-matching network for temporal action proposal generation. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/ICCV.2019.00399"},{"key":"22_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1007\/978-3-030-01225-0_1","volume-title":"Computer Vision \u2013 ECCV 2018","author":"T Lin","year":"2018","unstructured":"Lin, T., Zhao, X., Su, H., Wang, C., Yang, M.: BSN: boundary sensitive network for temporal action proposal generation. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11208, pp. 3\u201321. Springer, Cham (2018). 
https:\/\/doi.org\/10.1007\/978-3-030-01225-0_1"},{"key":"22_CR30","doi-asserted-by":"crossref","unstructured":"Liu, X., Hu, Y., Bai, S., Ding, F., Bai, X., Torr, P.H.: Multi-shot temporal event localization: a benchmark. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.01241"},{"key":"22_CR31","doi-asserted-by":"publisher","first-page":"5427","DOI":"10.1109\/TIP.2022.3195321","volume":"31","author":"X Liu","year":"2022","unstructured":"Liu, X., et al.: End-to-end temporal action detection with transformer. IEEE Trans. Image Process. 31, 5427\u20135441 (2022)","journal-title":"IEEE Trans. Image Process."},{"key":"22_CR32","doi-asserted-by":"publisher","first-page":"6937","DOI":"10.1109\/TIP.2022.3217368","volume":"31","author":"Y Liu","year":"2022","unstructured":"Liu, Y., Wang, L., Wang, Y., Ma, X., Qiao, Y.: Fineaction: a fine-grained video dataset for temporal action localization. IEEE Trans. Image Process. 31, 6937\u20136950 (2022)","journal-title":"IEEE Trans. Image Process."},{"key":"22_CR33","doi-asserted-by":"publisher","unstructured":"Maaz, M., Rasheed, H., Khan, S., Khan, F.S., Anwer, R.M., Yang, M.H.: Class-agnostic object detection with multi-modal transformer. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022. ECCV 2022. LNCS, vol. 13670, pp. 512\u2013531. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20080-9_30","DOI":"10.1007\/978-3-031-20080-9_30"},{"key":"22_CR34","doi-asserted-by":"crossref","unstructured":"Rasheed, H., Khattak, M.U., Maaz, M., Khan, S., Khan, F.S.: Fine-tuned clip models are efficient video learners. 
In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.00633"},{"key":"22_CR35","doi-asserted-by":"crossref","unstructured":"Shou, M.Z., Lei, S.W., Wang, W., Ghadiyaram, D., Feiszli, M.: Generic event boundary detection: a benchmark for event segmentation. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.00797"},{"key":"22_CR36","doi-asserted-by":"crossref","unstructured":"Shou, Z., et al.: Online detection of action start in untrimmed, streaming videos. In: European Conference on Computer Vision (ECCV) (2018)","DOI":"10.1007\/978-3-030-01219-9_33"},{"key":"22_CR37","doi-asserted-by":"crossref","unstructured":"Su, H., Gan, W., Wu, W., Qiao, Y., Yan, J.: Bsn++: complementary boundary regressor with scale-balanced relation modeling for temporal action proposal generation. In: Association for the Advancement of Artificial Intelligence (AAAI) (2021)","DOI":"10.1609\/aaai.v35i3.16363"},{"key":"22_CR38","unstructured":"Sutton, R.S., Barto, A.G.: Reinforcement Learning: An Introduction (2018)"},{"key":"22_CR39","doi-asserted-by":"crossref","unstructured":"Tan, J., Tang, J., Wang, L., Wu, G.: Relaxed transformer decoders for direct action proposal generation. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.01327"},{"key":"22_CR40","doi-asserted-by":"crossref","unstructured":"Tan, M., Pang, R., Le, Q.V.: Efficientdet: scalable and efficient object detection. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.01079"},{"key":"22_CR41","unstructured":"Tang, T.N., Park, J., Kim, K., Sohn, K.: Simon: a simple framework for online temporal action localization. arXiv preprint arXiv:2211.04905 (2022)"},{"key":"22_CR42","unstructured":"Vaswani, A., et al.: Attention is all you need. 
In: Advances in Neural Information Processing Systems (NeurIPS) (2017)"},{"key":"22_CR43","doi-asserted-by":"publisher","unstructured":"Wang, L., et al.: Temporal segment networks: towards good practices for deep action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) Computer Vision \u2013 ECCV 2016. ECCV 2016. LNCS, vol. 9912. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46484-8_2","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"22_CR44","doi-asserted-by":"crossref","unstructured":"Wang, W., Feiszli, M., Wang, H., Malik, J., Tran, D.: Open-world instance segmentation: exploiting pseudo ground truth from learned pairwise affinity. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.00438"},{"key":"22_CR45","doi-asserted-by":"crossref","unstructured":"Wang, X., et al.: OadTR: online action detection with transformers. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.00747"},{"key":"22_CR46","doi-asserted-by":"crossref","unstructured":"Wu, X., Zhu, F., Zhao, R., Li, H.: Cora: adapting clip for open-vocabulary detection with region prompting and anchor pre-matching. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7031\u20137040 (2023)","DOI":"10.1109\/CVPR52729.2023.00679"},{"key":"22_CR47","doi-asserted-by":"crossref","unstructured":"Xu, M., Zhao, C., Rojas, D.S., Thabet, A., Ghanem, B.: G-TAD: sub-graph localization for temporal action detection. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.01017"},{"key":"22_CR48","doi-asserted-by":"crossref","unstructured":"Xu, M., Gao, M., Chen, Y.T., Davis, L.S., Crandall, D.J.: Temporal recurrent networks for online action detection. 
In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/ICCV.2019.00563"},{"key":"22_CR49","unstructured":"Xu, M., et al.: Long short-term transformer for online action detection. Adv. Neural Inf. Process. Syst. (NeurIPS) (2021)"},{"key":"22_CR50","doi-asserted-by":"crossref","unstructured":"Yang, L., Han, J., Zhang, D.: Colar: effective and efficient online action detection by consulting exemplars. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.00316"},{"key":"22_CR51","doi-asserted-by":"crossref","unstructured":"Yeung, S., Russakovsky, O., Jin, N., Andriluka, M., Mori, G., Fei-Fei, L.: Every moment counts: dense detailed labeling of actions in complex videos. Int. J. Comput. Vis. (IJCV) (2018)","DOI":"10.1007\/s11263-017-1013-y"},{"key":"22_CR52","doi-asserted-by":"crossref","unstructured":"Zeng, R., et al.: Graph convolutional networks for temporal action localization. In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/ICCV.2019.00719"},{"key":"22_CR53","doi-asserted-by":"publisher","unstructured":"Zhang, CL., Wu, J., Li, Y.: ActionFormer: localizing moments of actions with transformers. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022. ECCV 2022. LNCS, vol. 13664, pp. 492\u2013510. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19772-7_29","DOI":"10.1007\/978-3-031-19772-7_29"},{"key":"22_CR54","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"539","DOI":"10.1007\/978-3-030-58598-3_32","volume-title":"Computer Vision \u2013 ECCV 2020","author":"P Zhao","year":"2020","unstructured":"Zhao, P., Xie, L., Ju, C., Zhang, Y., Wang, Y., Tian, Q.: Bottom-up temporal action localization with mutual regularization. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12353, pp. 
539\u2013555. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58598-3_32"},{"key":"22_CR55","doi-asserted-by":"publisher","unstructured":"Zhao, Y., Kr\u00e4henb\u00fchl, P.: Real-time online video detection with temporal smoothing transformers. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022. ECCV 2022. LNCS, vol. 13694, pp. 485\u2013502. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19830-4_28","DOI":"10.1007\/978-3-031-19830-4_28"},{"key":"22_CR56","doi-asserted-by":"publisher","unstructured":"Zhou, X., Girdhar, R., Joulin, A., Kr\u00e4henb\u00fchl, P., Misra, I.: Detecting twenty-thousand classes using image-level supervision. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022. ECCV 2022. LNCS, vol. 13669, pp. 350\u2013368. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20077-9_21","DOI":"10.1007\/978-3-031-20077-9_21"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72684-2_22","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:11:18Z","timestamp":1730574678000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72684-2_22"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,3]]},"ISBN":["9783031726835","9783031726842"],"references-count":56,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72684-2_22","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,3]]},"assertion":[{"value":"3 November 2024","order":1,"name":"first_online","label":"First 
Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}