{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T15:33:17Z","timestamp":1771515197101,"version":"3.50.1"},"publisher-location":"Cham","reference-count":65,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729515","type":"print"},{"value":"9783031729522","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72952-2_18","type":"book-chapter","created":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T05:02:02Z","timestamp":1727672522000},"page":"305-322","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":14,"title":["DyFADet: Dynamic Feature Aggregation for\u00a0Temporal Action Detection"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8379-4915","authenticated-orcid":false,"given":"Le","family":"Yang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0000-4896-3293","authenticated-orcid":false,"given":"Ziwei","family":"Zheng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5706-8784","authenticated-orcid":false,"given":"Yizeng","family":"Han","sequence":"additional","affiliation":[]},{"given":"Hao","family":"Cheng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7361-9283","authenticated-orcid":false,"given":"Shiji","family":"Song","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7251-0988","authenticated-orcid":false,"given":"Gao","family":"Huang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7566-1634","authenticated-orcid":false,"given":"Fan","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,1]]},"reference":[{"key":"18_CR1","unstructured":"Agarap, A.F.: Deep learning using rectified linear units (ReLU). arXiv preprint arXiv:1803.08375 (2018)"},{"key":"18_CR2","doi-asserted-by":"crossref","unstructured":"Alwassel, H., Giancola, S., Ghanem, B.: TSP: temporally-sensitive pretraining of video encoders for localization tasks. In: ICCV (2021)","DOI":"10.1109\/ICCVW54120.2021.00356"},{"key":"18_CR3","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)"},{"key":"18_CR4","doi-asserted-by":"crossref","unstructured":"Bodla, N., Singh, B., Chellappa, R., Davis, L.S.: Soft-NMS\u2013improving object detection with one line of code. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.593"},{"key":"18_CR5","doi-asserted-by":"crossref","unstructured":"Caba\u00a0Heilbron, F., Escorcia, V., Ghanem, B., Carlos\u00a0Niebles, J.: ActivityNet: a large-scale video benchmark for human activity understanding. 
In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"18_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"18_CR7","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? A new model and the kinetics dataset. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"18_CR8","doi-asserted-by":"crossref","unstructured":"Chen, G., Zheng, Y.D., Wang, L., Lu, T.: DCAN: improving temporal action detection via dual context aggregation. In: AAAI (2022)","DOI":"10.1609\/aaai.v36i1.19900"},{"key":"18_CR9","doi-asserted-by":"crossref","unstructured":"Chen, Y., Dai, X., Liu, M., Chen, D., Yuan, L., Liu, Z.: Dynamic convolution: attention over convolution kernels. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01104"},{"key":"18_CR10","doi-asserted-by":"publisher","first-page":"503","DOI":"10.1007\/978-3-031-19830-4_29","volume-title":"ECCV","author":"F Cheng","year":"2022","unstructured":"Cheng, F., Bertasius, G.: Tallformer: temporal action localization with a long-memory transformer. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13694, pp. 503\u2013521. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19830-4_29"},{"key":"18_CR11","unstructured":"Spconv Contributors: Spconv: spatially sparse convolution library (2022). https:\/\/github.com\/traveller59\/spconv"},{"key":"18_CR12","doi-asserted-by":"crossref","unstructured":"Dai, J., et al.: Deformable convolutional networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.89"},{"key":"18_CR13","doi-asserted-by":"publisher","first-page":"33","DOI":"10.1007\/s11263-021-01531-2","volume":"130","author":"D Damen","year":"2022","unstructured":"Damen, D., et al.: Rescaling egocentric vision: collection, pipeline and challenges for EPIC-KITCHENS-100. IJCV 130, 33\u201355 (2022)","journal-title":"IJCV"},{"key":"18_CR14","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Fan, H., Malik, J., He, K.: SlowFast networks for video recognition. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00630"},{"key":"18_CR15","unstructured":"Grauman, K., et al.: Ego4D: around the world in 3,000 hours of egocentric video. In: CVPR (2022)"},{"issue":"11","key":"18_CR16","doi-asserted-by":"publisher","first-page":"7436","DOI":"10.1109\/TPAMI.2021.3117837","volume":"44","author":"Y Han","year":"2021","unstructured":"Han, Y., Huang, G., Song, S., Yang, L., Wang, H., Wang, Y.: Dynamic neural networks: a survey. IEEE TPAMI 44(11), 7436\u20137456 (2021)","journal-title":"IEEE TPAMI"},{"key":"18_CR17","doi-asserted-by":"crossref","unstructured":"Han, Y., et al.: Latency-aware unified dynamic networks for efficient image recognition. IEEE TPAMI (2024)","DOI":"10.1109\/TPAMI.2024.3393530"},{"key":"18_CR18","unstructured":"Han, Y., et al.: Latency-aware spatial-wise dynamic networks. In: NeurIPS (2022)"},{"key":"18_CR19","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. 
In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"18_CR20","unstructured":"Huang, G., Chen, D., Li, T., Wu, F., van\u00a0der Maaten, L., Weinberger, K.: Multi-scale dense networks for resource efficient image classification. In: ICLR (2018)"},{"issue":"4","key":"18_CR21","doi-asserted-by":"crossref","first-page":"4605","DOI":"10.1109\/TPAMI.2022.3199449","volume":"45","author":"G Huang","year":"2023","unstructured":"Huang, G., et al.: Glance and focus networks for dynamic visual recognition. IEEE TPAMI 45(4), 4605\u20134621 (2023)","journal-title":"IEEE TPAMI"},{"key":"18_CR22","unstructured":"Jiang, Y.G., et al.: THUMOS challenge: action recognition with a large number of classes (2014). http:\/\/crcv.ucf.edu\/THUMOS14\/"},{"key":"18_CR23","doi-asserted-by":"crossref","unstructured":"Lei, P., Todorovic, S.: Temporal deformable residual networks for action segmentation in videos. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00705"},{"key":"18_CR24","volume":"98","author":"J Li","year":"2020","unstructured":"Li, J., Liu, X., Zhang, M., Wang, D.: Spatio-temporal deformable 3D convnets with attention for action recognition. PR 98, 107037 (2020)","journal-title":"PR"},{"key":"18_CR25","doi-asserted-by":"crossref","unstructured":"Lin, C., et al.: Learning salient boundary feature for anchor-free temporal action localization. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00333"},{"key":"18_CR26","unstructured":"Lin, K.Q., et al.: Egocentric video-language pretraining. In: NeurIPS (2022)"},{"key":"18_CR27","doi-asserted-by":"crossref","unstructured":"Lin, T., Liu, X., Li, X., Ding, E., Wen, S.: BMN: boundary-matching network for temporal action proposal generation. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00399"},{"key":"18_CR28","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1007\/978-3-030-01225-0_1","volume-title":"Computer Vision \u2013 ECCV 2018","author":"T Lin","year":"2018","unstructured":"Lin, T., Zhao, X., Su, H., Wang, C., Yang, M.: BSN: boundary sensitive network for temporal action proposal generation. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11208, pp. 3\u201321. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01225-0_1"},{"key":"18_CR29","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., Goyal, P., Girshick, R., He, K., Doll\u00e1r, P.: Focal loss for dense object detection. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.324"},{"key":"18_CR30","doi-asserted-by":"crossref","unstructured":"Liu, Q., Wang, Z.: Progressive boundary refinement network for temporal action detection. In: AAAI (2020)","DOI":"10.1609\/aaai.v34i07.6829"},{"key":"18_CR31","first-page":"5427","volume":"31","author":"X Liu","year":"2022","unstructured":"Liu, X., et al.: End-to-end temporal action detection with transformer. IEEE TIP 31, 5427\u20135441 (2022)","journal-title":"IEEE TIP"},{"key":"18_CR32","first-page":"6937","volume":"31","author":"Y Liu","year":"2022","unstructured":"Liu, Y., Wang, L., Wang, Y., Ma, X., Qiao, Y.: FineAction: a fine-grained video dataset for temporal action localization. IEEE TIP 31, 6937\u20136950 (2022)","journal-title":"IEEE TIP"},{"key":"18_CR33","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. 
arXiv preprint arXiv:1711.05101 (2017)"},{"key":"18_CR34","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"776","DOI":"10.1007\/978-3-030-58555-6_46","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Ma","year":"2020","unstructured":"Ma, N., Zhang, X., Huang, J., Sun, J.: WeightNet: revisiting the design space of weight networks. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12360, pp. 776\u2013792. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58555-6_46"},{"key":"18_CR35","doi-asserted-by":"crossref","unstructured":"Mac, K.N.C., Joshi, D., Yeh, R.A., Xiong, J., Feris, R.S., Do, M.N.: Learning motion in feature space: locally-consistent deformable convolution networks for fine-grained action detection. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00638"},{"key":"18_CR36","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"86","DOI":"10.1007\/978-3-030-58571-6_6","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y Meng","year":"2020","unstructured":"Meng, Y., et al.: AR-Net: adaptive frame resolution for efficient action recognition. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12352, pp. 86\u2013104. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58571-6_6"},{"key":"18_CR37","doi-asserted-by":"crossref","unstructured":"Qing, Z., et al.: Temporal context aggregation network for temporal action proposal refinement. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00055"},{"key":"18_CR38","doi-asserted-by":"crossref","unstructured":"Shao, J., Wang, X., Quan, R., Zheng, J., Yang, J., Yang, Y.: Action sensitivity learning for temporal action localization. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01238"},{"key":"18_CR39","doi-asserted-by":"crossref","unstructured":"Shi, D., Zhong, Y., Cao, Q., Ma, L., Li, J., Tao, D.: TriDet: temporal action detection with relative boundary modeling. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01808"},{"key":"18_CR40","doi-asserted-by":"publisher","first-page":"105","DOI":"10.1007\/978-3-031-20080-9_7","volume-title":"ECCV 2022","author":"D Shi","year":"2022","unstructured":"Shi, D., et al.: ReAct: temporal action detection with relational queries. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13670, pp. 105\u2013121. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-20080-9_7"},{"key":"18_CR41","unstructured":"Song, L., et al.: Fine-grained dynamic head for object detection. In: NeurIPS (2020)"},{"key":"18_CR42","doi-asserted-by":"crossref","unstructured":"Sridhar, D., Quader, N., Muralidharan, S., Li, Y., Dai, P., Lu, J.: Class semantics-based attention for action detection. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01348"},{"key":"18_CR43","doi-asserted-by":"crossref","unstructured":"Tan, J., Tang, J., Wang, L., Wu, G.: Relaxed transformer decoders for direct action proposal generation. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01327"},{"key":"18_CR44","unstructured":"Tang, T.N., Kim, K., Sohn, K.: TemporalMaxer: maximize temporal context with only max pooling for temporal action localization. arXiv preprint arXiv:2303.09055 (2023)"},{"key":"18_CR45","doi-asserted-by":"crossref","unstructured":"Wang, L., et al.: VideoMAE V2: scaling video masked autoencoders with dual masking. 
In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01398"},{"key":"18_CR46","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"20","DOI":"10.1007\/978-3-319-46484-8_2","volume-title":"Computer Vision \u2013 ECCV 2016","author":"L Wang","year":"2016","unstructured":"Wang, L., et al.: Temporal segment networks: towards good practices for deep action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9912, pp. 20\u201336. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46484-8_2"},{"key":"18_CR47","unstructured":"Wang, Y., et al.: InternVideo: general video foundation models via generative and discriminative learning. arXiv preprint arXiv:2212.03191 (2022)"},{"key":"18_CR48","doi-asserted-by":"crossref","unstructured":"Wang, Y., Chen, Z., Jiang, H., Song, S., Han, Y., Huang, G.: Adaptive focus for efficient video recognition. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01594"},{"key":"18_CR49","doi-asserted-by":"publisher","first-page":"358","DOI":"10.1007\/978-3-031-19830-4_21","volume-title":"ECCV","author":"Y Weng","year":"2022","unstructured":"Weng, Y., Pan, Z., Han, M., Chang, X., Zhuang, B.: An efficient spatio-temporal pyramid transformer for action detection. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13694, pp. 358\u2013375. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19830-4_21"},{"key":"18_CR50","doi-asserted-by":"crossref","unstructured":"Wu, B., et al.: Shift: a zero flop, zero parameter alternative to spatial convolutions. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00951"},{"key":"18_CR51","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1007\/978-3-030-01261-8_1","volume-title":"Computer Vision \u2013 ECCV 2018","author":"Y Wu","year":"2018","unstructured":"Wu, Y., He, K.: Group normalization. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11217, pp. 3\u201319. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01261-8_1"},{"key":"18_CR52","doi-asserted-by":"crossref","unstructured":"Wu, Z., Xiong, C., Ma, C.Y., Socher, R., Davis, L.S.: AdaFrame: adaptive frame selection for fast video recognition. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00137"},{"key":"18_CR53","unstructured":"Xu, M., Perez\u00a0Rua, J.M., Zhu, X., Ghanem, B., Martinez, B.: Low-fidelity video encoder optimization for temporal action localization. In: NeurIPS (2021)"},{"key":"18_CR54","doi-asserted-by":"crossref","unstructured":"Xu, M., Zhao, C., Rojas, D.S., Thabet, A., Ghanem, B.: G-TAD: sub-graph localization for temporal action detection. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01017"},{"key":"18_CR55","unstructured":"Yang, B., Bender, G., Le, Q.V., Ngiam, J.: CondConv: conditionally parameterized convolutions for efficient inference. In: NeurIPS (2019)"},{"key":"18_CR56","doi-asserted-by":"crossref","unstructured":"Yang, L., Han, Y., Chen, X., Song, S., Dai, J., Huang, G.: Resolution adaptive networks for efficient inference. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00244"},{"key":"18_CR57","first-page":"8535","volume":"29","author":"L Yang","year":"2020","unstructured":"Yang, L., Peng, H., Zhang, D., Fu, J., Han, J.: Revisiting anchor mechanisms for temporal action localization. 
IEEE TIP 29, 8535\u20138548 (2020)","journal-title":"IEEE TIP"},{"key":"18_CR58","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2023.103692","volume":"232","author":"M Yang","year":"2023","unstructured":"Yang, M., Chen, G., Zheng, Y.D., Lu, T., Wang, L.: BasicTAD: an astounding RGB-only baseline for temporal action detection. Comput. Vis. Image Underst. 232, 103692 (2023)","journal-title":"Comput. Vis. Image Underst."},{"key":"18_CR59","doi-asserted-by":"publisher","first-page":"492","DOI":"10.1007\/978-3-031-19772-7_29","volume-title":"ECCV 2022","author":"CL Zhang","year":"2022","unstructured":"Zhang, C.L., Wu, J., Li, Y.: ActionFormer: localizing moments of actions with transformers. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13664, pp. 492\u2013510. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19772-7_29"},{"key":"18_CR60","doi-asserted-by":"crossref","unstructured":"Zhao, C., Thabet, A.K., Ghanem, B.: Video self-stitching graph network for temporal action localization. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01340"},{"key":"18_CR61","doi-asserted-by":"crossref","unstructured":"Zhao, H., Torralba, A., Torresani, L., Yan, Z.: HACS: human action clips and segments dataset for recognition and temporal localization. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00876"},{"key":"18_CR62","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Xiong, Y., Wang, L., Wu, Z., Tang, X., Lin, D.: Temporal action detection with structured segment networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.317"},{"key":"18_CR63","doi-asserted-by":"crossref","unstructured":"Zheng, Z., Wang, P., Liu, W., Li, J., Ye, R., Ren, D.: Distance-IoU Loss: faster and better learning for bounding box regression. In: AAAI (2020)","DOI":"10.1609\/aaai.v34i07.6999"},{"key":"18_CR64","unstructured":"Zhou, C., Loy, C.C., Dai, B.: Interpret vision transformers as convnets with dynamic convolutions. arXiv preprint arXiv:2309.10713 (2023)"},{"key":"18_CR65","doi-asserted-by":"crossref","unstructured":"Zhu, Z., Tang, W., Wang, L., Zheng, N., Hua, G.: Enriching local and global contexts for temporal action localization. 
In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01326"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72952-2_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T05:11:50Z","timestamp":1727673110000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72952-2_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,1]]},"ISBN":["9783031729515","9783031729522"],"references-count":65,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72952-2_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,1]]},"assertion":[{"value":"1 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
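The record above is a standard Crossref works response, retrievable from the public REST API at https://api.crossref.org/works/{DOI}. As a minimal sketch of how one might fetch and walk this data structure (standard library only; the DOI and field names are taken from the record itself, the script is otherwise illustrative):

```python
import json
import urllib.request

# DOI copied from the record above; the /works/{DOI} endpoint is the
# public Crossref REST API. Crossref asks polite clients to identify
# themselves, so a contact address in the User-Agent is good practice.
DOI = "10.1007/978-3-031-72952-2_18"
req = urllib.request.Request(
    "https://api.crossref.org/works/" + DOI,
    headers={"User-Agent": "example-script (mailto:you@example.org)"},
)

with urllib.request.urlopen(req) as resp:
    record = json.load(resp)

work = record["message"]            # work-level metadata object
print(work["title"][0])             # "title" is a list of strings
print(work["DOI"], work["page"])    # "10.1007/978-3-031-72952-2_18", "305-322"

# "reference" is a list of dicts. Fields such as "DOI" and
# "unstructured" are optional per entry, so use .get().
for ref in work["reference"]:
    print(ref["key"], ref.get("DOI", "(no DOI deposited)"))
```

Note that not every reference entry carries a "DOI" (e.g., arXiv preprints here have only "unstructured" text), which is why the sketch treats all fields except "key" as optional.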