{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,13]],"date-time":"2026-02-13T23:20:01Z","timestamp":1771024801146,"version":"3.50.1"},"publisher-location":"Cham","reference-count":67,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729393","type":"print"},{"value":"9783031729409","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,17]],"date-time":"2024-11-17T00:00:00Z","timestamp":1731801600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,17]],"date-time":"2024-11-17T00:00:00Z","timestamp":1731801600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72940-9_13","type":"book-chapter","created":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T20:43:00Z","timestamp":1731789780000},"page":"222-239","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":27,"title":["Robo-ABC: Affordance Generalization Beyond Categories via Semantic Correspondence for Robot Manipulation"],"prefix":"10.1007","author":[{"given":"Yuanchen","family":"Ju","sequence":"first","affiliation":[]},{"given":"Kaizhe","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Guowei","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Gu","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Mingrun","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Huazhe","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,17]]},"reference":[{"key":"13_CR1","unstructured":"Agarwal, A., Uppal, S., Shaw, K., Pathak, D.: Dexterous functional grasping. In: 7th Annual Conference on Robot Learning (2023)"},{"key":"13_CR2","unstructured":"Amir, S., Gandelsman, Y., Bagon, S., Dekel, T.: Deep ViT features as dense visual descriptors 2(3), 4 (2021). arXiv preprint arXiv:2112.05814"},{"key":"13_CR3","doi-asserted-by":"crossref","unstructured":"Bahl, S., Gupta, A., Pathak, D.: Human-to-robot imitation in the wild. RSS (2022)","DOI":"10.15607\/RSS.2022.XVIII.026"},{"key":"13_CR4","doi-asserted-by":"crossref","unstructured":"Bahl, S., Mendonca, R., Chen, L., Jain, U., Pathak, D.: Affordances from human videos as a versatile representation for robotics. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13778\u201313790 (2023)","DOI":"10.1109\/CVPR52729.2023.01324"},{"key":"13_CR5","unstructured":"Brohan, A., et\u00a0al.: Do as I can, not as I say: grounding language in robotic affordances. In: Conference on Robot Learning, pp. 287\u2013318. PMLR (2023)"},{"key":"13_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"13_CR7","unstructured":"Cheng, K., Wu, R., Shen, Y., Ning, C., Zhan, G., Dong, H.: Learning environment-aware affordance for 3D articulated object manipulation under occlusions. arXiv preprint arXiv:2309.07510, 2023"},{"key":"13_CR8","doi-asserted-by":"crossref","unstructured":"Creem-Regehr, S.H., Lee, J.N.: Neural representations of graspable objects: are tools special? Cogn. Brain Res. 22(3), 457\u2013469 (2005)","DOI":"10.1016\/j.cogbrainres.2004.10.006"},{"key":"13_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"753","DOI":"10.1007\/978-3-030-01225-0_44","volume-title":"Computer Vision \u2013 ECCV 2018","author":"D Damen","year":"2018","unstructured":"Damen, D., et al.: Scaling egocentric vision: the epic-kitchens dataset. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11208, pp. 753\u2013771. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01225-0_44"},{"key":"13_CR10","doi-asserted-by":"crossref","unstructured":"Damen, D., et\u00a0al.: The epic-kitchens dataset: collection, challenges and baselines. IEEE Trans. Pattern Anal. Mach. Intell. 43(11), 4125\u20134141 (2020)","DOI":"10.1109\/TPAMI.2020.2991965"},{"key":"13_CR11","doi-asserted-by":"crossref","unstructured":"Damen, D., et\u00a0al.: Rescaling egocentric vision: collection, pipeline and challenges for epic-kitchens-100. Int. J. Comput. Vis. 1\u201323 (2022)","DOI":"10.1007\/s11263-021-01531-2"},{"key":"13_CR12","doi-asserted-by":"crossref","unstructured":"Di\u00a0Palo, N., Johns, E.: On the effectiveness of retrieval, alignment, and replay in manipulation. IEEE Rob. Autom. Lett. (2024)","DOI":"10.1109\/LRA.2024.3349832"},{"key":"13_CR13","doi-asserted-by":"crossref","unstructured":"Fang, H.-S.: Robust and efficient grasp perception in spatial and temporal domains. IEEE Trans. Rob. (2023)","DOI":"10.1109\/TRO.2023.3281153"},{"key":"13_CR14","doi-asserted-by":"crossref","unstructured":"Fang, K., Wu, T.-L., Yang, D., Savarese, S., Lim, J.J.: Demo2Vec: reasoning object affordances from online videos. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018","DOI":"10.1109\/CVPR.2018.00228"},{"key":"13_CR15","unstructured":"Gao, J., Hu, K., Xu, G., Xu, H.: Can pre-trained text-to-image models generate visual goals for reinforcement learning? arXiv preprint arXiv:2307.07837 (2023)"},{"key":"13_CR16","doi-asserted-by":"crossref","unstructured":"Geng, H., Li, Z., Geng, Y., Chen, J., Dong, H., Wang, H.: PartManip: learning cross-category generalizable part manipulation policy from point cloud observations. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2978\u20132988 (2023)","DOI":"10.1109\/CVPR52729.2023.00291"},{"key":"13_CR17","doi-asserted-by":"crossref","unstructured":"Geng, H., et al.: GAPartNet: cross-category domain-generalizable object perception and manipulation via generalizable and actionable parts. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7081\u20137091 (2023)","DOI":"10.1109\/CVPR52729.2023.00684"},{"key":"13_CR18","doi-asserted-by":"crossref","unstructured":"Geng, Y., An, B., Geng, H., Chen, Y., Yang, Y., Dong, H.: RLAfford: end-to-end affordance learning for robotic manipulation. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 5880\u20135886. 
IEEE (2023)","DOI":"10.1109\/ICRA48891.2023.10161571"},{"key":"13_CR19","doi-asserted-by":"crossref","unstructured":"Gibson, J.J.: The ecological approach to the visual perception of pictures. Leonardo 11(3), 227\u2013235 (1978)","DOI":"10.2307\/1574154"},{"key":"13_CR20","doi-asserted-by":"crossref","unstructured":"Goyal, M., Modi, S., Goyal, R., Gupta, S.: Human hands as probes for interactive object understanding. In: Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.00329"},{"key":"13_CR21","unstructured":"Grauman, K., et\u00a0al.: Ego4D: around the world in 3,000 hours of egocentric video. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18995\u201319012 (2022)"},{"key":"13_CR22","unstructured":"Hadjivelichkov, D., Zwane, S., Agapito, L., Deisenroth, M.P., Kanoulas, D.: One-shot transfer of affordance regions? Affcorrs! In: Conference on Robot Learning, pp. 550\u2013560. PMLR (2023)"},{"key":"13_CR23","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"220","DOI":"10.1007\/978-3-319-30285-0_18","volume-title":"Image and Video Technology \u2013 PSIVT 2015 Workshops","author":"M Hassan","year":"2016","unstructured":"Hassan, M., Dharmaratne, A.: Attribute based affordance detection from human-object interaction images. In: Huang, F., Sugimoto, A. (eds.) PSIVT 2015. LNCS, vol. 9555, pp. 220\u2013232. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-30285-0_18"},{"key":"13_CR24","unstructured":"Hedlin, E., et al.: Unsupervised semantic correspondence using stable diffusion. arXiv preprint arXiv:2305.15581 (2023)"},{"key":"13_CR25","doi-asserted-by":"crossref","unstructured":"Hou, Z., Yu, B., Qiao, Y., Peng, X., Tao, D.: Affordance transfer learning for human-object interaction detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 495\u2013504 (2021)","DOI":"10.1109\/CVPR46437.2021.00056"},{"key":"13_CR26","doi-asserted-by":"crossref","unstructured":"Jiang, Z., Jiang, H., Zhu, Y.: Doduo: dense visual correspondence from unsupervised semantic-aware flow. arXiv preprint arXiv:2309.15110 (2023)","DOI":"10.1109\/ICRA57147.2024.10611587"},{"key":"13_CR27","unstructured":"Kannan, A., Shaw, K., Bahl, S., Mannam, P., Pathak, D.: DEFT: dexterous fine-tuning for real-world hand policies. arXiv preprint arXiv:2310.19797 (2023)"},{"key":"13_CR28","doi-asserted-by":"crossref","unstructured":"Lai, Z., Purushwalkam, S., Gupta, A.: The functional correspondence problem. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 15772\u201315781 (2021)","DOI":"10.1109\/ICCV48922.2021.01548"},{"key":"13_CR29","doi-asserted-by":"crossref","unstructured":"Li, G., Jampani, V., Sun, D., Sevilla-Lara, L.: Locate: localize and transfer object parts for weakly supervised affordance grounding. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10922\u201310931 (2023)","DOI":"10.1109\/CVPR52729.2023.01051"},{"key":"13_CR30","doi-asserted-by":"crossref","unstructured":"Li, H., et al.: ViHOPE: visuotactile in-hand object 6d pose estimation with shape completion. IEEE Rob. Autom. Lett. (2023)","DOI":"10.1109\/LRA.2023.3313941"},{"key":"13_CR31","doi-asserted-by":"crossref","unstructured":"Li, P., et al.: GenDexGrasp: generalizable dexterous grasping. In: 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 8068\u20138074. 
IEEE (2023)","DOI":"10.1109\/ICRA48891.2023.10160667"},{"key":"13_CR32","unstructured":"Li, Y., Cheng, K., Wu, R., Shen, Y., Zhou, K., Dong, H.: MobileAfford: mobile robotic manipulation through differentiable affordance learning. In: 2nd Workshop on Mobile Manipulation and Embodied Intelligence at ICRA 2024 (2024)"},{"key":"13_CR33","unstructured":"Li, Y., Zhang, X., Wu, R., Zhang, Z., Geng, Y., Dong, H., He, Z.: UniDoorManip: learning universal door manipulation policy over large-scale and diverse door manipulation environments. arXiv preprint arXiv:2403.02604 (2024)"},{"key":"13_CR34","unstructured":"Li, Y.-L., et\u00a0al.: Discovering a variety of objects in spatio-temporal human-object interactions. arXiv preprint arXiv:2211.07501 (2022)"},{"key":"13_CR35","doi-asserted-by":"crossref","unstructured":"Liu, S., Tripathi, S., Majumdar, S., Wang, X.: Joint hand motion and interaction hotspots prediction from egocentric videos. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3282\u20133292 (2022)","DOI":"10.1109\/CVPR52688.2022.00328"},{"key":"13_CR36","unstructured":"Luo, G., Dunlap, L., Park, D.H., Holynski, A., Darrell, T.: Diffusion hyperfeatures: searching through time and space for semantic correspondence. In: Advances in Neural Information Processing Systems (2023)"},{"key":"13_CR37","doi-asserted-by":"crossref","unstructured":"Mandikal, P., Grauman, K.: Learning dexterous grasping with object-centric visual affordances. In: 2021 IEEE International Conference on Robotics and Automation (ICRA), pp. 6169\u20136176. IEEE (2021)","DOI":"10.1109\/ICRA48506.2021.9561802"},{"key":"13_CR38","unstructured":"Mandikal, P., Grauman, K.: DexVIP: learning dexterous grasping with human hand pose priors from video. In: Conference on Robot Learning, pp. 651\u2013661. PMLR (2022)"},{"key":"13_CR39","unstructured":"Medeiros, L.: lang-segment-anything. https:\/\/github.com\/luca-medeiros\/lang-segment-anything (2023)"},{"key":"13_CR40","doi-asserted-by":"crossref","unstructured":"Mendonca, R., Bahl, S., Pathak, D.: Structured world models from human videos. RSS (2023)","DOI":"10.15607\/RSS.2023.XIX.012"},{"key":"13_CR41","doi-asserted-by":"crossref","unstructured":"Mo, K., Guibas, L.J., Mukadam, M., Gupta, A., Tulsiani, S.: Where2Act: from pixels to actions for articulated 3D objects. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6813\u20136823 (2021)","DOI":"10.1109\/ICCV48922.2021.00674"},{"key":"13_CR42","unstructured":"Mo, K., Qin, Y., Xiang, F., Su, H., Guibas, L.: O2O-afford: annotation-free large-scale object-object affordance learning. In: Conference on Robot Learning, pp. 1666\u20131677. PMLR (2022)"},{"key":"13_CR43","doi-asserted-by":"crossref","unstructured":"Myers, A., Teo, C.L., Ferm\u00fcller, C., Aloimonos, Y.: Affordance detection of tool parts from geometric features. In: 2015 IEEE International Conference on Robotics and Automation (ICRA), pp. 1374\u20131381. IEEE (2015)","DOI":"10.1109\/ICRA.2015.7139369"},{"key":"13_CR44","doi-asserted-by":"crossref","unstructured":"Nagarajan, T., Feichtenhofer, C., Grauman, K.: Grounded human-object interaction hotspots from video. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8688\u20138697 (2019)","DOI":"10.1109\/ICCV.2019.00878"},{"key":"13_CR45","unstructured":"Nair, S., Rajeswaran, A., Kumar, V., Finn, C., Gupta, A.: R3M: a universal visual representation for robot manipulation. 
arXiv preprint arXiv:2203.12601 (2022)"},{"key":"13_CR46","unstructured":"Ning, C., Wu, R., Lu, H., Mo, K., Dong, H.: Where2Explore: few-shot affordance learning for unseen novel categories of articulated objects. arXiv preprint arXiv:2309.07473 (2023)"},{"key":"13_CR47","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"13_CR48","unstructured":"Rashid, A., et al.: Language embedded radiance fields for zero-shot task-oriented grasping. In: 7th Annual Conference on Robot Learning (2023)"},{"key":"13_CR49","unstructured":"Reed, S., et\u00a0al.: A generalist agent. arXiv preprint arXiv:2205.06175 (2022)"},{"key":"13_CR50","doi-asserted-by":"crossref","unstructured":"Saxen, F., Al-Hamadi, A.: Color-based skin segmentation: an evaluation of the state of the art. In: 2014 IEEE International Conference on Image Processing (ICIP), pp. 4467\u20134471. IEEE (2014)","DOI":"10.1109\/ICIP.2014.7025906"},{"key":"13_CR51","doi-asserted-by":"crossref","unstructured":"Shan, D., Geng, J., Shu, M., Fouhey, D.F.: Understanding human hands in contact at internet scale. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9869\u20139878 (2020)","DOI":"10.1109\/CVPR42600.2020.00989"},{"key":"13_CR52","unstructured":"Tang, L., Jia, M., Wang, Q., Phoo, C.P., Hariharan, B.: Emergent correspondence from image diffusion. arXiv preprint arXiv:2306.03881 (2023)"},{"key":"13_CR53","doi-asserted-by":"crossref","unstructured":"Wan, W., et al.: UniDexGrasp++: improving dexterous grasping policy learning via geometry-aware curriculum and iterative generalist-specialist learning. arXiv preprint arXiv:2304.00464 (2023)","DOI":"10.1109\/ICCV51070.2023.00360"},{"key":"13_CR54","unstructured":"Wang, Y.: $$d^3$$ fields: dynamic 3D descriptor fields for zero-shot generalizable robotic manipulation. arXiv preprint arXiv:2309.16118 (2023)"},{"key":"13_CR55","series-title":"LNCS","doi-asserted-by":"publisher","first-page":"90","DOI":"10.1007\/978-3-031-19818-2_6","volume-title":"ECCV 2022","author":"Y Wang","year":"2022","unstructured":"Wang, Y., et al.: AdaAfford: learning to adapt manipulation affordance for 3D articulated objects via few-shot interactions. In: ECCV 2022. LNCS, vol. 13689, pp. 90\u2013107. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19818-2_6"},{"key":"13_CR56","doi-asserted-by":"crossref","unstructured":"Wu, R., Lu, H., Wang, Y., Wang, Y., Dong, H.: UniGarmentManip: a unified framework for category-level garment manipulation via dense visual correspondence. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16340\u201316350 (2024)","DOI":"10.1109\/CVPR52733.2024.01546"},{"key":"13_CR57","doi-asserted-by":"crossref","unstructured":"Wu, R., Ning, C., Dong, H.: Learning foresightful dense visual affordance for deformable object manipulation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10947\u201310956 (2023)","DOI":"10.1109\/ICCV51070.2023.01005"},{"key":"13_CR58","unstructured":"Wu, R., et al.: VAT-Mart: learning visual action trajectory proposals for manipulating 3D articulated objects. arXiv preprint arXiv:2106.14440 (2021)"},{"key":"13_CR59","doi-asserted-by":"crossref","unstructured":"Xu, Y., et\u00a0al.: UniDexGrasp: universal robotic dexterous grasping via learning diverse proposal generation and goal-conditioned policy. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4737\u20134746 (2023)","DOI":"10.1109\/CVPR52729.2023.00459"},{"key":"13_CR60","doi-asserted-by":"crossref","unstructured":"Xue, Z., Yuan, Z., Wang, J., Wang, X., Gao, Y., Xu, H.: USEEK: unsupervised SE(3)-equivariant 3D keypoints for generalizable manipulation. In: 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 1715\u20131722. IEEE (2023)","DOI":"10.1109\/ICRA48891.2023.10160631"},{"key":"13_CR61","doi-asserted-by":"crossref","unstructured":"Xue, Z., et al.: ArrayBot: reinforcement learning for generalizable distributed manipulation through touch. arXiv preprint arXiv:2306.16857 (2023)","DOI":"10.1109\/ICRA57147.2024.10610350"},{"key":"13_CR62","unstructured":"Ye, W., et al.: Foundation reinforcement learning: towards embodied generalist agents with foundation prior assistance. arXiv preprint arXiv:2310.02635 (2023)"},{"key":"13_CR63","doi-asserted-by":"crossref","unstructured":"Ye, Y., et al.: Affordance diffusion: synthesizing hand-object interactions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22479\u201322489 (2023)","DOI":"10.1109\/CVPR52729.2023.02153"},{"key":"13_CR64","unstructured":"Zhang, J., et al.: A tale of two features: stable diffusion complements DINO for zero-shot semantic correspondence. arXiv preprint arXiv:2305.15347 (2023)"},{"key":"13_CR65","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"13_CR66","unstructured":"Zhao, Y., et al.: DualAfford: learning collaborative visual affordance for dual-gripper manipulation. arXiv preprint arXiv:2207.01971 (2022)"},{"key":"13_CR67","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"408","DOI":"10.1007\/978-3-319-10605-2_27","volume-title":"Computer Vision \u2013 ECCV 2014","author":"Y Zhu","year":"2014","unstructured":"Zhu, Y., Fathi, A., Fei-Fei, L.: Reasoning about object affordances in a knowledge base representation. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8690, pp. 408\u2013424. Springer, Cham (2014). 
https:\/\/doi.org\/10.1007\/978-3-319-10605-2_27"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72940-9_13","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T21:34:14Z","timestamp":1731792854000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72940-9_13"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,17]]},"ISBN":["9783031729393","9783031729409"],"references-count":67,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72940-9_13","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,17]]},"assertion":[{"value":"17 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
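The object above is the response of Crossref's public REST API (GET https://api.crossref.org/works/{doi}). What follows is a minimal sketch, not a canonical client, of fetching the same DOI and flattening the fields shown in the record (title, author, published, references-count, is-referenced-by-count). The third-party requests package and the mailto contact address are assumptions; everything else mirrors the JSON above.

```python
# Minimal sketch: fetch a Crossref work record and summarize the fields
# visible in the record above. Assumes the public Crossref REST API and
# the third-party `requests` package; error handling is kept minimal.
import requests

DOI = "10.1007/978-3-031-72940-9_13"  # the chapter described by this record


def fetch_work(doi: str) -> dict:
    """Fetch one work record and return its "message" payload."""
    resp = requests.get(
        f"https://api.crossref.org/works/{doi}",
        params={"mailto": "you@example.org"},  # hypothetical polite-pool contact
        timeout=30,
    )
    resp.raise_for_status()
    payload = resp.json()
    # The envelope mirrors the record above: status / message-type / message.
    if payload.get("status") != "ok" or payload.get("message-type") != "work":
        raise ValueError("unexpected Crossref response envelope")
    return payload["message"]


def summarize(work: dict) -> str:
    """Flatten the record's fields into a one-line citation."""
    title = work.get("title", ["(untitled)"])[0]  # "title" is a list
    authors = ", ".join(
        f"{a.get('given', '')} {a.get('family', '')}".strip()
        for a in work.get("author", [])
    )
    year = work.get("published", {}).get("date-parts", [[None]])[0][0]
    return f"{authors} ({year}). {title}. https://doi.org/{work['DOI']}"


if __name__ == "__main__":
    work = fetch_work(DOI)
    print(summarize(work))
    print(f"{work.get('references-count', 0)} references, "
          f"cited {work.get('is-referenced-by-count', 0)} times")
```

Note that title and container-title are arrays, and all dates use the nested date-parts convention ([[year, month, day]]), so the indexing shown above is required rather than optional.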