{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T02:44:21Z","timestamp":1775529861281,"version":"3.50.1"},"publisher-location":"Cham","reference-count":79,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726835","type":"print"},{"value":"9783031726842","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72684-2_20","type":"book-chapter","created":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:07:18Z","timestamp":1730574438000},"page":"346-364","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["ReALFRED: An Embodied Instruction Following Benchmark in\u00a0Photo-Realistic Environments"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9999-6474","authenticated-orcid":false,"given":"Taewoong","family":"Kim","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0007-1495-2976","authenticated-orcid":false,"given":"Cheolhong","family":"Min","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3775-2778","authenticated-orcid":false,"given":"Byeonghwi","family":"Kim","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9122-3194","authenticated-orcid":false,"given":"Jinyeon","family":"Kim","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-8975-5720","authenticated-orcid":false,"given":"Wonje","family":"Jeung","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7934-8434","authenticated-orcid":false,"given":"Jonghyun","family":"Choi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,3]]},"reference":[{"key":"20_CR1","doi-asserted-by":"crossref","unstructured":"Anderson, P., et al.: Vision-and-language navigation: interpreting visually-grounded navigation instructions in real environments. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00387"},{"key":"20_CR2","unstructured":"Baruch, G., et al.: ARKitScenes - a diverse real-world dataset for 3D indoor scene understanding using mobile RGB-D data. In: NeurIPS Datasets and Benchmarks Track (2021)"},{"key":"20_CR3","unstructured":"Berseth, G., Xie, C., Cernek, P., van\u00a0de Panne, M.: Progressive reinforcement learning with distillation for multi-skilled motion control. In: ICLR (2018)"},{"key":"20_CR4","unstructured":"Bi\u0144kowski, M., Sutherland, D.J., Arbel, M., Gretton, A.: Demystifying MMD GANs. In: ICLR (2018)"},{"key":"20_CR5","unstructured":"Blukis, V., Paxton, C., Fox, D., Garg, A., Artzi, Y.: A persistent spatial semantic representation for high-level natural language instruction execution. In: CoRL (2021)"},{"key":"20_CR6","doi-asserted-by":"crossref","unstructured":"Bousmalis, K., et al.: Using simulation and domain adaptation to improve efficiency of deep robotic grasping. In: ICRA (2018)","DOI":"10.1109\/ICRA.2018.8460875"},{"key":"20_CR7","doi-asserted-by":"crossref","unstructured":"Chang, A., et al.: Matterport3D: learning from RGB-D data in indoor environments (2017). arXiv:1709.06158","DOI":"10.1109\/3DV.2017.00081"},{"key":"20_CR8","unstructured":"Chaplot, D.S., Gandhi, D., Gupta, S., Gupta, A., Salakhutdinov, R.: Learning to explore using active neural slam. In: ICLR (2020)"},{"key":"20_CR9","unstructured":"Chaplot, D.S., Gandhi, D.P., Gupta, A., Salakhutdinov, R.R.: Object goal navigation using goal-oriented semantic exploration. In: NeurIPS (2020)"},{"key":"20_CR10","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"17","DOI":"10.1007\/978-3-030-58539-6_2","volume-title":"Computer Vision \u2013 ECCV 2020","author":"C Chen","year":"2020","unstructured":"Chen, C., et al.: SoundSpaces: audio-visual navigation in\u00a03D\u00a0environments. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12351, pp. 17\u201336. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58539-6_2"},{"key":"20_CR11","unstructured":"Chen, C., et al.: SoundSpaces 2.0: a simulation platform for visual-acoustic learning. In: NeurIPS Datasets and Benchmarks Track (2022)"},{"key":"20_CR12","doi-asserted-by":"crossref","unstructured":"Chen, H., Suhr, A., Misra, D., Snavely, N., Artzi, Y.: Touchdown: natural language navigation and spatial reasoning in visual street environments. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.01282"},{"key":"20_CR13","unstructured":"Chen, T., Gupta, S., Gupta, A.: Learning exploration policies for navigation. In: ICLR (2019)"},{"key":"20_CR14","doi-asserted-by":"crossref","unstructured":"Choi, S., et al.: Learning quadrupedal locomotion on deformable terrain. Sci. Robot. 8(74), eade2256 (2023)","DOI":"10.1126\/scirobotics.ade2256"},{"key":"20_CR15","doi-asserted-by":"crossref","unstructured":"Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nie\u00dfner, M.: ScanNet: richly-annotated 3D reconstructions of indoor scenes. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.261"},{"key":"20_CR16","doi-asserted-by":"crossref","unstructured":"Deitke, M., et al.: RoboTHOR: an open simulation-to-real embodied AI platform. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00323"},{"key":"20_CR17","doi-asserted-by":"crossref","unstructured":"Deitke, M., Hendrix, R., Farhadi, A., Ehsani, K., Kembhavi, A.: Phone2Proc: bringing robust robots into our chaotic world. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00932"},{"key":"20_CR18","unstructured":"Deitke, M., et al.: ProcTHOR: large-scale embodied AI using procedural generation. In: NeurIPS (2022)"},{"key":"20_CR19","doi-asserted-by":"crossref","unstructured":"Ehsani, K., et al.: ManipulaTHOR: a framework for visual object manipulation. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00447"},{"key":"20_CR20","unstructured":"Gan, C., et al.: ThreeDWorld: a platform for interactive multi-modal physical simulation. In: NeurIPS Datasets and Benchmarks Track (2021)"},{"issue":"4","key":"20_CR21","first-page":"10049","volume":"7","author":"X Gao","year":"2022","unstructured":"Gao, X., Gao, Q., Gong, R., Lin, K., Thattai, G., Sukhatme, G.S.: DialFRED: dialogue-enabled agents for embodied instruction following. RA-L 7(4), 10049\u201310056 (2022)","journal-title":"RA-L"},{"key":"20_CR22","unstructured":"Ghallab, M., et al.: PDDL the planning domain definition language (1998)"},{"key":"20_CR23","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: NeurIPS (2014)"},{"key":"20_CR24","doi-asserted-by":"crossref","unstructured":"Gordon, D., Kembhavi, A., Rastegari, M., Redmon, J., Fox, D., Farhadi, A.: IQA: visual question answering in interactive environments. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00430"},{"key":"20_CR25","unstructured":"Gu, J., et al.: ManiSkill2: a unified benchmark for generalizable manipulation skills. In: ICLR (2023)"},{"key":"20_CR26","doi-asserted-by":"crossref","unstructured":"Heo, M., Lee, Y., Lee, D., Lim, J.J.: FurnitureBench: reproducible real-world benchmark for long-horizon complex manipulation. In: RSS (2023)","DOI":"10.15607\/RSS.2023.XIX.041"},{"key":"20_CR27","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local Nash equilibrium. In: NeurIPS (2017)"},{"key":"20_CR28","doi-asserted-by":"crossref","unstructured":"Ho, D., Rao, K., Xu, Z., Jang, E., Khansari, M., Bai, Y.: RetinaGAN: an object-aware approach to sim-to-real transfer. In: ICRA (2021)","DOI":"10.1109\/ICRA48506.2021.9561157"},{"key":"20_CR29","doi-asserted-by":"publisher","first-page":"253","DOI":"10.1613\/jair.855","volume":"14","author":"J Hoffmann","year":"2001","unstructured":"Hoffmann, J., Nebel, B.: The FF planning system: fast plan generation through heuristic search. JAIR 14, 253\u2013302 (2001)","journal-title":"JAIR"},{"key":"20_CR30","doi-asserted-by":"crossref","unstructured":"Hua, B.S., Pham, Q.H., Nguyen, D.T., Tran, M.K., Yu, L.F., Yeung, S.K.: SceneNN: a scene meshes dataset with annotations. In: 3DV (2016)","DOI":"10.1109\/3DV.2016.18"},{"key":"20_CR31","doi-asserted-by":"crossref","unstructured":"Khanna, M., et al.: Habitat synthetic scenes dataset (HSSD-200): an analysis of 3D scene scale and realism tradeoffs for objectgoal navigation. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01550"},{"key":"20_CR32","doi-asserted-by":"crossref","unstructured":"Khansari, M., et al.: Practical visual deep imitation learning via task-level domain consistency. In: ICRA (2023)","DOI":"10.1109\/ICRA48891.2023.10161202"},{"key":"20_CR33","unstructured":"Kim, B., Bhambri, S., Singh, K.P., Mottaghi, R., Choi, J.: Agent with the big picture: perceiving surroundings for interactive instruction following. In: Embodied AI Workshop @ CVPR (2021)"},{"key":"20_CR34","doi-asserted-by":"crossref","unstructured":"Kim, B., Kim, J., Kim, Y., Min, C., Choi, J.: Context-aware planning and environment-aware memory for instruction following embodied agents. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01004"},{"key":"20_CR35","unstructured":"Kolve, E., et al.: AI2-THOR: an interactive 3D environment for visual AI (2017). arXiv:1712.05474"},{"key":"20_CR36","doi-asserted-by":"crossref","unstructured":"Krantz, J., et al.: Navigating to objects specified by images. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01002"},{"key":"20_CR37","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"104","DOI":"10.1007\/978-3-030-58604-1_7","volume-title":"Computer Vision \u2013 ECCV 2020","author":"J Krantz","year":"2020","unstructured":"Krantz, J., Wijmans, E., Majumdar, A., Batra, D., Lee, S.: Beyond the nav-graph: vision-and-language navigation in continuous environments. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12373, pp. 104\u2013120. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58604-1_7"},{"key":"20_CR38","doi-asserted-by":"crossref","unstructured":"Ku, A., Anderson, P., Patel, R., Ie, E., Baldridge, J.: Room-Across-Room: multilingual vision-and-language navigation with dense spatiotemporal grounding. In: EMNLP (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.356"},{"key":"20_CR39","doi-asserted-by":"crossref","unstructured":"Kumar, A., Fu, Z., Pathak, D., Malik, J.: RMA: rapid motor adaptation for legged robots. In: RSS (2021)","DOI":"10.15607\/RSS.2021.XVII.011"},{"key":"20_CR40","unstructured":"Li, C., et al.: iGibson 2.0: object-centric simulation for robot learning of everyday household tasks. In: CoRL (2021)"},{"key":"20_CR41","unstructured":"Li, C., et al.: BEHAVIOR-1K: a benchmark for embodied AI with 1,000 everyday activities and realistic simulation. In: CoRL (2022)"},{"key":"20_CR42","doi-asserted-by":"crossref","unstructured":"Li, Z., et al.: OpenRooms: an open framework for photorealistic indoor scene datasets. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00711"},{"key":"20_CR43","unstructured":"MacMahon, M., Stankiewicz, B., Kuipers, B.: Walk the talk: connecting language, knowledge, and action in route instructions. In: AAAI (2006)"},{"key":"20_CR44","unstructured":"Majumdar, et al.: FindThis: language-driven object disambiguation in indoor environments. In: CoRL (2023)"},{"key":"20_CR45","unstructured":"Mao, Y., Zhang, Y., Jiang, H., Chang, A., Savva, M.: MultiScan: scalable RGBD scanning for 3D environments with articulated objects. In: NeurIPS (2022)"},{"key":"20_CR46","unstructured":"Min, S.Y., Chaplot, D.S., Ravikumar, P., Bisk, Y., Salakhutdinov, R.: FILM: following instructions in language with modular methods. In: ICLR (2022)"},{"key":"20_CR47","unstructured":"Min, S.Y., et al.: Object goal navigation with end-to-end self-supervision. In: IROS (2023)"},{"key":"20_CR48","doi-asserted-by":"crossref","unstructured":"Misra, D., Bennett, A., Blukis, V., Niklasson, E., Shatkhin, M., Artzi, Y.: Mapping instructions to actions in 3D environments with visual goal prediction. In: EMNLP (2018)","DOI":"10.18653\/v1\/D18-1287"},{"key":"20_CR49","doi-asserted-by":"crossref","unstructured":"Padmakumar, A., et al.: TEACh: task-driven embodied agents that chat. In: AAAI (2022)","DOI":"10.1609\/aaai.v36i2.20097"},{"key":"20_CR50","doi-asserted-by":"crossref","unstructured":"Partsey, R., Wijmans, E., Yokoyama, N., Dobosevych, O., Batra, D., Maksymets, O.: Is mapping necessary for realistic pointgoal navigation? In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01672"},{"key":"20_CR51","doi-asserted-by":"crossref","unstructured":"Pashevich, A., Schmid, C., Sun, C.: Episodic transformer for vision-and-language navigation. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01564"},{"key":"20_CR52","doi-asserted-by":"crossref","unstructured":"Puig, X., et al.: VirtualHome: simulating household activities via programs. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00886"},{"key":"20_CR53","unstructured":"Puig, X., et al.: Habitat 3.0: a co-habitat for humans, avatars and robots. In: ICLR (2024)"},{"key":"20_CR54","doi-asserted-by":"crossref","unstructured":"Qi, Y., et al.: REVERIE: remote embodied visual referring expression in real indoor environments. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01000"},{"key":"20_CR55","doi-asserted-by":"crossref","unstructured":"Ramakrishnan, S.K., Chaplot, D.S., Al-Halah, Z., Malik, J., Grauman, K.: PONI: potential functions for objectgoal navigation with interaction-free learning. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01832"},{"key":"20_CR56","unstructured":"Ramakrishnan, S.K., et al.: Habitat-Matterport 3D Dataset (HM3D): 1000 large-scale 3D environments for embodied AI. In: NeurIPS Datasets and Benchmarks Track (2021)"},{"key":"20_CR57","doi-asserted-by":"crossref","unstructured":"Ramrakhya, R., Undersander, E., Batra, D., Das, A.: Habitat-web: learning embodied object-search strategies from human demonstrations at scale. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00511"},{"key":"20_CR58","doi-asserted-by":"crossref","unstructured":"Rao, K., Harris, C., Irpan, A., Levine, S., Ibarz, J., Khansari, M.: RL-CycleGAN: reinforcement learning aware simulation-to-real. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01117"},{"key":"20_CR59","doi-asserted-by":"crossref","unstructured":"Savva, M., et al.: Habitat: a platform for embodied AI research. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00943"},{"key":"20_CR60","doi-asserted-by":"crossref","unstructured":"Sethian, J.A.: A fast marching level set method for monotonically advancing fronts. In: PNAS (1996)","DOI":"10.1073\/pnas.93.4.1591"},{"key":"20_CR61","doi-asserted-by":"crossref","unstructured":"Shridhar, M., et al.: ALFRED: a benchmark for interpreting grounded instructions for everyday tasks. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01075"},{"key":"20_CR62","unstructured":"Singh, K.P., Bhambri, S., Kim, B., Mottaghi, R., Choi, J.: Factorizing perception and policy for interactive instruction following. In: ICCV (2021)"},{"key":"20_CR63","doi-asserted-by":"crossref","unstructured":"Song, C.H., Wu, J., Washington, C., Sadler, B.M., Chao, W.L., Su, Y.: LLM-Planner: few-shot grounded planning for embodied agents with large language models. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00280"},{"key":"20_CR64","doi-asserted-by":"crossref","unstructured":"Song, S., Lichtenberg, S.P., Xiao, J.: SUN RGB-D: A RGB-D scene understanding benchmark suite. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298655"},{"key":"20_CR65","unstructured":"Srivastava, S., et al.: BEHAVIOR: benchmark for everyday household activities in virtual, interactive, and ecological environments. In: CoRL (2021)"},{"key":"20_CR66","unstructured":"Straub, J., et al.: The replica dataset: a digital replica of indoor spaces (2019). arXiv:1906.05797"},{"key":"20_CR67","unstructured":"Szot, A., et al.: Habitat 2.0: training home assistants to rearrange their habitat. In: NeurIPS (2021)"},{"key":"20_CR68","doi-asserted-by":"crossref","unstructured":"Tan, J., et al.: Sim-to-real: learning agile locomotion for quadruped robots. In: RSS (2018)","DOI":"10.15607\/RSS.2018.XIV.010"},{"key":"20_CR69","doi-asserted-by":"crossref","unstructured":"Tobin, J., Fong, R., Ray, A., Schneider, J., Zaremba, W., Abbeel, P.: Domain randomization for transferring deep neural networks from simulation to the real world. In: IROS (2017)","DOI":"10.1109\/IROS.2017.8202133"},{"key":"20_CR70","doi-asserted-by":"crossref","unstructured":"Torbunov, D., et al.: UVCGAN v2: an improved cycle-consistent GAN for unpaired image-to-image translation (2023). arXiv:2303.16280","DOI":"10.1109\/WACV56688.2023.00077"},{"key":"20_CR71","doi-asserted-by":"crossref","unstructured":"Truong, J., Chernova, S., Batra, D.: Bi-directional domain adaptation for sim2real transfer of embodied navigation agents. RA-L 6(2), 2634-2641 (2021)","DOI":"10.1109\/LRA.2021.3062303"},{"key":"20_CR72","doi-asserted-by":"crossref","unstructured":"Weihs, L., Deitke, M., Kembhavi, A., Mottaghi, R.: Visual room rearrangement. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00586"},{"key":"20_CR73","unstructured":"Wijmans, E., et al.: DD-PPO: learning near-perfect pointgoal navigators from 2.5 billion frames. In: ICLR (2020)"},{"key":"20_CR74","doi-asserted-by":"crossref","unstructured":"Xia, F., Zamir, A.R., He, Z., Sax, A., Malik, J., Savarese, S.: Gibson Env: real-world perception for embodied agents. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00945"},{"key":"20_CR75","unstructured":"Yenamandra, S., et al.: HomeRobot: open-vocabulary mobile manipulation. In: CoRL (2023)"},{"key":"20_CR76","doi-asserted-by":"crossref","unstructured":"Yeshwanth, C., Liu, Y.C., Nie\u00dfner, M., Dai, A.: ScanNet++: a high-fidelity dataset of 3D indoor scenes. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00008"},{"key":"20_CR77","doi-asserted-by":"crossref","unstructured":"Zhang, J., et al.: VR-goggles for robots: real-to-sim domain adaptation for visual control. RA-L 4(2), 1148\u20131155 (2019)","DOI":"10.1109\/LRA.2019.2894216"},{"key":"20_CR78","doi-asserted-by":"crossref","unstructured":"Zhu, H., et al.: EXCALIBUR: encouraging and evaluating embodied exploration. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01434"},{"key":"20_CR79","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.244"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72684-2_20","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:10:31Z","timestamp":1730574631000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72684-2_20"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,3]]},"ISBN":["9783031726835","9783031726842"],"references-count":79,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72684-2_20","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,3]]},"assertion":[{"value":"3 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}