{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T15:40:06Z","timestamp":1773157206615,"version":"3.50.1"},"publisher-location":"Cham","reference-count":68,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729829","type":"print"},{"value":"9783031729836","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72983-6_22","type":"book-chapter","created":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:34:20Z","timestamp":1730108060000},"page":"382-399","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":18,"title":["Image Compression for\u00a0Machine and\u00a0Human Vision with\u00a0Spatial-Frequency Adaptation"],"prefix":"10.1007","author":[{"given":"Han","family":"Li","sequence":"first","affiliation":[]},{"given":"Shaohui","family":"Li","sequence":"additional","affiliation":[]},{"given":"Shuangrui","family":"Ding","sequence":"additional","affiliation":[]},{"given":"Wenrui","family":"Dai","sequence":"additional","affiliation":[]},{"given":"Maida","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Chenglin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Junni","family":"Zou","sequence":"additional","affiliation":[]},{"given":"Hongkai","family":"Xiong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,29]]},"reference":[{"key":"22_CR1","doi-asserted-by":"crossref","unstructured":"Bai, Y., Yang, X., Liu, X., Jiang, J., Wang, Y., Ji, X., Gao, W.: Towards end-to-end image compression and analysis with transformers. In: AAAI, vol.\u00a036, pp. 104\u2013112 (2022)","DOI":"10.1609\/aaai.v36i1.19884"},{"issue":"2","key":"22_CR2","first-page":"339","volume":"15","author":"J Ball\u00e9","year":"2020","unstructured":"Ball\u00e9, J., Chou, P.A., Minnen, D., Singh, S., Johnston, N., Agustsson, E., Hwang, S.J., Toderici, G.: Nonlinear transform coding. IEEE JSTSP 15(2), 339\u2013353 (2020)","journal-title":"IEEE JSTSP"},{"key":"22_CR3","unstructured":"Ball\u00e9, J., Laparra, V., Simoncelli, E.P.: End-to-end optimized image compression. In: ICLR (2016)"},{"key":"22_CR4","unstructured":"Ball\u00e9, J., Minnen, D., Singh, S., Hwang, S.J., Johnston, N.: Variational image compression with a scale hyperprior. In: ICLR (2018)"},{"key":"22_CR5","unstructured":"B\u00e9gaint, J., Racap\u00e9, F., Feltman, S., Pushparaja, A.: Compressai: a pytorch library and evaluation platform for end-to-end compression research. arXiv preprint arXiv:2011.03029 (2020)"},{"key":"22_CR6","unstructured":"Bjontegaard, G.: Calculation of average psnr differences between rd-curves. In: VCEG-M33 (2001)"},{"key":"22_CR7","unstructured":"Campos, J., Meierhans, S., Djelouah, A., Schroers, C.: Content adaptive optimization for neural image compression. In: CVPRW (2019)"},{"key":"22_CR8","doi-asserted-by":"crossref","unstructured":"Cao, Z., Simon, T., Wei, S.E., Sheikh, Y.: Realtime multi-person 2d pose estimation using part affinity fields. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7291\u20137299 (2017)","DOI":"10.1109\/CVPR.2017.143"},{"key":"22_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-030-58452-8_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Carion","year":"2020","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 213\u2013229. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13"},{"key":"22_CR10","unstructured":"Chen, S., et al.: Adaptformer: adapting vision transformers for scalable visual recognition. In: NeurIPS, vol.\u00a035, pp. 16664\u201316678 (2022)"},{"key":"22_CR11","doi-asserted-by":"crossref","unstructured":"Chen, Y.H., Weng, Y.C., Kao, C.H., Chien, C., Chiu, W.C., Peng, W.H.: Transtic: transferring transformer-based image compression from human perception to machine perception. In: ICCV, pp. 23297\u201323307 (2023)","DOI":"10.1109\/ICCV51070.2023.02129"},{"key":"22_CR12","unstructured":"Chen, Z., Duan, Y., Wang, W., He, J., Lu, T., Dai, J., Qiao, Y.: Vision transformer adapter for dense predictions. In: ICLR (2023)"},{"key":"22_CR13","doi-asserted-by":"crossref","unstructured":"Cheng, Z., Sun, H., Takeuchi, M., Katto, J.: Learned image compression with discretized gaussian mixture likelihoods and attention modules. In: CVPR, pp. 7939\u20137948 (2020)","DOI":"10.1109\/CVPR42600.2020.00796"},{"key":"22_CR14","first-page":"2739","volume":"31","author":"H Choi","year":"2022","unstructured":"Choi, H., Baji\u0107, I.V.: Scalable image coding for humans and machines. IEEE TIP 31, 2739\u20132754 (2022)","journal-title":"IEEE TIP"},{"key":"22_CR15","doi-asserted-by":"crossref","unstructured":"Chollet, F.: Xception: deep learning with depthwise separable convolutions. In: CVPR, pp. 1251\u20131258 (2017)","DOI":"10.1109\/CVPR.2017.195"},{"key":"22_CR16","unstructured":"Codevilla, F., Simard, J.G., Goroshin, R., Pal, C.: Learned image compression for machine perception. arXiv preprint arXiv:2111.02249 (2021)"},{"key":"22_CR17","doi-asserted-by":"crossref","unstructured":"Deng, J., et al.: Imagenet: a large-scale hierarchical image database. In: CVPR, pp. 248\u2013255 (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"22_CR18","doi-asserted-by":"crossref","unstructured":"Ding, S., et al.: Motion-aware contrastive video representation learning via foreground-background merging. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9716\u20139726 (2022)","DOI":"10.1109\/CVPR52688.2022.00949"},{"key":"22_CR19","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: transformers for image recognition at scale. In: ICLR (2020)"},{"key":"22_CR20","doi-asserted-by":"crossref","unstructured":"Feng, R., Gao, Y., Jin, X., Feng, R., Chen, Z.: Semantically structured image compression via irregular group-based decoupling. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01581"},{"key":"22_CR21","unstructured":"Feng, R., Liu, J., Jin, X., Pan, X., Sun, H., Chen, Z.: Prompt-icm: A unified framework towards image coding for machines with task-driven prompts. arXiv preprint arXiv:2305.02578 (2023)"},{"key":"22_CR22","unstructured":"Fischer, K., Brand, F., Kaup, A.: Boosting neural image compression for machines using latent space masking. IEEE TCSVT (2022)"},{"key":"22_CR23","doi-asserted-by":"crossref","unstructured":"Girshick, R.: Fast r-cnn. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1440\u20131448 (2015)","DOI":"10.1109\/ICCV.2015.169"},{"key":"22_CR24","unstructured":"He, J., Zhou, C., Ma, X., Berg-Kirkpatrick, T., Neubig, G.: Towards a unified view of parameter-efficient transfer learning. In: ICLR (2022)"},{"key":"22_CR25","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask r-cnn. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2961\u20132969 (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"22_CR26","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"22_CR27","doi-asserted-by":"crossref","unstructured":"He, X., Li, C., Zhang, P., Yang, J., Wang, X.E.: Parameter-efficient model adaptation for vision transformers. In: AAAI, vol.\u00a037, pp. 817\u2013825 (2023)","DOI":"10.1609\/aaai.v37i1.25160"},{"key":"22_CR28","unstructured":"Houlsby, N., et al.: Parameter-efficient transfer learning for nlp. In: ICML, pp. 2790\u20132799. PMLR (2019)"},{"key":"22_CR29","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der\u00a0Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4700\u20134708 (2017)","DOI":"10.1109\/CVPR.2017.243"},{"key":"22_CR30","doi-asserted-by":"publisher","unstructured":"Jia, M., Tang, L., Chen, B.C., Cardie, C., Belongie, S., Hariharan, B., Lim, S.N.: Visual prompt tuning. In: ECCV, pp. 709\u2013727. Springer (2022). https:\/\/doi.org\/10.1007\/978-3-031-19827-4_41","DOI":"10.1007\/978-3-031-19827-4_41"},{"key":"22_CR31","doi-asserted-by":"crossref","unstructured":"Khattak, M.U., Rasheed, H., Maaz, M., Khan, S., Khan, F.S.: Maple: Multi-modal prompt learning. In: CVPR, pp. 19113\u201319122 (2023)","DOI":"10.1109\/CVPR52729.2023.01832"},{"key":"22_CR32","doi-asserted-by":"crossref","unstructured":"Koyuncu, A.B., Gao, H., Boev, A., Gaikov, G., Alshina, E., Steinbach, E.: Contextformer: a transformer with spatio-channel attention for context modeling in learned image compression. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19800-7_26"},{"key":"22_CR33","unstructured":"Lee, J., Cho, S., Beack, S.K.: Context-adaptive entropy model for end-to-end optimized image compression. In: ICLR (2019)"},{"key":"22_CR34","doi-asserted-by":"crossref","unstructured":"Lester, B., Al-Rfou, R., Constant, N.: The power of scale for parameter-efficient prompt tuning. In: EMNLP (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"22_CR35","unstructured":"Li, H., Li, S., Dai, W., Li, C., Zou, J., Xiong, H.: Frequency-aware transformer for learned image compression. In: The Twelfth International Conference on Learning Representations (2024). https:\/\/openreview.net\/forum?id=HKGQDDTuvZ"},{"key":"22_CR36","unstructured":"Li, H., et al.: Hierarchical graph networks for 3d human pose estimation. arXiv preprint arXiv:2111.11927 (2021)"},{"key":"22_CR37","doi-asserted-by":"crossref","unstructured":"Li, H., et al.: Pose-oriented transformer with uncertainty-guided refinement for 2d-to-3d human pose estimation. In: AAAI, vol.\u00a037, pp. 1296\u20131304 (2023)","DOI":"10.1609\/aaai.v37i1.25213"},{"key":"22_CR38","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Doll\u00e1r, P., Zitnick, C.L.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"22_CR39","doi-asserted-by":"crossref","unstructured":"Liu, J., Feng, R., Qi, Y., Chen, Q., Chen, Z., Zeng, W., Jin, X.: Rate-distortion-cognition controllable versatile neural image compression. In: ECCV. Springer (2024)","DOI":"10.1007\/978-3-031-72992-8_19"},{"key":"22_CR40","doi-asserted-by":"crossref","unstructured":"Liu, J., Jin, X., Feng, R., Chen, Z., Zeng, W.: Composable image coding for machine via task-oriented internal adaptor and external prior. In: VCIP, pp.\u00a01\u20135 (2023)","DOI":"10.1109\/VCIP59821.2023.10402659"},{"key":"22_CR41","doi-asserted-by":"crossref","unstructured":"Liu, J., Sun, H., Katto, J.: Improving multiple machine vision tasks in the compressed domain. In: ICPR, pp. 331\u2013337. IEEE (2022)","DOI":"10.1109\/ICPR56361.2022.9956532"},{"key":"22_CR42","doi-asserted-by":"crossref","unstructured":"Liu, J., Sun, H., Katto, J.: Learned image compression with mixed transformer-cnn architectures. In: CVPR, pp. 14388\u201314397 (2023)","DOI":"10.1109\/CVPR52729.2023.01383"},{"issue":"9","key":"22_CR43","doi-asserted-by":"publisher","first-page":"2605","DOI":"10.1007\/s11263-021-01491-7","volume":"129","author":"K Liu","year":"2021","unstructured":"Liu, K., Liu, D., Li, L., Yan, N., Li, H.: Semantics-to-signal scalable image compression with learned revertible representations. IJCV 129(9), 2605\u20132621 (2021)","journal-title":"IJCV"},{"key":"22_CR44","doi-asserted-by":"crossref","unstructured":"Liu, L., Hu, Z., Chen, Z., Xu, D.: Icmh-net: neural image compression towards both machine vision and human vision. In: ACM MM, pp. 8047\u20138056 (2023)","DOI":"10.1145\/3581783.3612041"},{"issue":"9","key":"22_CR45","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3560815","volume":"55","author":"P Liu","year":"2023","unstructured":"Liu, P., Yuan, W., Fu, J., Jiang, Z., Hayashi, H., Neubig, G.: Pre-train, prompt, and predict: a systematic survey of prompting methods in natural language processing. ACM Comput. Surv. 55(9), 1\u201335 (2023)","journal-title":"ACM Comput. Surv."},{"key":"22_CR46","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: ICCV, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"22_CR47","doi-asserted-by":"crossref","unstructured":"Lu, M., Guo, P., Shi, H., Cao, C., Ma, Z.: Transformer-based image compression. In: DCC, pp. 469\u2013469 (2022)","DOI":"10.1109\/DCC52660.2022.00080"},{"key":"22_CR48","unstructured":"Lv, Y., Xiang, J., Zhang, J., Yang, W., Han, X., Yang, W.: Dynamic low-rank instance adaptation for universal neural image compression. In: ACM MM, pp. 632\u2013642 (2023)"},{"key":"22_CR49","doi-asserted-by":"crossref","unstructured":"Mentzer, F., Agustsson, E., Tschannen, M., Timofte, R., Gool, L.V.: Practical full resolution learned lossless image compression. In: CVPR, pp. 10629\u201310638 (2019)","DOI":"10.1109\/CVPR.2019.01088"},{"key":"22_CR50","unstructured":"Minnen, D., Ball\u00e9, J., Toderici, G.D.: Joint autoregressive and hierarchical priors for learned image compression. In: NeurIPS, vol.\u00a031 (2018)"},{"key":"22_CR51","doi-asserted-by":"crossref","unstructured":"Minnen, D., Singh, S.: Channel-wise autoregressive entropy models for learned image compression. In: ICIP, pp. 3339\u20133343 (2020)","DOI":"10.1109\/ICIP40778.2020.9190935"},{"key":"22_CR52","unstructured":"Nair, V., Hinton, G.E.: Rectified linear units improve restricted boltzmann machines. In: ICML. pp. 807\u2013814 (2010)"},{"key":"22_CR53","doi-asserted-by":"crossref","unstructured":"Pfeiffer, J., Kamath, A., R\u00fcckl\u00e9, A., Cho, K., Gurevych, I.: Adapterfusion: Non-destructive task composition for transfer learning. arXiv preprint arXiv:2005.00247 (2020)","DOI":"10.18653\/v1\/2021.eacl-main.39"},{"key":"22_CR54","doi-asserted-by":"crossref","unstructured":"Pfeiffer, J., et al.: Adapterhub: a framework for adapting transformers. In: EMNLP, pp. 46\u201354 (2020)","DOI":"10.18653\/v1\/2020.emnlp-demos.7"},{"key":"22_CR55","unstructured":"Qian, Y., Sun, X., Lin, M., Tan, Z., Jin, R.: Entroformer: a transformer-based entropy model for learned image compression. In: ICLR (2022)"},{"key":"22_CR56","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. In: NeurIPS, vol.\u00a028 (2015)"},{"key":"22_CR57","doi-asserted-by":"crossref","unstructured":"Shen, S., Yue, H., Yang, J.: Dec-adapter: exploring efficient decoder-side adapter for bridging screen content and natural image compression. In: ICCV, pp. 12887\u201312896 (2023)","DOI":"10.1109\/ICCV51070.2023.01184"},{"key":"22_CR58","doi-asserted-by":"crossref","unstructured":"Shi, B., Jiang, D., Zhang, X., Li, H., Dai, W., Zou, J., Xiong, H., Tian, Q.: A transformer-based decoder for semantic segmentation with multi-level context mining. In: European Conference on Computer Vision. pp. 624\u2013639. Springer (2022)","DOI":"10.1007\/978-3-031-19815-1_36"},{"key":"22_CR59","doi-asserted-by":"crossref","unstructured":"Strudel, R., Garcia, R., Laptev, I., Schmid, C.: Segmenter: transformer for semantic segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7262\u20137272 (2021)","DOI":"10.1109\/ICCV48922.2021.00717"},{"key":"22_CR60","doi-asserted-by":"crossref","unstructured":"Sun, K., Xiao, B., Liu, D., Wang, J.: Deep high-resolution representation learning for human pose estimation. In: CVPR, pp. 5693\u20135703 (2019)","DOI":"10.1109\/CVPR.2019.00584"},{"key":"22_CR61","doi-asserted-by":"crossref","unstructured":"Tsubota, K., Akutsu, H., Aizawa, K.: Universal deep image compression via content-adaptive optimization with adapters. In: WACV, pp. 2529\u20132538 (2023)","DOI":"10.1109\/WACV56688.2023.00256"},{"key":"22_CR62","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: Adapting shortcut with normalizing flow: An efficient tuning framework for visual recognition. In: CVPR, pp. 15965\u201315974 (2023)","DOI":"10.1109\/CVPR52729.2023.01532"},{"key":"22_CR63","unstructured":"Xie, E., Wang, W., Yu, Z., Anandkumar, A., Alvarez, J.M., Luo, P.: Segformer: Simple and efficient design for semantic segmentation with transformers. In: NeurIPS, vol.\u00a034, pp. 12077\u201312090 (2021)"},{"key":"22_CR64","first-page":"2957","volume":"23","author":"S Yang","year":"2021","unstructured":"Yang, S., Hu, Y., Yang, W., Duan, L.Y., Liu, J.: Towards coding for human and machine vision: scalable face image coding. IEEE TMM 23, 2957\u20132971 (2021)","journal-title":"IEEE TMM"},{"key":"22_CR65","doi-asserted-by":"crossref","unstructured":"Zheng, H., Zhou, L., Li, H., Su, J., Wei, X., Xu, X.: Bem: Balanced and entropy-based mix for long-tailed semi-supervised learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 22893\u201322903, June 2024","DOI":"10.1109\/CVPR52733.2024.02160"},{"key":"22_CR66","doi-asserted-by":"crossref","unstructured":"Zheng, S., et\u00a0al.: Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6881\u20136890 (2021)","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"22_CR67","unstructured":"Zhu, X., Su, W., Lu, L., Li, B., Wang, X., Dai, J.: Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159 (2020)"},{"key":"22_CR68","doi-asserted-by":"crossref","unstructured":"Zou, R., Song, C., Zhang, Z.: The devil is in the details: window-based attention for image compression. In: CVPR, pp. 17492\u201317501 (2022)","DOI":"10.1109\/CVPR52688.2022.01697"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72983-6_22","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,30]],"date-time":"2024-11-30T10:36:57Z","timestamp":1732963017000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72983-6_22"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,29]]},"ISBN":["9783031729829","9783031729836"],"references-count":68,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72983-6_22","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,29]]},"assertion":[{"value":"29 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}