{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T19:14:18Z","timestamp":1757618058144,"version":"3.44.0"},"publisher-location":"Cham","reference-count":77,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031919060"},{"type":"electronic","value":"9783031919077"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-91907-7_18","type":"book-chapter","created":{"date-parts":[[2025,5,27]],"date-time":"2025-05-27T11:46:24Z","timestamp":1748346384000},"page":"303-320","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["DreamTexture: High-Fidelity Synthetic 3D Data Generation Through Decoupled Geometry and\u00a0Texture Synthesis"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-9292-9832","authenticated-orcid":false,"given":"Jing","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7037-1806","authenticated-orcid":false,"given":"Yawei","family":"Luo","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6786-1962","authenticated-orcid":false,"given":"Ying","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0002-5183-3021","authenticated-orcid":false,"given":"Xueying","family":"Li","sequence":"additional","affiliation":[]},{"given":"Xiaoxue","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yuwen","family":"Hao","sequence":"additional","affiliation":[]},{"given":"Lijun","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Zhengping","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,12]]},"reference":[{"key":"18_CR1","doi-asserted-by":"crossref","unstructured":"Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mip-NeRF 360: unbounded anti-aliased neural radiance fields. In: CVPR, pp. 5470\u20135479 (2022)","DOI":"10.1109\/CVPR52688.2022.00539"},{"issue":"4","key":"18_CR2","doi-asserted-by":"publisher","first-page":"106","DOI":"10.1145\/3072959.3073610","volume":"36","author":"S Bi","year":"2017","unstructured":"Bi, S., Kalantari, N.K., Ramamoorthi, R.: Patch-based optimization for image-based texture mapping. TOG 36(4), 106\u20131 (2017)","journal-title":"TOG"},{"key":"18_CR3","doi-asserted-by":"crossref","unstructured":"Bokhovkin, A., Tulsiani, S., Dai, A.: Mesh2tex: generating mesh textures from image queries. In: ICCV, pp. 8918\u20138928 (2023)","DOI":"10.1109\/ICCV51070.2023.00819"},{"key":"18_CR4","doi-asserted-by":"crossref","unstructured":"Cao, T., Kreis, K., Fidler, S., Sharp, N., Yin, K.: Texfusion: synthesizing 3D textures with text-guided image diffusion models. In: ICCV, pp. 
4169\u20134181 (2023)","DOI":"10.1109\/ICCV51070.2023.00385"},{"key":"18_CR5","doi-asserted-by":"crossref","unstructured":"Chen, D.Z., Siddiqui, Y., Lee, H.Y., Tulyakov, S., Nie\u00dfner, M.: Text2tex: text-driven texture synthesis via diffusion models. In: ICCV, pp. 18558\u201318568 (2023)","DOI":"10.1109\/ICCV51070.2023.01701"},{"key":"18_CR6","doi-asserted-by":"crossref","unstructured":"Chen, R., Chen, Y., Jiao, N., Jia, K.: Fantasia3D: disentangling geometry and appearance for high-quality text-to-3D content creation. In: ICCV, pp. 22246\u201322256 (2023)","DOI":"10.1109\/ICCV51070.2023.02033"},{"key":"18_CR7","doi-asserted-by":"crossref","unstructured":"Chen, Z., Yin, K., Fidler, S.: AUV-Net: Learning aligned UV maps for texture transfer and synthesis. In: CVPR, pp. 1465\u20131474 (2022)","DOI":"10.1109\/CVPR52688.2022.00152"},{"key":"18_CR8","doi-asserted-by":"crossref","unstructured":"Downs, L., et al.: Google scanned objects: a high-quality dataset of 3D scanned household items. In: ICRA, pp. 2553\u20132560. IEEE (2022)","DOI":"10.1109\/ICRA46639.2022.9811809"},{"key":"18_CR9","doi-asserted-by":"crossref","unstructured":"Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: radiance fields without neural networks. In: CVPR, pp. 5501\u20135510 (2022)","DOI":"10.1109\/CVPR52688.2022.00542"},{"key":"18_CR10","unstructured":"Gal, R., et al.: An image is worth one word: personalizing text-to-image generation using textual inversion. In: ICLR (2023)"},{"key":"18_CR11","unstructured":"Gu, Y., et\u00a0al.: Mix-of-show: decentralized low-rank adaptation for multi-concept customization of diffusion models. In: NeurIPS, vol. 36 (2023)"},{"key":"18_CR12","doi-asserted-by":"crossref","unstructured":"Guo, D., Li, K., Hu, B., Zhang, Y., Wang, M.: Benchmarking micro-action recognition: dataset, method, and application. TCSVT (2024)","DOI":"10.1109\/TCSVT.2024.3358415"},{"key":"18_CR13","unstructured":"Guo, Y.C., et al.: Threestudio: a unified framework for 3D content generation. https:\/\/github.com\/threestudio-project\/threestudio (2023)"},{"key":"18_CR14","doi-asserted-by":"crossref","unstructured":"Hang, T., et al.: Efficient diffusion training via min-snr weighting strategy. In: ICCV, pp. 7441\u20137451 (2023)","DOI":"10.1109\/ICCV51070.2023.00684"},{"key":"18_CR15","doi-asserted-by":"crossref","unstructured":"Henderson, P., Tsiminaki, V., Lampert, C.H.: Leveraging 2D data to learn textured 3D mesh generation. In: CVPR, pp. 7498\u20137507 (2020)","DOI":"10.1109\/CVPR42600.2020.00752"},{"key":"18_CR16","unstructured":"Hong, Y., et al.: LRM: large reconstruction model for single image to 3D. In: ICLR (2024)"},{"key":"18_CR17","unstructured":"Hu, E.J., et\u00a0al.: Lora: low-rank adaptation of large language models. In: ICLR (2022)"},{"key":"18_CR18","doi-asserted-by":"crossref","unstructured":"Huang, J., et\u00a0al.: Adversarial texture optimization from RGB-D scans. In: CVPR, pp. 1559\u20131568 (2020)","DOI":"10.1109\/CVPR42600.2020.00163"},{"key":"18_CR19","unstructured":"Huang, Y., Wang, J., Shi, Y., Tang, B., Qi, X., Zhang, L.: Dreamtime: an improved optimization strategy for diffusion-guided 3D generation. In: ICLR (2024)"},{"issue":"4","key":"18_CR20","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3592433","volume":"42","author":"B Kerbl","year":"2023","unstructured":"Kerbl, B., Kopanas, G., Leimk\u00fchler, T., Drettakis, G.: 3D gaussian splatting for real-time radiance field rendering. 
TOG 42(4), 1\u201314 (2023)","journal-title":"TOG"},{"key":"18_CR21","doi-asserted-by":"crossref","unstructured":"Kopf, J., Fu, C.W., Cohen-Or, D., Deussen, O., Lischinski, D., Wong, T.T.: Solid texture synthesis from 2D exemplars. In: ACM SIGGRAPH, pp. 2\u2013es (2007)","DOI":"10.1145\/1275808.1276380"},{"key":"18_CR22","doi-asserted-by":"crossref","unstructured":"Kumari, N., Zhang, B., Zhang, R., Shechtman, E., Zhu, J.Y.: Multi-concept customization of text-to-image diffusion. In: CVPR, pp. 1931\u20131941 (2023)","DOI":"10.1109\/CVPR52729.2023.00192"},{"issue":"3","key":"18_CR23","doi-asserted-by":"publisher","first-page":"541","DOI":"10.1145\/1141911.1141921","volume":"25","author":"S Lefebvre","year":"2006","unstructured":"Lefebvre, S., Hoppe, H.: Appearance-space texture synthesis. TOG 25(3), 541\u2013548 (2006)","journal-title":"TOG"},{"key":"18_CR24","unstructured":"Li, J., et al.: Instant3D: fast text-to-3D with sparse-view generation and large reconstruction model. In: ICLR (2024)"},{"key":"18_CR25","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: Gligen: open-set grounded text-to-image generation. In: CVPR, pp. 22511\u201322521 (2023)","DOI":"10.1109\/CVPR52729.2023.02156"},{"key":"18_CR26","doi-asserted-by":"crossref","unstructured":"Lin, C.H., et al.: Magic3D: high-resolution text-to-3D content creation. In: CVPR, pp. 300\u2013309 (2023)","DOI":"10.1109\/CVPR52729.2023.00037"},{"key":"18_CR27","doi-asserted-by":"crossref","unstructured":"Liu, M., et al.: One-2-3-45++: fast single image to 3D objects with consistent multi-view generation and 3D diffusion. In: CVPR, pp. 10072\u201310083 (2024)","DOI":"10.1109\/CVPR52733.2024.00960"},{"key":"18_CR28","unstructured":"Liu, M., et al.: One-2-3-45: any single image to 3D mesh in 45 seconds without per-shape optimization. In: NeurIPS, vol. 36 (2023)"},{"key":"18_CR29","doi-asserted-by":"crossref","unstructured":"Liu, R., Wu, R., Van\u00a0Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: zero-shot one image to 3D object. In: ICCV, pp. 9298\u20139309 (2023)","DOI":"10.1109\/ICCV51070.2023.00853"},{"key":"18_CR30","unstructured":"Liu, Y., et al.: Syncdreamer: generating multiview-consistent images from a single-view image. In: ICLR (2024)"},{"key":"18_CR31","doi-asserted-by":"crossref","unstructured":"Liu, Y., Xie, M., Liu, H., Wong, T.T.: Text-guided texturing by synchronized multi-view diffusion. arXiv preprint arXiv:2311.12891 (2023)","DOI":"10.1145\/3680528.3687621"},{"key":"18_CR32","doi-asserted-by":"crossref","unstructured":"Long, X., et\u00a0al.: Wonder3D: single image to 3D using cross-domain diffusion. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00951"},{"key":"18_CR33","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)"},{"key":"18_CR34","doi-asserted-by":"crossref","unstructured":"Lu, J., et al.: Context-aware textures. TOG 26(1), 3\u2013es (2007)","DOI":"10.1145\/1189762.1189765"},{"key":"18_CR35","doi-asserted-by":"crossref","unstructured":"Luo, Y., Liu, P., Yang, Y.: Kill two birds with one stone: Domain generalization for semantic segmentation via network pruning. In: IJCV, pp. 1\u201318 (2024)","DOI":"10.1007\/s11263-024-02194-5"},{"key":"18_CR36","doi-asserted-by":"crossref","unstructured":"Luo, Y., Liu, P., Zheng, L., Guan, T., Yu, J., Yang, Y.: Category-level adversarial adaptation for semantic segmentation using purified features. In: TPAMI, pp. 
3940\u20133956 (2021)","DOI":"10.1109\/TPAMI.2021.3064379"},{"issue":"3","key":"18_CR37","doi-asserted-by":"publisher","first-page":"333","DOI":"10.1631\/FITEE.2300747","volume":"25","author":"Y Luo","year":"2024","unstructured":"Luo, Y., Yang, Y.: Large language model and domain-specific model collaboration for smart education. FITEE 25(3), 333\u2013341 (2024)","journal-title":"FITEE"},{"key":"18_CR38","unstructured":"Ma, S., Luo, Y., Yang, Y.: Reconstructing and simulating dynamic 3D objects with mesh-adsorbed gaussian splatting. arXiv preprint arXiv:2406.01593 (2024)"},{"key":"18_CR39","doi-asserted-by":"crossref","unstructured":"Melas-Kyriazi, L., Laina, I., Rupprecht, C., Vedaldi, A.: Realfusion: 360deg reconstruction of any object from a single image. In: CVPR, pp. 8446\u20138455 (2023)","DOI":"10.1109\/CVPR52729.2023.00816"},{"key":"18_CR40","unstructured":"Mertens, T., Kautz, J., Chen, J., Bekaert, P., Durand, F.: Texture transfer using geometry correlation. Rendering Tech. 273(10.2312), 273\u2013284 (2006)"},{"key":"18_CR41","doi-asserted-by":"crossref","unstructured":"Metzer, G., Richardson, E., Patashnik, O., Giryes, R., Cohen-Or, D.: Latent-nerf for shape-guided generation of 3D shapes and textures. In: CVPR, pp. 12663\u201312673 (2023)","DOI":"10.1109\/CVPR52729.2023.01218"},{"key":"18_CR42","unstructured":"Miao, Q., Luo, Y., Yang, Y.: Pla4D: pixel-level alignments for text-to-4D gaussian splatting. arXiv preprint arXiv:2405.19957 (2024)"},{"key":"18_CR43","doi-asserted-by":"crossref","unstructured":"Mildenhall, B., Srinivasan, P., Tancik, M., Barron, J., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58452-8_24"},{"key":"18_CR44","doi-asserted-by":"crossref","unstructured":"Min, Z., Luo, Y., Yang, W., Wang, Y., Yang, Y.: Entangled view-epipolar information aggregation for generalizable neural radiance fields. In: CVPR, pp. 4906\u20134916 (2024)","DOI":"10.1109\/CVPR52733.2024.00469"},{"key":"18_CR45","doi-asserted-by":"crossref","unstructured":"Mohammad\u00a0Khalid, N., Xie, T., Belilovsky, E., Popa, T.: Clip-mesh: generating textured meshes from text using pretrained image-text models. In: ACM SIGGRAPH, pp.\u00a01\u20138 (2022)","DOI":"10.1145\/3550469.3555392"},{"key":"18_CR46","doi-asserted-by":"crossref","unstructured":"Mou, C., et al.: T2i-adapter: learning adapters to dig out more controllable ability for text-to-image diffusion models. In: AAAI, vol.\u00a038, pp. 4296\u20134304 (2024)","DOI":"10.1609\/aaai.v38i5.28226"},{"issue":"4","key":"18_CR47","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3528223.3530127","volume":"41","author":"T M\u00fcller","year":"2022","unstructured":"M\u00fcller, T., Evans, A., Schied, C., Keller, A.: Instant neural graphics primitives with a multiresolution hash encoding. TOG 41(4), 1\u201315 (2022)","journal-title":"TOG"},{"key":"18_CR48","doi-asserted-by":"crossref","unstructured":"Pavllo, D., Kohler, J., Hofmann, T., Lucchi, A.: Learning generative models of textured 3D meshes from real-world images. In: ICCV, pp. 13879\u201313889 (2021)","DOI":"10.1109\/ICCV48922.2021.01362"},{"key":"18_CR49","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: text-to-3D using 2D diffusion. In: ICLR (2023)"},{"key":"18_CR50","unstructured":"Qian, G., et al.: Magic123: one image to high-quality 3D object generation using both 2D and 3D diffusion priors. 
In: ICLR (2024)"},{"key":"18_CR51","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763. PMLR (2021)"},{"key":"18_CR52","doi-asserted-by":"crossref","unstructured":"Richardson, E., Metzer, G., Alaluf, Y., Giryes, R., Cohen-Or, D.: Texture: text-guided texturing of 3D shapes. In: ACM SIGGRAPH, pp. 1\u201311 (2023)","DOI":"10.1145\/3588432.3591503"},{"key":"18_CR53","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"18_CR54","doi-asserted-by":"crossref","unstructured":"Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: fine tuning text-to-image diffusion models for subject-driven generation. In: CVPR, pp. 22500\u201322510 (2023)","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"18_CR55","unstructured":"Saharia, C., et\u00a0al.: Photorealistic text-to-image diffusion models with deep language understanding. In: NeurIPS, vol.\u00a035, pp. 36479\u201336494 (2022)"},{"key":"18_CR56","doi-asserted-by":"crossref","unstructured":"Sanghi, A., et al.: Clip-forge: towards zero-shot text-to-shape generation. In: CVPR, pp. 18603\u201318613 (2022)","DOI":"10.1109\/CVPR52688.2022.01805"},{"key":"18_CR57","doi-asserted-by":"crossref","unstructured":"Schonberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: CVPR, pp. 4104\u20134113 (2016)","DOI":"10.1109\/CVPR.2016.445"},{"key":"18_CR58","unstructured":"Shi, R., et al.: Zero123++: a single image to consistent multi-view diffusion base model. arXiv preprint arXiv:2310.15110 (2023)"},{"key":"18_CR59","unstructured":"Shi, Y., Wang, P., Ye, J., Mai, L., Li, K., Yang, X.: MVDream: multi-view diffusion for 3D generation. In: ICLR (2024)"},{"key":"18_CR60","doi-asserted-by":"crossref","unstructured":"Si, C., Huang, Z., Jiang, Y., Liu, Z.: Freeu: free lunch in diffusion U-Net. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00453"},{"key":"18_CR61","doi-asserted-by":"crossref","unstructured":"Siddiqui, Y., Thies, J., Ma, F., Shan, Q., Nie\u00dfner, M., Dai, A.: Texturify: generating textures on 3D shape surfaces. In: ECCV, pp. 72\u201388. Springer (2022)","DOI":"10.1007\/978-3-031-20062-5_5"},{"key":"18_CR62","unstructured":"Sun, J., et al.: Dreamcraft3D: hierarchical 3D generation with bootstrapped diffusion prior. In: ICLR (2024)"},{"key":"18_CR63","doi-asserted-by":"crossref","unstructured":"Tang, J., Chen, Z., Chen, X., Wang, T., Zeng, G., Liu, Z.: LGM: large multi-view gaussian model for high-resolution 3D content creation. arXiv preprint arXiv:2402.05054 (2024)","DOI":"10.1007\/978-3-031-73235-5_1"},{"key":"18_CR64","unstructured":"Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: Dreamgaussian: generative gaussian splatting for efficient 3D content creation. In: ICLR (2024)"},{"key":"18_CR65","doi-asserted-by":"crossref","unstructured":"Tang, J., et al.: Make-it-3D: high-fidelity 3D creation from a single image with diffusion prior. In: ICCV, pp. 22819\u201322829 (2023)","DOI":"10.1109\/ICCV51070.2023.02086"},{"key":"18_CR66","unstructured":"Tochilkin, D., et al.: Triposr: fast 3D object reconstruction from a single image. 
arXiv preprint arXiv:2403.02151 (2024)"},{"key":"18_CR67","doi-asserted-by":"crossref","unstructured":"Wang, H., Du, X., Li, J., Yeh, R.A., Shakhnarovich, G.: Score jacobian chaining: lifting pretrained 2D diffusion models for 3d generation. In: CVPR, pp. 12619\u201312629 (2023)","DOI":"10.1109\/CVPR52729.2023.01214"},{"key":"18_CR68","unstructured":"Wang, P., Shi, Y.: Imagedream: image-prompt multi-view diffusion for 3D generation. arXiv preprint arXiv:2312.02201 (2023)"},{"key":"18_CR69","unstructured":"Wang, Z., et al.: Prolificdreamer: high-fidelity and diverse text-to-3D generation with variational score distillation. In: NeurIPS (2023)"},{"key":"18_CR70","doi-asserted-by":"crossref","unstructured":"Xu, D., Jiang, Y., Wang, P., Fan, Z., Wang, Y., Wang, Z.: Neurallift-360: lifting an in-the-wild 2D photo to a 3D object with 360deg views. In: CVPR, pp. 4479\u20134489 (2023)","DOI":"10.1109\/CVPR52729.2023.00435"},{"key":"18_CR71","unstructured":"Xu, J., Cheng, W., Gao, Y., Wang, X., Gao, S., Shan, Y.: Instantmesh: efficient 3D mesh generation from a single image with sparse-view large reconstruction models. arXiv preprint arXiv:2404.07191 (2024)"},{"key":"18_CR72","unstructured":"Ye, H., Zhang, J., Liu, S., Han, X., Yang, W.: IP-adapter: text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721 (2023)"},{"key":"18_CR73","unstructured":"Yeh, S.Y., Hsieh, Y.G., Gao, Z., Yang, B.B., Oh, G., Gong, Y.: Navigating text-to-image customization: from lycoris fine-tuning to model evaluation. In: ICLR (2024)"},{"key":"18_CR74","doi-asserted-by":"crossref","unstructured":"Yi, T., et al.: Gaussiandreamer: fast generation from text to 3D gaussians by bridging 2D and 3D diffusion models. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00649"},{"key":"18_CR75","doi-asserted-by":"crossref","unstructured":"Zeng, X., et al.: Paint3D: paint anything 3D with lighting-less texture diffusion models. In: CVPR, pp. 4252\u20134262 (2024)","DOI":"10.1109\/CVPR52733.2024.00407"},{"key":"18_CR76","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: ICCV, pp. 3836\u20133847 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"issue":"4","key":"18_CR77","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2601097.2601134","volume":"33","author":"QY Zhou","year":"2014","unstructured":"Zhou, Q.Y., Koltun, V.: Color map optimization for 3D reconstruction with consumer depth cameras. 
ToG 33(4), 1\u201310 (2014)","journal-title":"ToG"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-91907-7_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T15:57:03Z","timestamp":1757174223000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-91907-7_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031919060","9783031919077"],"references-count":77,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-91907-7_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"12 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
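
The record above is a raw Crossref REST API "work" response for this chapter. For anyone who wants to re-fetch or post-process such a record, below is a minimal sketch in Python, not an official client: it assumes network access and the third-party requests package. The https://api.crossref.org/works/{DOI} endpoint and the DOI are taken from the record itself; the mailto contact address is a placeholder you should replace with your own (Crossref asks callers to identify themselves for its polite pool).

# A minimal sketch: fetch this chapter's Crossref record and print a
# citation line from it. Assumes network access and the third-party
# `requests` package.
import requests

DOI = "10.1007/978-3-031-91907-7_18"  # from the record above

resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    # `mailto` identifies polite callers to Crossref; the address below
    # is a placeholder, not a real contact.
    params={"mailto": "you@example.com"},
    timeout=30,
)
resp.raise_for_status()
work = resp.json()["message"]  # mirrors the "message" object shown above

# Assemble a human-readable citation from fields present in the record.
authors = ", ".join(
    f"{a.get('given', '')} {a.get('family', '')}".strip()
    for a in work.get("author", [])
)
title = work["title"][0]
container = work.get("container-title", [""])[-1]  # e.g. the ECCV 2024 Workshops volume
year = work["issued"]["date-parts"][0][0]
pages = work.get("page", "")

print(f"{authors}: {title}. In: {container}, pp. {pages} ({year}).")
print(f"https://doi.org/{work['DOI']}  ({work.get('references-count', 0)} references)")

Everything printed here comes from fields visible in the record above, including the "reference" array with its 77 deposited entries; for batch work, the same endpoint accepts filters and cursors, which this sketch does not cover.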