{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T19:24:07Z","timestamp":1774121047222,"version":"3.50.1"},"reference-count":58,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2024,5,16]],"date-time":"2024-05-16T00:00:00Z","timestamp":1715817600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,5,16]],"date-time":"2024-05-16T00:00:00Z","timestamp":1715817600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62302385"],"award-info":[{"award-number":["62302385"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1007\/s11263-024-02097-5","type":"journal-article","created":{"date-parts":[[2024,5,16]],"date-time":"2024-05-16T13:01:56Z","timestamp":1715864516000},"page":"4456-4472","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":18,"title":["Instant3D: Instant Text-to-3D 
Generation"],"prefix":"10.1007","volume":"132","author":[{"given":"Ming","family":"Li","sequence":"first","affiliation":[]},{"given":"Pan","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Jia-Wei","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Jussi","family":"Keppo","sequence":"additional","affiliation":[]},{"given":"Min","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Shuicheng","family":"Yan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9305-5830","authenticated-orcid":false,"given":"Xiangyu","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,5,16]]},"reference":[{"key":"2097_CR1","unstructured":"Arjovsky, M., & Bottou, L. (2016). Towards principled methods for training generative adversarial networks. In International conference on learning representations."},{"key":"2097_CR2","unstructured":"Armandpour, M., Zheng, H., Sadeghian, A., Sadeghian, A., & Zhou, M. (2023). Re-imagine the negative prompt algorithm: Transform 2d diffusion into 3d, alleviate janus problem and beyond. arXiv:2304.04968"},{"key":"2097_CR3","first-page":"1877","volume":"33","author":"T Brown","year":"2020","unstructured":"Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J. D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. (2020). Language models are few-shot learners. Advances in neural information processing systems, 33, 1877\u20131901.","journal-title":"Advances in neural information processing systems"},{"key":"2097_CR4","doi-asserted-by":"crossref","unstructured":"Chan, E. R., Lin, C. Z., Chan, M. A., Nagano, K., Pan, B., De\u00a0Mello, S., Gallo, O., Guibas, L. J., Tremblay, J., Khamis, S., et\u00a0al. (2022). Efficient geometry-aware 3d generative adversarial networks. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 
16123\u201316133).","DOI":"10.1109\/CVPR52688.2022.01565"},{"key":"2097_CR5","doi-asserted-by":"crossref","unstructured":"Deitke, M., Schwenk, D., Salvador, J., Weihs, L., Michel, O., VanderBilt, E., Schmidt, L., Ehsani, K., Kembhavi, A., & Farhadi, A. (2023). Objaverse: A universe of annotated 3d objects. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 13142\u201313153).","DOI":"10.1109\/CVPR52729.2023.01263"},{"key":"2097_CR6","first-page":"19822","volume":"34","author":"M Ding","year":"2021","unstructured":"Ding, M., Yang, Z., Hong, W., Zheng, W., Zhou, C., Yin, D., Lin, J., Zou, X., Shao, Z., Yang, H., et al. (2021). Cogview: Mastering text-to-image generation via transformers. Advances in Neural Information Processing Systems, 34, 19822\u201319835.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2097_CR7","doi-asserted-by":"crossref","unstructured":"Esser, P., Rombach, R., & Ommer, B. (2021). Taming transformers for high-resolution image synthesis. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR) (pp. 12873\u201312883).","DOI":"10.1109\/CVPR46437.2021.01268"},{"issue":"11","key":"2097_CR8","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1145\/3422622","volume":"63","author":"I Goodfellow","year":"2020","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., & Bengio, Y. (2020). Generative adversarial networks. Communications of the ACM, 63(11), 139\u2013144.","journal-title":"Communications of the ACM"},{"key":"2097_CR9","unstructured":"Ho, J., & Salimans, T. (2022). Classifier-free diffusion guidance. arXiv:2207.12598"},{"key":"2097_CR10","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., & Abbeel, P. (2020). Denoising diffusion probabilistic models. 
Advances in Neural Information Processing Systems, 33, 6840\u20136851.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2097_CR11","doi-asserted-by":"crossref","unstructured":"Jain, A., Mildenhall, B., Barron, J. T., Abbeel, P., & Poole, B. (2022). Zero-shot text-guided object generation with dream fields. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR) (pp. 867\u2013876).","DOI":"10.1109\/CVPR52688.2022.00094"},{"key":"2097_CR12","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., & Aila, T. (2019) A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 4401\u20134410).","DOI":"10.1109\/CVPR.2019.00453"},{"key":"2097_CR13","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., & Aila, T. (2020) Analyzing and improving the image quality of StyleGAN. In CVPR.","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"2097_CR14","unstructured":"Karras, T., Aittala, M., Laine, S., H\u00e4rk\u00f6nen, E., Hellsten, J., Lehtinen, J., & Aila, T. (2021). Alias-free generative adversarial networks. In NeurIPS."},{"key":"2097_CR15","doi-asserted-by":"crossref","unstructured":"Khalid, N. M., Xie, T., Belilovsky, E., & Tiberiu, P. (2022). Clip-mesh: Generating textured meshes from text using pretrained image-text models. In SIGGRAPH Asia 2022 conference papers.","DOI":"10.1145\/3550469.3555392"},{"key":"2097_CR16","unstructured":"Kingma, D. P., & Ba, J. (2014). Adam: A method for stochastic optimization. arXiv:1412.6980"},{"key":"2097_CR17","unstructured":"Lee, H. H., & Chang, A. X. (2022). Understanding pure clip guidance for voxel grid nerf models. arXiv:2209.15172"},{"key":"2097_CR18","unstructured":"Li, J., Tan, H., Zhang, K., Xu, Z., Luan, F., Xu, Y., Hong, Y., Sunkavalli, K., Shakhnarovich, G., & Bi, S. (2023a). 
Instant3d: Fast text-to-3d with sparse-view generation and large reconstruction model. arXiv:2311.06214"},{"key":"2097_CR19","unstructured":"Li, W., Chen, R., Chen, X., & Tan, P. (2023b). Sweetdreamer: Aligning geometric priors in 2d diffusion for consistent text-to-3d. arXiv:2310.02596"},{"key":"2097_CR20","unstructured":"Liu, M., Xu, C., Jin, H., Chen, L., Xu, Z., Su, H., et\u00a0al. (2023a) One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. arXiv:2306.16928"},{"key":"2097_CR21","doi-asserted-by":"crossref","unstructured":"Liu, R., Wu, R., Van\u00a0Hoorick, B., Tokmakov, P., Zakharov, S., & Vondrick, C. (2023b). Zero-1-to-3: Zero-shot one image to 3d object. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 9298\u20139309).","DOI":"10.1109\/ICCV51070.2023.00853"},{"key":"2097_CR22","unstructured":"Liu, Y., Lin, C., Zeng, Z., Long, X., Liu, L., Komura, T., & Wang, W. (2023c). Syncdreamer: Generating multiview-consistent images from a single-view image. arXiv:2309.03453"},{"key":"2097_CR23","doi-asserted-by":"crossref","unstructured":"Liu, Z., Dai, P., Li, R., Qi, X., & Fu, C. W. (2022). Iss: Image as stepping stone for text-guided 3d shape generation. arXiv:2209.04145","DOI":"10.1109\/TPAMI.2023.3321329"},{"key":"2097_CR24","doi-asserted-by":"crossref","unstructured":"Long, X., Guo, Y. C., Lin, C., Liu, Y., Dou, Z., Liu, L., Ma, Y., Zhang, S. H., Habermann, M., Theobalt, C., et\u00a0al. (2023). Wonder3d: Single image to 3d using cross-domain diffusion. arXiv:2310.15008","DOI":"10.1109\/CVPR52733.2024.00951"},{"key":"2097_CR25","doi-asserted-by":"crossref","unstructured":"Lorraine, J., Xie, K., Zeng, X., Lin, C. H., Takikawa, T., Sharp, N., Lin, T. Y., Liu, M. Y., Fidler, S., & Lucas, J. (2023) Att3d: Amortized text-to-3d object synthesis. 
arXiv:2306.07349","DOI":"10.1109\/ICCV51070.2023.01645"},{"key":"2097_CR26","doi-asserted-by":"crossref","unstructured":"Metzer, G., Richardson, E., Patashnik, O., Giryes, R., & Cohen-Or, D. (2023). Latent-nerf for shape-guided generation of 3d shapes and textures. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition.","DOI":"10.1109\/CVPR52729.2023.01218"},{"key":"2097_CR27","doi-asserted-by":"crossref","unstructured":"Mildenhall, B., Srinivasan, P. P., Tancik, M., Barron, J. T., Ramamoorthi, R., & Ng, R. (2020) Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV.","DOI":"10.1007\/978-3-030-58452-8_24"},{"issue":"4","key":"2097_CR28","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3528223.3530127","volume":"41","author":"T M\u00fcller","year":"2022","unstructured":"M\u00fcller, T., Evans, A., Schied, C., & Keller, A. (2022). Instant neural graphics primitives with a multiresolution hash encoding. ACM Transactions on Graphics (ToG), 41(4), 1\u201315.","journal-title":"ACM Transactions on Graphics (ToG)"},{"key":"2097_CR29","unstructured":"Nichol, A., Dhariwal, P., Ramesh, A., Shyam, P., Mishkin, P., McGrew, B., Sutskever, I., & Chen, M. (2021) Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv:2112.10741"},{"key":"2097_CR30","unstructured":"Nichol, A., Jun, H., Dhariwal, P., Mishkin, P., & Chen, M. (2022). Point-e: A system for generating 3d point clouds from complex prompts. arXiv:2212.08751"},{"key":"2097_CR31","unstructured":"Poole, B., Jain, A., Barron, J. T., & Mildenhall, B. (2022). Dreamfusion: Text-to-3d using 2d diffusion. arXiv:2209.14988"},{"key":"2097_CR32","doi-asserted-by":"crossref","unstructured":"Qiao, T., Zhang, J., Xu, D., & Tao, D. (2019). Mirrorgan: Learning text-to-image generation by redescription. 
In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR).","DOI":"10.1109\/CVPR.2019.00160"},{"key":"2097_CR33","unstructured":"Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et\u00a0al. (2021). Learning transferable visual models from natural language supervision. In International conference on machine learning, PMLR (pp. 8748\u20138763)."},{"key":"2097_CR34","unstructured":"Ramesh, A., Pavlov, M., Goh, G., Gray, S., Voss, C., Radford, A., Chen, M., & Sutskever, I. (2021). Zero-shot text-to-image generation. In International conference on machine learning, PMLR (pp. 8821\u20138831)."},{"key":"2097_CR35","unstructured":"Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., & Chen, M. (2022) Hierarchical text-conditional image generation with clip latents. arXiv:2204.06125"},{"key":"2097_CR36","unstructured":"Reed, S., Akata, Z., Yan, X., Logeswaran, L., Schiele, B., & Lee, H. (2016). Generative adversarial text to image synthesis. In International conference on machine learning, PMLR (pp. 1060\u20131069)."},{"key":"2097_CR37","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., & Ommer, B. (2022) High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 10684\u201310695).","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"2097_CR38","doi-asserted-by":"crossref","unstructured":"Ruan, S., Zhang, Y., Zhang, K., Fan, Y., Tang, F., Liu, Q., & Chen, E. (2021) Dae-gan: Dynamic aspect-aware gan for text-to-image synthesis. In Proceedings of the IEEE\/CVF international conference on computer vision (ICCV) (pp. 13960\u201313969).","DOI":"10.1109\/ICCV48922.2021.01370"},{"key":"2097_CR39","doi-asserted-by":"crossref","unstructured":"Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E., Ghasemipour, S. K. S., Ayan, B. 
K., Mahdavi, S. S., Lopes, R. G., et\u00a0al. (2022). Photorealistic text-to-image diffusion models with deep language understanding. arXiv:2205.11487","DOI":"10.1145\/3528233.3530757"},{"key":"2097_CR40","doi-asserted-by":"crossref","unstructured":"Sanghi, A., Chu, H., Lambourne, J. G., Wang, Y., Cheng, C. Y., Fumero, M., Malekshan, K. R. (2022). Clip-forge: Towards zero-shot text-to-shape generation. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR) (pp. 18603\u201318613).","DOI":"10.1109\/CVPR52688.2022.01805"},{"key":"2097_CR41","unstructured":"Schuhmann, C., Vencu, R., Beaumont, R., Kaczmarczyk, R., Mullis, C., Katta, A., Coombes, T., Jitsev, J., & Komatsuzaki, A. (2021). Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv:2111.02114"},{"key":"2097_CR42","unstructured":"Schuhmann, C., Beaumont, R., Vencu, R., Gordon, C., Wightman, R., Cherti, M., Coombes, T., Katta, A., Mullis, C., Wortsman, M., et\u00a0al. (2022). Laion-5b: An open large-scale dataset for training next generation image-text models. arXiv:2210.08402"},{"key":"2097_CR43","first-page":"6087","volume":"34","author":"T Shen","year":"2021","unstructured":"Shen, T., Gao, J., Yin, K., Liu, M. Y., & Fidler, S. (2021). Deep marching tetrahedra: A hybrid representation for high-resolution 3d shape synthesis. Advances in Neural Information Processing Systems, 34, 6087\u20136101.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"2097_CR44","unstructured":"Shi, Y., Wang, P., Ye, J., Long, M., Li, K., Yang, X. (2023). Mvdream: Multi-view diffusion for 3d generation. arXiv:2308.16512"},{"key":"2097_CR45","unstructured":"Song, J., Meng, C., & Ermon, S. (2020). Denoising diffusion implicit models. arXiv:2010.02502"},{"key":"2097_CR46","unstructured":"Song, Y., Sohl-Dickstein, J., Kingma, D. P., & Kumar, A., Ermon, S., & Poole, B. (2021). Score-based generative modeling through stochastic differential equations. 
arXiv:2011.13456"},{"key":"2097_CR47","doi-asserted-by":"crossref","unstructured":"Sun, C., Sun, M., & Chen, H. T. (2022). Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (CVPR) (pp. 5459\u20135469).","DOI":"10.1109\/CVPR52688.2022.00538"},{"key":"2097_CR48","doi-asserted-by":"crossref","unstructured":"Tan, H., Liu, X., Li, X., Zhang, Y., & Yin, B. (2019) Semantics-enhanced adversarial nets for text-to-image synthesis. In Proceedings of the IEEE\/CVF international conference on computer vision (ICCV).","DOI":"10.1109\/ICCV.2019.01060"},{"key":"2097_CR49","unstructured":"Tang, S., Zhang, F., Chen, J., Wang, P., & Furukawa, Y. (2023). Mvdiffusion: Enabling holistic multi-view image generation with correspondence-aware diffusion. CoRR abs\/2307.01097"},{"key":"2097_CR50","doi-asserted-by":"crossref","unstructured":"Tsalicoglou, C., Manhardt, F., Tonioni, A., Niemeyer, M., & Tombari, F. (2023). Textmesh: Generation of realistic 3d meshes from text prompts. arXiv:2304.12439","DOI":"10.1109\/3DV62453.2024.00154"},{"key":"2097_CR51","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, \u0141., & Polosukhin, I. (2017). Attention is all you need. In Advances in neural information processing systems 30."},{"key":"2097_CR52","doi-asserted-by":"crossref","unstructured":"Wang, H., Du, X., Li, J., Yeh, R. A., & Shakhnarovich, G. (2022a). Score Jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. arXiv:2212.00774","DOI":"10.1109\/CVPR52729.2023.01214"},{"key":"2097_CR53","doi-asserted-by":"crossref","unstructured":"Wang, H., Du, X., Li, J., Yeh, R. A., Shakhnarovich, G. (2023a). Score Jacobian chaining: Lifting pretrained 2d diffusion models for 3d generation. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 
12619\u201312629).","DOI":"10.1109\/CVPR52729.2023.01214"},{"key":"2097_CR54","unstructured":"Wang, Z., Liu, W., He, Q., Wu, X., & Yi, Z. (2022b). Clip-gen: Language-free training of a text-to-image generator with clip. arXiv:2203.00386"},{"key":"2097_CR55","unstructured":"Wang, Z., Lu, C., Wang, Y., Bao, F., Li, C., Su, H., & Zhu, J. (2023b). Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. arXiv:2305.16213"},{"key":"2097_CR56","doi-asserted-by":"crossref","unstructured":"Xu, T., Zhang, P., Huang, Q., Zhang, H., Gan, Z., Huang, X., & He, X. (2018). Attngan: Fine-grained text to image generation with attentional generative adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR).","DOI":"10.1109\/CVPR.2018.00143"},{"key":"2097_CR57","unstructured":"Yi, H., Zheng, Z., Xu, X., & Chua, T. S. (2023). Progressive text-to-3d generation for automatic 3d prototyping. arXiv:2309.14600"},{"key":"2097_CR58","doi-asserted-by":"crossref","unstructured":"Zhang, H., Xu, T., Li, H., Zhang, S., Wang, X., Huang, X., & Metaxas, D. N. (2017). Stackgan: Text to photo-realistic image synthesis with stacked generative adversarial networks. 
In Proceedings of the IEEE international conference on computer vision (ICCV).","DOI":"10.1109\/ICCV.2017.629"}],"updated-by":[{"DOI":"10.1007\/s11263-024-02193-6","type":"correction","label":"Correction","source":"publisher","updated":{"date-parts":[[2024,7,31]],"date-time":"2024-07-31T00:00:00Z","timestamp":1722384000000}}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-024-02097-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-024-02097-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-024-02097-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,4]],"date-time":"2024-10-04T06:20:22Z","timestamp":1728022822000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-024-02097-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,16]]},"references-count":58,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2024,10]]}},"alternative-id":["2097"],"URL":"https:\/\/doi.org\/10.1007\/s11263-024-02097-5","relation":{"correction":[{"id-type":"doi","id":"10.1007\/s11263-024-02193-6","asserted-by":"object"}]},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,5,16]]},"assertion":[{"value":"5 October 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 April 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article 
History"}},{"value":"16 May 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"31 July 2024","order":4,"name":"change_date","label":"Change Date","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"Correction","order":5,"name":"change_type","label":"Change Type","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"A Correction to this paper has been published:","order":6,"name":"change_details","label":"Change Details","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"https:\/\/doi.org\/10.1007\/s11263-024-02193-6","URL":"https:\/\/doi.org\/10.1007\/s11263-024-02193-6","order":7,"name":"change_details","label":"Change Details","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Ming Li is funded by the ISEP-IDS PhD scholarship in NUS. Xiangyu Xu is supported by the NSFC Grant #62302385 and the computational resources provided by the HPC platform of Xi\u2019an Jiaotong University. Pan Zhou is supported by the Singapore Ministry of Education (MOE) Academic Research Fund (AcRF) Tier 1 grant.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interests"}},{"value":"The authors declare that the data supporting the findings of this study are available within the paper and its supplementary information files.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Data Availability."}}]}}