{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T16:07:09Z","timestamp":1771344429008,"version":"3.50.1"},"reference-count":41,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2026,1,20]],"date-time":"2026-01-20T00:00:00Z","timestamp":1768867200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,20]],"date-time":"2026-01-20T00:00:00Z","timestamp":1768867200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"ANID Chile","award":["1251263"],"award-info":[{"award-number":["1251263"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1007\/s11263-025-02684-0","type":"journal-article","created":{"date-parts":[[2026,1,20]],"date-time":"2026-01-20T03:44:22Z","timestamp":1768880662000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Symmetria: A Synthetic Dataset for Learning in Point Clouds"],"prefix":"10.1007","volume":"134","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8766-3581","authenticated-orcid":false,"given":"Ivan","family":"Sipiran","sequence":"first","affiliation":[]},{"given":"Gustavo","family":"Santelices","sequence":"additional","affiliation":[]},{"given":"Lucas","family":"Oyarz\u00fan","sequence":"additional","affiliation":[]},{"given":"Andrea","family":"Ranieri","sequence":"additional","affiliation":[]},{"given":"Chiara","family":"Romanengo","sequence":"additional","affiliation":[]},{"given":"Silvia","family":"Biasotti","sequence":"additional","affiliation":[]},{"given":"Bianca","family":"Falcidieno","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,20]]},"reference":[{"key":"2684_CR1","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2014","unstructured":"Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M. S., Berg, A. C., & Fei-Fei, L. (2014). Imagenet large scale visual recognition challenge. International Journal of Computer Vision,115, 211\u2013252.","journal-title":"International Journal of Computer Vision"},{"key":"2684_CR2","unstructured":"Oquab, M., Darcet, T., Moutakanni, T., Vo, H., Szafraniec, M., Khalidov, V., Fernandez, P., Haziza, D., Massa, F., El-Nouby, A., Assran, M., Ballas, N., Galuba, W., Howes, R., Huang, P.-Y., Li, S.-W., Misra, I., Rabbat, M., Sharma, V., Synnaeve, G., Xu, H., Jegou, H., Mairal, J., Labatut, P., Joulin, A., & Bojanowski, P. DINOv2: Learning Robust Visual Features without Supervision (2024)"},{"key":"2684_CR3","unstructured":"Chang, A.X., Funkhouser, T.A., Guibas, L.J., Hanrahan, P., Huang, Q.-X., Li, Z., Savarese, S., Savva, M., Song, S., Su, H., Xiao, J., Yi, L., & Yu, F. Shapenet: An information-rich 3d model repository. ArXiv : abs\/1512.03012 (2015)"},{"key":"2684_CR4","doi-asserted-by":"publisher","unstructured":"Wu, Z., Song, S., Khosla, A., Yu, F., Zhang, L., Tang, X., & Xiao, J. (2015). 3d shapenets: A deep representation for volumetric shapes (pp. 1912\u20131920). Los Alamitos, CA, USA: IEEE Computer Society. https:\/\/doi.org\/10.1109\/CVPR.2015.7298801","DOI":"10.1109\/CVPR.2015.7298801"},{"key":"2684_CR5","doi-asserted-by":"publisher","unstructured":"Deitke, M., Schwenk, D., Salvador, J., Weihs, L., Michel, O., VanderBilt, E., Schmidt, L., Ehsanit, K., Kembhavi, A., & Farhadi, A.: Objaverse,. (2023). A universe of annotated 3d objects (pp. 13142\u201313153). Los Alamitos, CA, USA: IEEE Computer Society. https:\/\/doi.org\/10.1109\/CVPR52729.2023.01263","DOI":"10.1109\/CVPR52729.2023.01263"},{"key":"2684_CR6","doi-asserted-by":"publisher","first-page":"990","DOI":"10.1007\/s11263-021-01555-8","volume":"130","author":"H Kataoka","year":"2022","unstructured":"Kataoka, H., Okayasu, K., Matsumoto, A., Yamagata, E., Yamada, R., Inoue, N., Nakamura, A., & Satoh, Y. (2022). Pre-training without natural images. International Journal of Computer Vision,130, 990\u20131007.","journal-title":"International Journal of Computer Vision"},{"key":"2684_CR7","unstructured":"Sipiran, I., Romanengo, C., Falcidieno, B., Biasotti, S., Arvanitis, G., Chen, C., Fotis, V., He, J., Lv, X., Moustakas, K., et al. (2023). Shrec 2023: Detection of symmetries on 3d point clouds representing simple shapes (pp. 17\u2013237). The Eurographics Association."},{"issue":"6","key":"2684_CR8","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1111\/cgf.12010","volume":"32","author":"NJ Mitra","year":"2013","unstructured":"Mitra, N. J., Pauly, M., Wand, M., & Ceylan, D. (2013). Symmetry in 3d geometry: Extraction and applications. Computer Graphics Forum,32(6), 1\u201323. https:\/\/doi.org\/10.1111\/cgf.12010","journal-title":"Computer Graphics Forum"},{"key":"2684_CR9","doi-asserted-by":"crossref","unstructured":"Sipiran, I. Analysis of partial axial symmetry on 3d surfaces and its application in the restoration of cultural heritage objects. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) Workshops (2017)","DOI":"10.1109\/ICCVW.2017.345"},{"key":"2684_CR10","doi-asserted-by":"crossref","unstructured":"Mitra, N. J., Guibas, L. J., & Pauly, M. (2006). Partial and approximate symmetry detection for 3d geometry. SIGGRAPH \u201906ACM SIGGRAPH 2006 Papers (pp. 560\u2013568). New York, NY, USA: Association for Computing Machinery.","DOI":"10.1145\/1179352.1141924"},{"key":"2684_CR11","unstructured":"Simari, P., Kalogerakis, E., & Singh, K. (2006). Folding meshes: hierarchical mesh segmentation based on planar symmetry (Vol. \u201906, pp. 111\u2013119). Goslar, DEU: Eurographics Association."},{"key":"2684_CR12","doi-asserted-by":"publisher","unstructured":"Podolak, J., Shilane, P., Golovinskiy, A., Rusinkiewicz, S., & Funkhouser, T. A planar-reflective symmetry transform for 3d shapes. In: ACM SIGGRAPH 2006 Papers. SIGGRAPH \u201906, pp. 549\u2013559. Association for Computing Machinery, New York, NY, USA (2006). https:\/\/doi.org\/10.1145\/1179352.1141923","DOI":"10.1145\/1179352.1141923"},{"issue":"09","key":"2684_CR13","doi-asserted-by":"publisher","first-page":"11321","DOI":"10.1109\/TPAMI.2023.3262786","volume":"45","author":"A Xiao","year":"2023","unstructured":"Xiao, A., Huang, J., Guan, D., Zhang, X., Lu, S., & Shao, L. (2023). Unsupervised point cloud representation learning with deep neural networks: A survey. IEEE Transactions on Pattern Analysis & Machine Intelligence,45(09), 11321\u201311339. https:\/\/doi.org\/10.1109\/TPAMI.2023.3262786","journal-title":"IEEE Transactions on Pattern Analysis & Machine Intelligence"},{"key":"2684_CR14","unstructured":"Zhou, Q., & Jacobson, A. Thingi10K: A Dataset of 10,000 3D-Printing Models (2016)"},{"key":"2684_CR15","doi-asserted-by":"publisher","unstructured":"Koch, S., Matveev, A., Jiang, Z., Williams, F., Artemov, A., Burnaev, E., Alexa, M., Zorin, D., & Panozzo, D. (2019). Abc: A big cad model dataset for geometric deep learning. (pp. 9593\u20139603). https:\/\/doi.org\/10.1109\/CVPR.2019.00983","DOI":"10.1109\/CVPR.2019.00983"},{"key":"2684_CR16","doi-asserted-by":"publisher","unstructured":"Uy, M., Pham, Q., Hua, B., Nguyen, T., & Yeung, S. (2019). Revisiting point cloud classification: A new benchmark dataset and classification model on real-world data (pp. 1588\u20131597). Los Alamitos, CA, USA: IEEE Computer Society. https:\/\/doi.org\/10.1109\/ICCV.2019.00167","DOI":"10.1109\/ICCV.2019.00167"},{"key":"2684_CR17","doi-asserted-by":"crossref","unstructured":"Ching, T., Himmelstein, D.S., Beaulieu-Jones, B.K., Kalinin, A.A., Do, B.T., Way, G.P., Ferrero, E., Agapow, P.-M., Zietz, M., Hoffman, M.M., Xie, W., Rosen, G.L., Lengerich, B.J., Israeli, J., Lanchantin, J., Woloszynek, S., Carpenter, A.E., Shrikumar, A., Xu, J., Cofer, E.M., Lavender, C.A., Turaga, S.C., Alexandari, A.M., Lu, Z., Harris, D.J., DeCaprio, D., Qi, Y., Kundaje, A., Peng, Y., Wiley, L.K., Segler, M.H.S., Boca, S.M., Swamidass, S.J., Huang, A., Gitter, A., & Greene, C.S. Opportunities and obstacles for deep learning in biology and medicine. Journal of the Royal Society Interface 15 (2017)","DOI":"10.1101\/142760"},{"key":"2684_CR18","doi-asserted-by":"publisher","unstructured":"Helmrich, D. N., Bauer, F. M., Giraud, M., Schnepf, A., G\u00f6bbert, J. H., Scharr, H., Hvannberg, E., & Riedel, M. ((2023) . A scalable pipeline to create synthetic datasets from functional\u2013structural plant models for deep learning. silico Plants,6(1), 022. https:\/\/doi.org\/10.1093\/insilicoplants\/diad022.","DOI":"10.1093\/insilicoplants\/diad022."},{"key":"2684_CR19","unstructured":"Wu, Y., Wu, Y., Gkioxari, G., & Tian, Y. Building generalizable agents with a realistic and rich 3d environment. arXiv preprint arXiv:1801.02209 (2018)"},{"key":"2684_CR20","unstructured":"Maciek\u00a0Chociej, L. Peter Welinder: Orrb: Openai remote rendering backend. In: Eprint arXiv (2019). arxiv:1906.11633"},{"key":"2684_CR21","doi-asserted-by":"crossref","unstructured":"Roberts, M., Ramapuram, J., Ranjan, A., Kumar, A., Bautista, M. A., Paczan, N., Webb, R., & Susskind, J.M.: Hypersim,. (2021). A photorealistic synthetic dataset for holistic indoor scene understanding.","DOI":"10.1109\/ICCV48922.2021.01073"},{"key":"2684_CR22","unstructured":"Ko, C.-Y., Chen, P.-Y., Das, P., Chuang, Y.-S., & Daniel, L. (2023). On robustness-accuracy characterization of large language models using synthetic datasets."},{"key":"2684_CR23","doi-asserted-by":"crossref","unstructured":"Mishra, S., Panda, R., Phoo, C. P., Chen, C.-F., Karlinsky, L., Saenko, K., Saligrama, V., & Feris, R. S. (2021). Task2sim: Towards effective pre-training and transfer from synthetic data. (pp. 9184\u20139194)","DOI":"10.1109\/CVPR52688.2022.00898"},{"key":"2684_CR24","unstructured":"Yang, L., Xu, X., Kang, B., Shi, Y., & Zhao, H. Freemask: Synthetic images with dense annotations make stronger segmentation models. In: NeurIPS (2023)"},{"key":"2684_CR25","unstructured":"Kim, Y.-w., Mishra, S., Jin, S., Panda, R., Kuehne, H., Karlinsky, L., Saligrama, V., Saenko, K., Oliva, A., & Feris, R. How transferable are video representations based on synthetic data? In: Koyejo, S., Mohamed, S., Agarwal, A., Belgrave, D., Cho, K., Oh, A. (eds.) Advances in Neural Information Processing Systems, vol. 35, pp. 35710\u201335723. Curran Associates, Inc., ??? (2022)"},{"key":"2684_CR26","doi-asserted-by":"crossref","unstructured":"Herzig, R., Abramovich, O., Ben Avraham, E., Arbelle, A., Karlinsky, L., Shamir, A., Darrell, T., & Globerson, A. (2024). Promptonomyvit: Multi-task prompt learning improves video transformers using synthetic scene data. (pp. 6803\u20136815)","DOI":"10.1109\/WACV57701.2024.00666"},{"key":"2684_CR27","unstructured":"Mishra, S., Castillo, C.D., Wang, H., Saenko, K., & Saligrama, V. SynCDR : Training Cross Domain Retrieval Models with Synthetic Data (2024)"},{"key":"2684_CR28","doi-asserted-by":"crossref","unstructured":"Cascante-Bonilla, P., Shehada, K., Smith, J. S., Doveh, S., Kim, D., Panda, R., Varol, G., Oliva, A., Ordonez, V., Feris, R., & Karlinsky, L. (2023). Going beyond nouns with vision & language models using synthetic data. (pp. 20155\u201320165)","DOI":"10.1109\/ICCV51070.2023.01844"},{"key":"2684_CR29","doi-asserted-by":"publisher","unstructured":"Sariyildiz, M., Alahari, K., Larlus, D., & Kalantidis, Y. (2023). Fake it till you make it: Learning transferable representations from synthetic imagenet clones (pp. 8011\u20138021). Los Alamitos, CA, USA: IEEE Computer Society. https:\/\/doi.org\/10.1109\/CVPR52729.2023.00774","DOI":"10.1109\/CVPR52729.2023.00774"},{"key":"2684_CR30","doi-asserted-by":"crossref","unstructured":"Kataoka, H., Hayamizu, R., Yamada, R., Nakashima, K., Takashima, S., Zhang, X., Martinez-Noriega, E. J., Inoue, N., & Yokota, R. (2022). Replacing labeled real-image datasets with auto-generated contours. (pp. 21200\u201321209)","DOI":"10.1109\/CVPR52688.2022.02055"},{"issue":"2","key":"2684_CR31","doi-asserted-by":"publisher","first-page":"1990","DOI":"10.1609\/aaai.v36i2.20094","volume":"36","author":"K Nakashima","year":"2022","unstructured":"Nakashima, K., Kataoka, H., Matsumoto, A., Iwata, K., Inoue, N., & Satoh, Y. (2022). Can vision transformers learn without natural images? Proceedings of the AAAI Conference on Artificial Intelligence,36(2), 1990\u20131998. https:\/\/doi.org\/10.1609\/aaai.v36i2.20094","journal-title":"Proceedings of the AAAI Conference on Artificial Intelligence"},{"key":"2684_CR32","doi-asserted-by":"publisher","unstructured":"Takashima, S., Hayamizu, R., Inoue, N., Kataoka, H., & Yokota, R. (2023). Visual atoms: Pre-training vision transformers with sinusoidal waves (pp. 18579\u201318588). Los Alamitos, CA, USA: IEEE Computer Society. https:\/\/doi.org\/10.1109\/CVPR52729.2023.01782","DOI":"10.1109\/CVPR52729.2023.01782"},{"key":"2684_CR33","doi-asserted-by":"publisher","unstructured":"Nakamura, R., Kataoka, H., Takashima, S., Noriega, E. M., Yokota, R., & Inoue, N. (2023). Pre-training vision transformers with very limited synthesized images (pp. 20303\u201320312). Los Alamitos, CA, USA: IEEE Computer Society. https:\/\/doi.org\/10.1109\/ICCV51070.2023.01862","DOI":"10.1109\/ICCV51070.2023.01862"},{"key":"2684_CR34","doi-asserted-by":"crossref","unstructured":"Pang, Y., Wang, W., Tay, F.E., Liu, W., Tian, Y., & Yuan, L. Masked autoencoders for point cloud self-supervised learning. In: Computer Vision\u2013ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part II, pp. 604\u2013621 (2022). Springer","DOI":"10.1007\/978-3-031-20086-1_35"},{"key":"2684_CR35","unstructured":"Chen, G., Wang, M., Yang, Y., Yu, K., Yuan, L., & Yue, Y. Pointgpt: Auto-regressively generative pre-training from point clouds. Advances in Neural Information Processing Systems 36 (2024)"},{"key":"2684_CR36","doi-asserted-by":"publisher","unstructured":"Liu, Y., Fan, B., Xiang, S., & Pan, C. (2019). Relation-shape convolutional neural network for point cloud analysis (pp. 8887\u20138896). Los Alamitos, CA, USA: IEEE Computer Society. https:\/\/doi.org\/10.1109\/CVPR.2019.00910","DOI":"10.1109\/CVPR.2019.00910"},{"key":"2684_CR37","doi-asserted-by":"publisher","unstructured":"Yu, X., Tang, L., Rao, Y., Huang, T., Zhou, J., & Lu, J. (2022). Point-bert: Pre-training 3d point cloud transformers with masked point modeling (pp. 19291\u201319300). Los Alamitos, CA, USA: IEEE Computer Society. https:\/\/doi.org\/10.1109\/CVPR52688.2022.01871","DOI":"10.1109\/CVPR52688.2022.01871"},{"key":"2684_CR38","doi-asserted-by":"publisher","unstructured":"Yi, L., Kim, V.G., Ceylan, D., Shen, I.-C., Yan, M., Su, H., Lu, C., Huang, Q., Sheffer, A., & Guibas, L. A scalable active framework for region annotation in 3d shape collections. ACM Trans. Graph. 35(6) (2016) https:\/\/doi.org\/10.1145\/2980179.2980238","DOI":"10.1145\/2980179.2980238"},{"key":"2684_CR39","doi-asserted-by":"publisher","unstructured":"Charles, R. Q., Su, H., Kaichun, M., & Guibas, L.J.: Pointnet,. (2017). Deep learning on point sets for 3d classification and segmentation. (pp. 77\u201385). https:\/\/doi.org\/10.1109\/CVPR.2017.16","DOI":"10.1109\/CVPR.2017.16"},{"key":"2684_CR40","doi-asserted-by":"crossref","unstructured":"Shi, Y., Huang, J., Zhang, H., Xu, X., Rusinkiewicz, S., & Xu, K. Symmetrynet: Learning to predict reflectional and rotational symmetries of 3d shapes from single-view rgb-d images. ACM Trans. Graph. 39 (2020)","DOI":"10.1145\/3414685.3417775"},{"key":"2684_CR41","doi-asserted-by":"crossref","unstructured":"Gao, L., Zhang, L.-X., Meng, H.-Y., Ren, Y.-H., Lai, Y.-K., & Kobbelt, L. Prs-net: Planar reflective symmetry detection net for 3d models. IEEE Transactions on Visualization and Computer Graphics, 1\u20131 (2020)","DOI":"10.1109\/TVCG.2020.3003823"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02684-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-025-02684-0","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02684-0.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,17]],"date-time":"2026-02-17T15:20:54Z","timestamp":1771341654000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-025-02684-0"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1,20]]},"references-count":41,"journal-issue":{"issue":"2","published-print":{"date-parts":[[2026,2]]}},"alternative-id":["2684"],"URL":"https:\/\/doi.org\/10.1007\/s11263-025-02684-0","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,1,20]]},"assertion":[{"value":"1 October 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 September 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 January 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}],"article-number":"70"}}