{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T11:39:07Z","timestamp":1742989147304,"version":"3.40.3"},"publisher-location":"Cham","reference-count":31,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031442063"},{"type":"electronic","value":"9783031442070"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-44207-0_29","type":"book-chapter","created":{"date-parts":[[2023,9,21]],"date-time":"2023-09-21T14:03:51Z","timestamp":1695305031000},"page":"344-355","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Enhanced Point Cloud Interpretation via\u00a0Style Fusion and\u00a0Contrastive Learning in\u00a0Advanced 3D Data Analysis"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-2727-3607","authenticated-orcid":false,"given":"Ruimin","family":"Zhou","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2106-7226","authenticated-orcid":false,"given":"Chung-Ming","family":"Own","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,22]]},"reference":[{"key":"29_CR1","doi-asserted-by":"crossref","unstructured":"Abhinav, U., Alpana, D., Kuriakose, S.-M., Mahato, D.: 3DSTNet: neural 3D shape style transfer. In: 2022 IEEE International Conference on Multimedia and Expo Workshops (ICMEW), pp. 1\u20136. IEEE (2022)","DOI":"10.1109\/ICMEW56448.2022.9859470"},{"key":"29_CR2","doi-asserted-by":"crossref","unstructured":"Afham, M., Dissanayake, I., Dissanayake, D., Dharmasiri, A., Thilakarathna, K., Rodrigo, R.: CrossPoint: self-supervised cross-modal contrastive learning for 3D point cloud understanding. In: The IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9892\u20139902. IEEE (2022)","DOI":"10.1109\/CVPR52688.2022.00967"},{"key":"29_CR3","unstructured":"Chang, A.-X., et al.: ShapeNet: an information-rich 3D model repository. CoRR abs\/1512.03012 (2015). arxiv.org\/abs\/1512.03012"},{"key":"29_CR4","unstructured":"Chen, T., Kornblith, S., Norouzi, M., Geoffrey, H.: A simple framework for contrastive learning of visual representations. In: The 37th International Conference on Machine Learning, pp. 1597\u20131607 (2020)"},{"key":"29_CR5","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.-J., Kai, L., Li, F.-F.: ImageNet: a large-scale hierarchical image database. In: The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"issue":"2","key":"29_CR6","doi-asserted-by":"publisher","first-page":"187","DOI":"10.1007\/s41095-021-0229-5","volume":"7","author":"M Guo","year":"2021","unstructured":"Guo, M., Cai, J., Liu, Z., Mu, T., Martin, R., Hu, S.: PCT: point cloud transformer. Comput. Vis. Media 7(2), 187\u2013199 (2021)","journal-title":"Comput. Vis. Media"},{"key":"29_CR7","doi-asserted-by":"crossref","unstructured":"He, K.-M., Fan, H.-Q., Wu, Y.-X., Xie, S.-N., Girshick, R.-B.: Momentum contrast for unsupervised visual representation learning. In: The IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9726\u20139735. IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"29_CR8","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.-Y., Zhou, T.-H., Efros, A.-A.: Image-to-image translation with conditional adversarial networks. In: The IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1125\u20131134. IEEE (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"29_CR9","doi-asserted-by":"crossref","unstructured":"Jiang, L., et al.: Guided point contrastive learning for semi-supervised point cloud semantic segmentation. In: 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 6403\u20136412. IEEE (2021)","DOI":"10.1109\/ICCV48922.2021.00636"},{"key":"29_CR10","unstructured":"Laurens, V.-M., Geoffrey, E.-H.: Visualizing data using t-SNE. J. Mach. Learn. Res. 9, 2579\u20132605 (2021)"},{"key":"29_CR11","doi-asserted-by":"crossref","unstructured":"Li, R.-H., Li, X.-Z., Heng, P.-A., Fu, C.-W.: PointAugment: an auto-augmentation framework for point cloud classification. In: The IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 6377\u20136386. IEEE (2020)","DOI":"10.1109\/CVPR42600.2020.00641"},{"key":"29_CR12","unstructured":"Li, Y., Bu, R., Sun, M., Wu, W., Di, X., Chen, B.: PointCNN: convolution on X-transformed points. In: NeurIPS, vol. 31. Curran Associates (2018)"},{"key":"29_CR13","doi-asserted-by":"crossref","unstructured":"Lin, M.-X., et al.: Single image 3D shape retrieval via cross-modal instance and category contrastive learning. In: 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 11385\u201311395. IEEE (2021)","DOI":"10.1109\/ICCV48922.2021.01121"},{"key":"29_CR14","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"326","DOI":"10.1007\/978-3-030-58592-1_20","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Z Liu","year":"2020","unstructured":"Liu, Z., Hu, H., Cao, Y., Zhang, Z., Tong, X.: A closer look at local aggregation operators in point cloud analysis. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12368, pp. 326\u2013342. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58592-1_20"},{"issue":"4","key":"29_CR15","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2766929","volume":"34","author":"Z-L Lun","year":"2015","unstructured":"Lun, Z.-L., Kalogerakis, E., Sheffer, A.: Elements of style: learning perceptual shape style similarity. ACM Trans. Graph. (TOG) 34(4), 1\u201314 (2015)","journal-title":"ACM Trans. Graph. (TOG)"},{"issue":"21","key":"29_CR16","doi-asserted-by":"publisher","first-page":"7392","DOI":"10.3390\/s21217392","volume":"21","author":"D Nazir","year":"2021","unstructured":"Nazir, D., Afzal, M.-Z., Pagani, A., Liwicki, M., Stricker, D.: Contrastive learning for 3D point clouds classification and shape completion. Sensors 21(21), 7392 (2021)","journal-title":"Sensors"},{"key":"29_CR17","unstructured":"Oord, A., Li, Y.-Z., Vinyals, O.: Representation learning with contrastive predictive coding. CoRR abs\/1807.03748 (2018). arxiv.org\/abs\/1807.03748"},{"key":"29_CR18","doi-asserted-by":"crossref","unstructured":"Qi, C.-R., Su, H., Mo, K., Guibas, L.-J.: PointNet: deep learning on point sets for 3D classification and segmentation. In: The IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 77\u201385. IEEE (2017)","DOI":"10.1109\/CVPR.2017.16"},{"key":"29_CR19","unstructured":"Qi, C.-R., Yi, L., Su, H., Guibas, L.-J.: PointNet++: deep hierarchical feature learning on point sets in a metric space. In: NIPS, vol. 30, pp. 5099\u20135108 (2017)"},{"key":"29_CR20","doi-asserted-by":"crossref","unstructured":"Sanghi, A.: Info3D: representation learning on 3D objects using mutual information maximization and contrastive learning. CoRR abs\/2006.02598 (2020). arxiv.org\/abs\/2006.02598","DOI":"10.1007\/978-3-030-58526-6_37"},{"key":"29_CR21","unstructured":"Snell, J., Swersky, K., Zemel, R.-S.: Prototypical networks for few-shot learning. CoRR abs\/1703.05175 (2017). arxiv.org\/abs\/1703.05175"},{"key":"29_CR22","doi-asserted-by":"crossref","unstructured":"Sun, C., Zheng, Z., Wang, X., Xu, M., Yang, Y.: Self-supervised point cloud representation learning via separating mixed shapes. IEEE Trans. Multimedia, 1\u201311 (2022)","DOI":"10.1109\/TMM.2022.3206664"},{"key":"29_CR23","doi-asserted-by":"crossref","unstructured":"Uy, M.-A., Pham, Q.-H., Hua, B.-S., Nguyen, D.-T., Yeung, S.K.: Revisiting point cloud classification: a new benchmark dataset and classification model on real-world data. In: 2019 IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 1588\u20131597. IEEE (2019)","DOI":"10.1109\/ICCV.2019.00167"},{"issue":"5","key":"29_CR24","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3326362","volume":"38","author":"Y Wang","year":"2019","unstructured":"Wang, Y., Sun, Y.-B., Liu, Z.-W., Sarma, S.-E., Michael, M.-B., Justin, M.-S.: Dynamic graph CNN for learning on point clouds. ACM Trans. Graph. (TOG) 38(5), 1\u201312 (2019)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"29_CR25","doi-asserted-by":"crossref","unstructured":"Wu, W.-X., Qi, Z.-G., Li, F.-X.: PointConv: deep convolutional networks on 3D point clouds. In: The IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9613\u20139622. IEEE (2019)","DOI":"10.1109\/CVPR.2019.00985"},{"issue":"4","key":"29_CR26","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3306346.3322936","volume":"38","author":"Z-J Wu","year":"2019","unstructured":"Wu, Z.-J., Wang, X., Lin, D., Lischinski, D., Cohen-Or, D., Huang, H.: Structure-aware generative network for 3D-shape modeling. ACM Trans. Graph. (TOG) 38(4), 1\u201314 (2019)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"29_CR27","unstructured":"Wu, Z.-R., et al.: 3D ShapeNets: a deep representation for volumetric shapes. In: The IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1912\u20131920. IEEE (2015)"},{"key":"29_CR28","doi-asserted-by":"crossref","unstructured":"Xie, S.-N., Gu, J.-T., Guo, D.-M., Qi, C., Guibas, L.-J., Litany, O.: PointContrast: unsupervised pre-training for 3D point cloud understanding. CoRR abs\/2007.10985 (2020). arxiv.org\/abs\/2007.10985","DOI":"10.1007\/978-3-030-58580-8_34"},{"issue":"6","key":"29_CR29","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3355089.3356494","volume":"38","author":"K Yin","year":"2019","unstructured":"Yin, K., Chen, Z.-Q., Huang, H., Cohen-Or, D., Zhang, H.: LOGAN: unpaired shape transform in latent overcomplete space. ACM Trans. Graph. (TOG) 38(6), 1\u201313 (2019)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"29_CR30","doi-asserted-by":"publisher","first-page":"58","DOI":"10.1016\/j.neucom.2022.07.049","volume":"505","author":"J Zhang","year":"2022","unstructured":"Zhang, J., et al.: PointCutMix: regularization strategy for point cloud classification. Neurocomputing 505, 58\u201367 (2022)","journal-title":"Neurocomputing"},{"key":"29_CR31","doi-asserted-by":"crossref","unstructured":"Zheng, W., Tang, W.-L., Jiang, L., Fu, C.-W.: SE-SSD: self-ensembling single-stage object detector from point cloud. In: The IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 14489\u201314498. IEEE (2021)","DOI":"10.1109\/CVPR46437.2021.01426"}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-44207-0_29","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T16:21:53Z","timestamp":1730132513000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-44207-0_29"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031442063","9783031442070"],"references-count":31,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-44207-0_29","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"22 September 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Heraklion","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 September 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"32","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/e-nns.org\/icann2023\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"easyacademia.org","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"947","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"426","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"22","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"45% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.4","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"type of other papers accepted : 9 Abstract","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}