{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T16:26:51Z","timestamp":1771950411292,"version":"3.50.1"},"publisher-location":"Cham","reference-count":25,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783030057091","type":"print"},{"value":"9783030057107","type":"electronic"}],"license":[{"start":{"date-parts":[[2018,12,8]],"date-time":"2018-12-08T00:00:00Z","timestamp":1544227200000},"content-version":"tdm","delay-in-days":0,"URL":"http:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019]]},"DOI":"10.1007\/978-3-030-05710-7_36","type":"book-chapter","created":{"date-parts":[[2018,12,7]],"date-time":"2018-12-07T12:48:55Z","timestamp":1544186935000},"page":"436-446","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":15,"title":["Point Cloud Colorization Based on Densely Annotated 3D Shape Dataset"],"prefix":"10.1007","author":[{"given":"Xu","family":"Cao","sequence":"first","affiliation":[]},{"given":"Katashi","family":"Nagao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2018,12,8]]},"reference":[{"key":"36_CR1","unstructured":"CloudCompare. http:\/\/www.cloudcompare.org"},{"key":"36_CR2","unstructured":"Chang, A.X., et al.: ShapeNet: an information-rich 3D model repository. Technical report arXiv: 1512.03012 [cs.GR] (2015)"},{"issue":"3","key":"36_CR3","doi-asserted-by":"publisher","first-page":"73","DOI":"10.1145\/1531326.1531379","volume":"28","author":"X Chen","year":"2009","unstructured":"Chen, X., Golovinskiy, A., Funkhouser, T.: A benchmark for 3D mesh segmentation. ACM Trans. Graph. 28(3), 73 (2009). (Proc. SIGGRAPH)","journal-title":"ACM Trans. 
Graph."},{"key":"36_CR4","doi-asserted-by":"crossref","unstructured":"Dai, A., Ritchie, D., Bokeloh, M., Reed, S., Sturm, J. and Nie\u00dfner, M.: ScanComplete: large-scale scene completion and semantic segmentation for 3D scans. In: Proceedings of Computer Vision and Pattern Recognition (CVPR). IEEE (2018)","DOI":"10.1109\/CVPR.2018.00481"},{"key":"36_CR5","doi-asserted-by":"crossref","unstructured":"Fan, H., Su, H., Guibas, L.J.: A point set generation network for 3D object reconstruction from a single image. In: Proceedings of 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, pp. 2463\u20132471 (2017)","DOI":"10.1109\/CVPR.2017.264"},{"key":"36_CR6","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: Ghahramani, Z., Welling, M., Cortes, C., Lawrence, N.D., Weinberger, K.Q. (eds.) Advances in Neural Information Processing Systems, vol. 27, pp. 2672\u20132680 (2014)"},{"key":"36_CR7","unstructured":"Gulrajani, I., Ahmed, F., Arjovsky, M., Dumoulin, V., Courville, A.C.: Improved training of Wasserstein gans. In: Guyon, I., et al. (eds.) Advances in Neural Information Processing Systems, vol. 30, pp. 5767\u20135777. Curran Associates, Inc. (2017)"},{"key":"36_CR8","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"36_CR9","doi-asserted-by":"crossref","unstructured":"Klokov, R., Lempitsky, V.: Escape from cells: deep kd-networks for the recognition of 3D point cloud models. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2017)","DOI":"10.1109\/ICCV.2017.99"},{"key":"36_CR10","doi-asserted-by":"crossref","unstructured":"Lin, C.H., Kong, C., Lucey, S.: Learning efficient point cloud generation for dense 3D object reconstruction. 
In: Proceedings of AAAI Conference on Artificial Intelligence (AAAI) (2018)","DOI":"10.1609\/aaai.v32i1.12278"},{"key":"36_CR11","doi-asserted-by":"crossref","unstructured":"Lun, Z., Gadelha, M., Kalogerakis, E., Maji, S., Wang, R.: 3D shape reconstruction from sketches via multi-view convolutional networks. In: Proceedings of 2017 International Conference on 3D Vision (3DV) (2017)","DOI":"10.1109\/3DV.2017.00018"},{"key":"36_CR12","unstructured":"Nagao, K., Miyakawa, Y.: Building scale VR: automatically creating indoor 3D maps and its application to simulation of disaster situations. In: Proceedings of Future Technologies Conference (FTC) (2017)"},{"key":"36_CR13","unstructured":"Panos, A., Olga, D., Ioannis, M., Leonidas, G.: Learning representations and generative models for 3D point clouds. In: Proceedings of International Conference on Learning Representations (ICLR) (2018)"},{"key":"36_CR14","unstructured":"Qi, C.R., Su, H., Mo, K., Guibas, L.J.: PointNet: deep learning on point sets for 3D classification and segmentation. In: Proceedings of Computer Vision and Pattern Recognition (CVPR). IEEE (2017)"},{"key":"36_CR15","unstructured":"Qi, C.R., Yi, L., Su, H., Guibas, L.J.: PointNet++: deep hierarchical feature learning on point sets in a metric space. arXiv preprint arXiv: 1706.02413 (2017)"},{"key":"36_CR16","doi-asserted-by":"crossref","unstructured":"Shao, L., Chang, A.X., Su, H., Savva, M., Guibas, L.J.: Cross-modal attribute transfer for rescaling 3D models. In: Proceedings of 2017 International Conference on 3D Vision (3DV), pp. 640\u2013648 (2017)","DOI":"10.1109\/3DV.2017.00078"},{"key":"36_CR17","unstructured":"Shilane, P., Min, P., Kazhdan, M., Funkhouser, T.: The Princeton shape benchmark. In: Shape Modeling International (2004)"},{"key":"36_CR18","unstructured":"Smith, E.J., Meger, D.: Improved adversarial systems for 3D object generation and reconstruction. In: Levine, S., Vanhoucke, V., Goldberg, K. (eds.) 
Proceedings of the 1st Annual Conference on Robot Learning. Proceedings of Machine Learning Research, vol. 78, pp. 87\u201396. PMLR (2017)"},{"key":"36_CR19","doi-asserted-by":"crossref","unstructured":"Song, S., Yu, F., Zeng, A., Chang, A.X., Savva, M., Funkhouser, T.: Semantic scene completion from a single depth image. In: Proceedings of the 30th IEEE Conference on Computer Vision and Pattern Recognition (2017)","DOI":"10.1109\/CVPR.2017.28"},{"key":"36_CR20","doi-asserted-by":"crossref","unstructured":"Su, H., et al.: SPLATNet: sparse lattice networks for point cloud processing. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2530\u20132539 (2018)","DOI":"10.1109\/CVPR.2018.00268"},{"key":"36_CR21","unstructured":"Wu, J., Zhang, C., Xue, T., Freeman, W.T., Tenenbaum, J.B.: Learning a probabilistic latent space of object shapes via 3D generative-adversarial modeling. In: Advances in Neural Information Processing Systems, pp. 82\u201390 (2016)"},{"key":"36_CR22","unstructured":"Wu, Z., et al.: 3D ShapeNets: a deep representation for volumetric shapes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2015)"},{"key":"36_CR23","doi-asserted-by":"crossref","unstructured":"Xiang, Y., et al.: ObjectNet3D: a large scale database for 3D object recognition. In: Proceedings of European Conference Computer Vision (ECCV) (2016)","DOI":"10.1007\/978-3-319-46484-8_10"},{"key":"36_CR24","doi-asserted-by":"crossref","unstructured":"Yang, B., Wen, H., Wang, S., Clark, R., Markham, A., Trigoni, N.: 3D object reconstruction from a single depth view with adversarial learning. In: Proceedings of International Conference on Computer Vision Workshops (ICCVW) (2017)","DOI":"10.1109\/ICCVW.2017.86"},{"key":"36_CR25","doi-asserted-by":"crossref","unstructured":"Yi, L., et al.: A scalable active framework for region annotation in 3D shape collections. 
In: Proceedings of SIGGRAPH Asia (2016)","DOI":"10.1145\/2980179.2980238"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"http:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-05710-7_36","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,7]],"date-time":"2022-09-07T21:18:21Z","timestamp":1662585501000},"score":1,"resource":{"primary":{"URL":"http:\/\/link.springer.com\/10.1007\/978-3-030-05710-7_36"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2018,12,8]]},"ISBN":["9783030057091","9783030057107"],"references-count":25,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-05710-7_36","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2018,12,8]]},"assertion":[{"value":"8 December 2018","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Thessaloniki","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2019","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"8 January 2019","order":7,"name":"conference_start_date","label":"Conference 
Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 January 2019","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"25","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2019","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/mmm2019.iti.gr\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double blind for full papers and workshop papers, single blind for other paper types","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"204","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"96","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"47% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full 
Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"2.67","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}},{"value":"6 demonstration papers, 5 industry papers, 6 workshop papers, and 6 Video Browser Showdown papers were also accepted.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information"}}]}}