{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,10]],"date-time":"2026-02-10T14:22:24Z","timestamp":1770733344322,"version":"3.49.0"},"publisher-location":"Cham","reference-count":21,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031586750","type":"print"},{"value":"9783031586767","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-58676-7_6","type":"book-chapter","created":{"date-parts":[[2024,4,26]],"date-time":"2024-04-26T03:02:44Z","timestamp":1714100564000},"page":"69-81","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["SynPhoRest - A Procedural Generation Tool of\u00a0Synthetic Photorealistic Forest Datasets"],"prefix":"10.1007","author":[{"given":"Ruben","family":"Bidault","sequence":"first","affiliation":[]},{"given":"Paulo","family":"Peixoto","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,4,27]]},"reference":[{"key":"6_CR1","unstructured":"FAO of the United Nations. A Fresh Perspective - Global Forest Resources Assessment 2020. https:\/\/www.fao.org\/forest-resourcesassessment\/2020\/en\/. Accessed 05 July 2023"},{"key":"6_CR2","doi-asserted-by":"publisher","unstructured":"Giusti, A., et al.: A machine learning approach to visual perception of forest trails for mobile robots. IEEE Robot. Automat. Lett. 1(2), 661\u2013667 (2016). https:\/\/doi.org\/10.1109\/LRA.2015.2509024","DOI":"10.1109\/LRA.2015.2509024"},{"key":"6_CR3","doi-asserted-by":"publisher","unstructured":"Munappy, A., Bosch, J., Olsson, H.H., Arpteg, A., Brinne, B.: Data management challenges for deep learning. In: 2019 45th Euromicro Conference on Software Engineering and Advanced Applications (SEAA), Kallithea, pp. 140\u2013147 (2019). https:\/\/doi.org\/10.1109\/SEAA.2019.00030","DOI":"10.1109\/SEAA.2019.00030"},{"key":"6_CR4","doi-asserted-by":"publisher","unstructured":"Hu, Q., et al.: RandLA-net: efficient semantic segmentation of large-scale point clouds. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Seattle, pp. 11105\u201311114 (2020). https:\/\/doi.org\/10.1109\/CVPR42600.2020.01112","DOI":"10.1109\/CVPR42600.2020.01112"},{"key":"6_CR5","unstructured":"V7Labs. V7 Darwin - Auto-Annotate Complex Objects 10x Faster. https:\/\/www.v7labs.com\/auto-annotation. Accessed 04 July 2023"},{"key":"6_CR6","unstructured":"He, R., et al.: Is synthetic data from generative models ready for image recognition? arXiv preprint arXiv:2210.07574 [cs.CV] (2023)"},{"key":"6_CR7","doi-asserted-by":"publisher","unstructured":"Richter, S.R., Vineet, V., Roth, S., Koltun, V.: Playing for data: ground truth from computer games. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) Computer Vision. ECCV 2016. LNCS, vol. 9906, pp. 102\u2013118. Springer, Cham (2016). 
https:\/\/doi.org\/10.1007\/978-3-319-46475-6_7","DOI":"10.1007\/978-3-319-46475-6_7"},{"key":"6_CR8","doi-asserted-by":"publisher","unstructured":"Brostow, G.J., Fauqueur, J., Cipolla, R.: Semantic object classes in video: a high-definition ground truth database. Pattern Recogn. Lett. 30(2), 88\u201397 (2009). https:\/\/doi.org\/10.1016\/j.patrec.2008.04.005","DOI":"10.1016\/j.patrec.2008.04.005"},{"key":"6_CR9","doi-asserted-by":"crossref","unstructured":"Yue, X., Wu, B., Seshia, S.A., Keutzer, K., Sangiovanni-Vincentelli, A.L.: A LiDAR Point Cloud Generator: From a Virtual World to Autonomous Driving. arXiv preprint arXiv: 1804.00103 [cs.CV] (2018)","DOI":"10.1145\/3206025.3206080"},{"key":"6_CR10","doi-asserted-by":"publisher","unstructured":"Wu, B., Wan, A., Yue, X., Keutzer, K.: SqueezeSeg: convolutional neural nets with recurrent CRF for real-time road-object segmentation from 3D LiDAR point cloud. In: 2018 IEEE International Conference on Robotics and Automation (ICRA), Brisbane, pp. 1887\u20131893 (2018). https:\/\/doi.org\/10.1109\/ICRA.2018.8462926","DOI":"10.1109\/ICRA.2018.8462926"},{"key":"6_CR11","doi-asserted-by":"publisher","unstructured":"Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? The KITTI vision benchmark suite. In: 2012 IEEE Conference on Computer Vision and Pattern Recognition, Providence, pp. 3354\u20133361 (2012). https:\/\/doi.org\/10.1109\/CVPR.2012.6248074","DOI":"10.1109\/CVPR.2012.6248074"},{"key":"6_CR12","doi-asserted-by":"publisher","unstructured":"Hurl, B., Czarnecki, K., Waslander, S.: Precise synthetic image and LiDAR (PreSIL) dataset for autonomous vehicle perception. IEEE Intell. Veh. Symp. (IV) Paris, France 2019, 2522\u20132529 (2019). https:\/\/doi.org\/10.1109\/IVS.2019.8813809","DOI":"10.1109\/IVS.2019.8813809"},{"key":"6_CR13","unstructured":"Griffiths, D., Boehm, J.: Synthcity: a large scale synthetic point cloud. arXiv:1907.04758 [cs.CV] (2019)"},{"key":"6_CR14","unstructured":"Wrenninge, M., Unger, J.: Synscapes: a photorealistic synthetic dataset for street scene parsing. arXiv:1810.08705 [cs.CV] (2018)"},{"key":"6_CR15","unstructured":"Nunes, R., Ferreira, J., Peixoto, P.: Procedural generation of synthetic forest environments to train machine learning algorithms. https:\/\/openreview.net\/forum?id=rpzgjNCe4G9"},{"key":"6_CR16","doi-asserted-by":"crossref","unstructured":"Cordts, M., et al.: The cityscapes dataset for semantic urban scene understanding. In: Proceeding of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). arXiv:1604.01685 [cs.CV] (2016)","DOI":"10.1109\/CVPR.2016.350"},{"key":"6_CR17","doi-asserted-by":"publisher","unstructured":"Perlin, K.: An image synthesizer. SIGGRAPH Comput. Graph. 19(3), 287\u2013296 (1985). https:\/\/doi.org\/10.1145\/325165.325247","DOI":"10.1145\/325165.325247"},{"key":"6_CR18","unstructured":"Perlin, K.: Noise hardware (2001). https:\/\/redirect.cs.umbc.edu\/~olano\/s2001c24\/ch09.pdf. Accessed 30 June 2023"},{"key":"6_CR19","unstructured":"devdad. Simplexnoise plugin (2020). https:\/\/github.com\/devdad\/SimplexNoise. Accessed 14 Jan 2023. Commit: b57598706afd8fc4d50164da1ed58515595699b2"},{"key":"6_CR20","unstructured":"Unreal Engine 5.1 Documentation - Procedural Mesh Component (2022). https:\/\/docs.unrealengine.com\/5.1\/enUS\/API\/Plugins\/ProceduralMeshComponent\/UProceduralMeshComponent\/. Accessed 23 June 2023"},{"key":"6_CR21","unstructured":"Adobe Inc. Adobe Photoshop. https:\/\/www.adobe.com\/products\/photoshop.html. 
Accessed 04 July 2023"}],"container-title":["Lecture Notes in Networks and Systems","Robot 2023: Sixth Iberian Robotics Conference"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-58676-7_6","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,9]],"date-time":"2026-02-09T18:03:13Z","timestamp":1770660193000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-58676-7_6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031586750","9783031586767"],"references-count":21,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-58676-7_6","relation":{},"ISSN":["2367-3370","2367-3389"],"issn-type":[{"value":"2367-3370","type":"print"},{"value":"2367-3389","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"27 April 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ROBOT","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Iberian Robotics conference","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Coimbra","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Portugal","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"22 November 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 November 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"robot2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.iberianroboticsconf.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
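
The record above is a raw Crossref REST API "work" object for the chapter DOI 10.1007/978-3-031-58676-7_6. As a minimal sketch of how such a record can be retrieved and its main fields read, the Python snippet below queries Crossref's public works endpoint (https://api.crossref.org/works/{DOI}), which returns the same {"status": ..., "message": {...}} envelope shown here. The use of the requests library, the timeout value, and the particular fields printed are illustrative choices; the field names themselves (title, author, issued, container-title, page, reference) are taken directly from the record above.

import requests

# DOI of the chapter described by the metadata record above
DOI = "10.1007/978-3-031-58676-7_6"

# Crossref's public works endpoint returns the same envelope as the record:
# {"status": "ok", "message-type": "work", ..., "message": {...}}
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]

# Basic bibliographic fields present in the record above
print("Title:    ", work["title"][0])
print("Authors:  ", "; ".join(f'{a.get("given", "")} {a["family"]}'.strip()
                              for a in work.get("author", [])))
print("Published:", work["issued"]["date-parts"][0][0])
print("In:       ", " / ".join(work.get("container-title", [])))
print("Pages:    ", work.get("page", "n/a"))

# The deposited reference list (21 entries here); not every entry carries a DOI,
# so fall back to the unstructured citation string when one is missing.
for ref in work.get("reference", []):
    print(f'  {ref["key"]}: {ref.get("DOI", ref.get("unstructured", ""))[:80]}')

Because resp.json() mirrors the record verbatim, the same keys exposed above (ISBN, license, ISSN, and the "assertion" block carrying the ROBOT 2023 conference details) are available from the parsed dictionary in exactly the form they appear in this dump.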