{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,26]],"date-time":"2026-01-26T00:20:01Z","timestamp":1769386801565,"version":"3.49.0"},"publisher-location":"Cham","reference-count":29,"publisher":"Springer International Publishing","isbn-type":[{"value":"9783031064296","type":"print"},{"value":"9783031064302","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-06430-2_6","type":"book-chapter","created":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T08:03:16Z","timestamp":1652688196000},"page":"65-76","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":11,"title":["StandardSim: A Synthetic Dataset for\u00a0Retail Environments"],"prefix":"10.1007","author":[{"given":"Cristina","family":"Mata","sequence":"first","affiliation":[]},{"given":"Nick","family":"Locascio","sequence":"additional","affiliation":[]},{"given":"Mohammed Azeem","family":"Sheikh","sequence":"additional","affiliation":[]},{"given":"Kenny","family":"Kihara","sequence":"additional","affiliation":[]},{"given":"Dan","family":"Fischetti","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,5,17]]},"reference":[{"key":"6_CR1","unstructured":"3D Models for Professionals: TurboSquid. https:\/\/www.turbosquid.com"},{"key":"6_CR2","unstructured":"Pro2 3D Camera. https:\/\/matterport.com\/cameras\/pro2-3D-camera"},{"key":"6_CR3","first-page":"122","volume":"120","author":"G Bradski","year":"2000","unstructured":"Bradski, G.: The OpenCV library. Dr. Dobb\u2019s J. Softw. Tools 120, 122\u2013125 (2000)","journal-title":"Dr. Dobb\u2019s J. Softw. Tools"},{"key":"6_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"611","DOI":"10.1007\/978-3-642-33783-3_44","volume-title":"Computer Vision \u2013 ECCV 2012","author":"DJ Butler","year":"2012","unstructured":"Butler, D.J., Wulff, J., Stanley, G.B., Black, M.J.: A naturalistic open source movie for optical flow evaluation. In: Fitzgibbon, A., Lazebnik, S., Perona, P., Sato, Y., Schmid, C. (eds.) ECCV 2012. LNCS, vol. 7577, pp. 611\u2013625. Springer, Heidelberg (2012). https:\/\/doi.org\/10.1007\/978-3-642-33783-3_44"},{"key":"6_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"833","DOI":"10.1007\/978-3-030-01234-2_49","volume-title":"Computer Vision \u2013 ECCV 2018","author":"L-C Chen","year":"2018","unstructured":"Chen, L.-C., Zhu, Y., Papandreou, G., Schroff, F., Adam, H.: Encoder-decoder with atrous separable convolution for semantic image segmentation. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11211, pp. 833\u2013851. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01234-2_49"},{"key":"6_CR6","doi-asserted-by":"crossref","unstructured":"Chen, X., Ma, H., Wan, J., Li, B., Xia, T.: Multi-view 3d object detection network for autonomous driving. 
In: IEEE CVPR, vol. 1, p. 3 (2017)","DOI":"10.1109\/CVPR.2017.691"},{"key":"6_CR7","unstructured":"Community, B.O.: Blender - a 3D modelling and rendering package. Blender Foundation, Stichting Blender Foundation, Amsterdam (2018). http:\/\/www.blender.org"},{"key":"6_CR8","doi-asserted-by":"crossref","unstructured":"Goel, K., Srinivasan, P., Tariq, S., Philbin, J.: QuadroNet: multi-task learning for real-time semantic depth aware instance segmentation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), January 2021, pp. 315\u2013324 (2021)","DOI":"10.1109\/WACV48630.2021.00036"},{"key":"6_CR9","doi-asserted-by":"crossref","unstructured":"Goldman, E., Herzig, R., Eisenschtat, A., Goldberger, J., Hassner, T.: Precise detection in densely packed scenes. In: Proceedings of the Conference on Computer Vision and Pattern Recognition (CVPR) (2019)","DOI":"10.1109\/CVPR.2019.00537"},{"key":"6_CR10","doi-asserted-by":"crossref","unstructured":"Hartley, R.I., Zisserman, A.: Multiple View Geometry in Computer Vision, 2nd edn. Cambridge University Press (2004). ISBN 0521540518","DOI":"10.1017\/CBO9780511811685"},{"key":"6_CR11","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. arXiv preprint arXiv:1512.03385 (2015)","DOI":"10.1109\/CVPR.2016.90"},{"key":"6_CR12","doi-asserted-by":"crossref","unstructured":"Jin, L., et al.: Geometric structure based and regularized depth estimation from 360 indoor imagery. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020 (2020)","DOI":"10.1109\/CVPR42600.2020.00097"},{"key":"6_CR13","unstructured":"Lin, T.Y., et al.: Microsoft COCO: common objects in context (2014). http:\/\/arxiv.org\/abs\/1405.0312"},{"key":"6_CR14","doi-asserted-by":"crossref","unstructured":"Liu, X., Lathrop Jr., R.G.: Urban change detection based on an artificial neural network. Int. J. Remote Sens. 23(12), 2513\u20132518 (2002)","DOI":"10.1080\/01431160110097240"},{"key":"6_CR15","doi-asserted-by":"crossref","unstructured":"McCormac, J., Handa, A., Leutenegger, S., J. Davison, A.: SceneNet RGB-D: can 5M synthetic images beat generic imagenet pre-training on indoor segmentation? (2017)","DOI":"10.1109\/ICCV.2017.292"},{"key":"6_CR16","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"746","DOI":"10.1007\/978-3-642-33715-4_54","volume-title":"Computer Vision \u2013 ECCV 2012","author":"N Silberman","year":"2012","unstructured":"Silberman, N., Hoiem, D., Kohli, P., Fergus, R.: Indoor segmentation and support inference from RGBD images. In: Fitzgibbon, A., Lazebnik, S., Perona, P., Sato, Y., Schmid, C. (eds.) ECCV 2012. LNCS, vol. 7576, pp. 746\u2013760. Springer, Heidelberg (2012). https:\/\/doi.org\/10.1007\/978-3-642-33715-4_54"},{"key":"6_CR17","doi-asserted-by":"crossref","unstructured":"Park, J.M., Jang, J., Yoo, S.M., Lee, S.K., Kim, U., Kim, J.H.: ChangeSim: towards end-to-end online scene change detection in industrial indoor environments. In: 2021 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE (2021). https:\/\/arxiv.org\/abs\/2103.05368","DOI":"10.1109\/IROS51168.2021.9636350"},{"key":"6_CR18","unstructured":"Paszke, A., et al.: PyTorch: an imperative style, high-performance deep learning library. In: Wallach, H., Larochelle, H., Beygelzimer, A., d\u2019Alche Buc, F., Fox, E., Garnett, R. (eds.) 
Advances in Neural Information Processing Systems 32, pp. 8024\u20138035. Curran Associates, Inc. (2019), http:\/\/papers.neurips.cc\/paper\/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf"},{"key":"6_CR19","doi-asserted-by":"crossref","unstructured":"Ranftl, R., Bochkovskiy, A., Koltun, V.: Vision transformers for dense prediction. arXiv preprint (2021)","DOI":"10.1109\/ICCV48922.2021.01196"},{"key":"6_CR20","doi-asserted-by":"publisher","first-page":"1623","DOI":"10.1109\/TPAMI.2020.3019967","volume":"44","author":"R Ranftl","year":"2020","unstructured":"Ranftl, R., Lasinger, K., Hafner, D., Schindler, K., Koltun, V.: Towards robust monocular depth estimation: mixing datasets for zero-shot cross-dataset transfer. IEEE Trans. Pattern Anal. Mach. Intell. (TPAMI) 44, 1623\u20131637 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell. (TPAMI)"},{"key":"6_CR21","doi-asserted-by":"crossref","unstructured":"Roberts, M., et al.: Hypersim: a photorealistic synthetic dataset for holistic indoor scene understanding (2021)","DOI":"10.1109\/ICCV48922.2021.01073"},{"key":"6_CR22","doi-asserted-by":"crossref","unstructured":"Saeedan, F., Roth, S.: Boosting monocular depth with panoptic segmentation maps. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), January 2021, pp. 3853\u20133862 (2021)","DOI":"10.1109\/WACV48630.2021.00390"},{"key":"6_CR23","doi-asserted-by":"publisher","unstructured":"Shi, W., Zhang, M., Zhang, R., Chen, S., Zhan, Z.: Change detection based on artificial intelligence: state-of-the-art and challenges. Remote Sens. 12(10) (2020). https:\/\/doi.org\/10.3390\/rs12101688. https:\/\/www.mdpi.com\/2072-4292\/12\/10\/1688","DOI":"10.3390\/rs12101688"},{"key":"6_CR24","doi-asserted-by":"crossref","unstructured":"Sturm, J., Engelhard, N., Endres, F., Burgard, W., Cremers, D.: A benchmark for the evaluation of RGB-D SLAM systems. In: Proceedings of the International Conference on Intelligent Robot Systems (IROS), October 2012 (2012)","DOI":"10.1109\/IROS.2012.6385773"},{"key":"6_CR25","doi-asserted-by":"crossref","unstructured":"Uhrig, J., Schneider, N., Schneider, L., Franke, U., Brox, T., Geiger, A.: Sparsity invariant CNNs. In: International Conference on 3D Vision (3DV) (2017)","DOI":"10.1109\/3DV.2017.00012"},{"key":"6_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"129","DOI":"10.1007\/978-3-030-11012-3_10","volume-title":"Computer Vision \u2013 ECCV 2018 Workshops","author":"A Varghese","year":"2019","unstructured":"Varghese, A., Gubbi, J., Ramaswamy, A., Balamuralidhar, P.: ChangeNet: a deep learning architecture for visual change detection. In: Leal-Taix\u00e9, L., Roth, S. (eds.) ECCV 2018. LNCS, vol. 11130, pp. 129\u2013145. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-11012-3_10"},{"key":"6_CR27","doi-asserted-by":"crossref","unstructured":"Wang, K., Shen, S.: MVDepthNet: real-time multiview depth estimation neural network. In: International Conference on 3D Vision (3DV), September 2018 (2018)","DOI":"10.1109\/3DV.2018.00037"},{"key":"6_CR28","doi-asserted-by":"crossref","unstructured":"Wang, L., Zhang, J., Wang, O., Lin, Z., Lu, H.: SDC-depth: semantic divide-and-conquer network for monocular depth estimation. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020 (2020)","DOI":"10.1109\/CVPR42600.2020.00062"},{"key":"6_CR29","doi-asserted-by":"crossref","unstructured":"Zhao, Y., Kong, S., Shin, D., Fowlkes, C.: Domain decluttering: simplifying images to mitigate synthetic-real domain shift and improve depth estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020 (2020)","DOI":"10.1109\/CVPR42600.2020.00339"}],"container-title":["Lecture Notes in Computer Science","Image Analysis and Processing \u2013 ICIAP 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-06430-2_6","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,13]],"date-time":"2024-03-13T13:44:42Z","timestamp":1710337482000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-06430-2_6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031064296","9783031064302"],"references-count":29,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-06430-2_6","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"17 May 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIAP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Image Analysis and Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Lecce","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 May 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 May 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iciap2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.iciap2021.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Microsoft","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"307","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"168","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"55% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
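The record above is a Crossref "work" message (message-type "work", message-version 1.0.0) describing the ICIAP 2022 chapter "StandardSim: A Synthetic Dataset for Retail Environments". As a minimal sketch of how such a record can be retrieved and read, the Python snippet below queries the public Crossref REST API for the chapter's DOI and prints a few of the fields present in the record. The endpoint URL is an assumption based on the standard Crossref API (api.crossref.org); the DOI and the field names used (title, author, container-title, references-count) are taken directly from the record shown here.

# Minimal sketch: fetching and reading a Crossref "work" record like the one above.
# Assumes the public Crossref REST API at api.crossref.org; the DOI and the field
# names (title, author, container-title, references-count) come from the record itself.
import json
import urllib.request

DOI = "10.1007/978-3-031-06430-2_6"  # DOI of the chapter described above

with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    record = json.load(resp)

# The payload mirrors the structure shown here: the metadata lives under "message".
work = record["message"]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))

print(work["title"][0])                  # StandardSim: A Synthetic Dataset for Retail Environments
print(authors)                           # Cristina Mata, Nick Locascio, ...
print(work["container-title"])           # Lecture Notes in Computer Science, ICIAP 2022 volume
print(work["references-count"], "refs")  # 29 refs

Nested parts of the record, such as the reference, license, and assertion arrays, can be read the same way once the "message" object has been extracted.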