{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,23]],"date-time":"2025-12-23T05:27:06Z","timestamp":1766467626973,"version":"3.40.3"},"publisher-location":"Singapore","reference-count":50,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819980697"},{"type":"electronic","value":"9789819980703"}],"license":[{"start":{"date-parts":[[2023,11,15]],"date-time":"2023-11-15T00:00:00Z","timestamp":1700006400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,11,15]],"date-time":"2023-11-15T00:00:00Z","timestamp":1700006400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-981-99-8070-3_27","type":"book-chapter","created":{"date-parts":[[2023,11,14]],"date-time":"2023-11-14T08:02:54Z","timestamp":1699948974000},"page":"352-366","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["DOS Dataset: A Novel Indoor Deformable Object Segmentation Dataset for\u00a0Sweeping Robots"],"prefix":"10.1007","author":[{"given":"Zehan","family":"Tan","sequence":"first","affiliation":[]},{"given":"Weidong","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Zhiwei","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,11,15]]},"reference":[{"key":"27_CR1","doi-asserted-by":"publisher","unstructured":"Adhikari, B., Peltomaki, J., Puura, J., Huttunen, H.: Faster bounding box annotation for object detection in indoor scenes. In: 2018 7th European Workshop on Visual Information Processing (EUVIP), pp. 1\u20136. 
IEEE (2018). https:\/\/doi.org\/10.1109\/EUVIP.2018.8611732","DOI":"10.1109\/EUVIP.2018.8611732"},{"issue":"12","key":"27_CR2","doi-asserted-by":"publisher","first-page":"2481","DOI":"10.1109\/TPAMI.2016.2644615","volume":"39","author":"V Badrinarayanan","year":"2017","unstructured":"Badrinarayanan, V., Kendall, A., Cipolla, R.: SegNet: a deep convolutional encoder-decoder architecture for image segmentation. IEEE Trans. Pattern Anal. Mach. Intell. 39(12), 2481\u20132495 (2017). https:\/\/doi.org\/10.1109\/TPAMI.2016.2644615","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"27_CR3","doi-asserted-by":"publisher","unstructured":"Bashkirova, D., et al.: ZeroWaste dataset: towards deformable object segmentation in cluttered scenes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 21147\u201321157 (2022). https:\/\/doi.org\/10.48550\/arXiv.2106.02740","DOI":"10.48550\/arXiv.2106.02740"},{"key":"27_CR4","doi-asserted-by":"publisher","unstructured":"Caesar, H., Uijlings, J., Ferrari, V.: COCO-stuff: thing and stuff classes in context. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1209\u20131218 (2018). https:\/\/doi.org\/10.48550\/arXiv.1612.03716","DOI":"10.48550\/arXiv.1612.03716"},{"key":"27_CR5","doi-asserted-by":"publisher","unstructured":"Chen, L.C., Papandreou, G., Schroff, F., Adam, H.: Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587 (2017). https:\/\/doi.org\/10.48550\/arXiv.1706.05587","DOI":"10.48550\/arXiv.1706.05587"},{"key":"27_CR6","doi-asserted-by":"publisher","unstructured":"Chen, L.C., Zhu, Y., Papandreou, G., Schroff, F., Adam, H.: Encoder-decoder with atrous separable convolution for semantic image segmentation. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 801\u2013818 (2018). 
https:\/\/doi.org\/10.1007\/978-3-030-01234-2_49","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"27_CR7","unstructured":"Contributors, M.: MMSegmentation: Open MMLab semantic segmentation toolbox and benchmark (2020). https:\/\/github.com\/open-mmlab\/mmsegmentation"},{"key":"27_CR8","doi-asserted-by":"publisher","unstructured":"Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nie\u00dfner, M.: ScanNet: richly-annotated 3D reconstructions of indoor scenes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5828\u20135839 (2017). https:\/\/doi.org\/10.48550\/arXiv.1702.04405","DOI":"10.48550\/arXiv.1702.04405"},{"key":"27_CR9","doi-asserted-by":"publisher","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020). https:\/\/doi.org\/10.48550\/arXiv.2010.11929","DOI":"10.48550\/arXiv.2010.11929"},{"key":"27_CR10","doi-asserted-by":"publisher","first-page":"303","DOI":"10.1007\/s11263-009-0275-4","volume":"88","author":"M Everingham","year":"2009","unstructured":"Everingham, M., Van Gool, L., Williams, C.K., Winn, J., Zisserman, A.: The pascal visual object classes (VOC) challenge. Int. J. Comput. Vis. 88, 303\u2013308 (2009). https:\/\/doi.org\/10.1007\/s11263-009-0275-4","journal-title":"Int. J. Comput. Vis."},{"key":"27_CR11","doi-asserted-by":"publisher","unstructured":"Geng, Z., Guo, M.H., Chen, H., Li, X., Wei, K., Lin, Z.: Is attention better than matrix decomposition? arXiv preprint arXiv:2109.04553 (2021). https:\/\/doi.org\/10.48550\/arXiv.2109.04553","DOI":"10.48550\/arXiv.2109.04553"},{"key":"27_CR12","doi-asserted-by":"publisher","unstructured":"Guo, M.H., Lu, C.Z., Hou, Q., Liu, Z., Cheng, M.M., Hu, S.M.: SegNeXt: rethinking convolutional attention design for semantic segmentation. arXiv preprint arXiv:2209.08575 (2022). 
https:\/\/doi.org\/10.48550\/arXiv.2209.08575","DOI":"10.48550\/arXiv.2209.08575"},{"key":"27_CR13","doi-asserted-by":"publisher","unstructured":"Huang, L., Yuan, Y., Guo, J., Zhang, C., Chen, X., Wang, J.: Interlaced sparse self-attention for semantic segmentation. arXiv preprint arXiv:1907.12273 (2019). https:\/\/doi.org\/10.48550\/arXiv.1907.12273","DOI":"10.48550\/arXiv.1907.12273"},{"key":"27_CR14","doi-asserted-by":"publisher","unstructured":"Huang, Q.: Weight-quantized SqueezeNet for resource-constrained robot vacuums for indoor obstacle classification. AI 3(1), 180\u2013193 (2022). https:\/\/doi.org\/10.3390\/ai3010011","DOI":"10.3390\/ai3010011"},{"key":"27_CR15","doi-asserted-by":"publisher","unstructured":"Huang, X., Sanket, K., Ayyad, A., Naeini, F.B., Makris, D., Zweir, Y.: A neuromorphic dataset for object segmentation in indoor cluttered environment. arXiv preprint arXiv:2302.06301 (2023). https:\/\/doi.org\/10.48550\/arXiv.2302.06301","DOI":"10.48550\/arXiv.2302.06301"},{"key":"27_CR16","doi-asserted-by":"publisher","unstructured":"Huang, Z., Wang, X., Huang, L., Huang, C., Wei, Y., Liu, W.: CCNet: criss-cross attention for semantic segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 603\u2013612 (2019). https:\/\/doi.org\/10.1109\/ICCV.2019.00069","DOI":"10.1109\/ICCV.2019.00069"},{"key":"27_CR17","doi-asserted-by":"publisher","unstructured":"Keipour, A., Mousaei, M., Bandari, M., Schaal, S., Scherer, S.: Detection and physical interaction with deformable linear objects. arXiv preprint arXiv:2205.08041 (2022). https:\/\/doi.org\/10.48550\/arXiv.2205.08041","DOI":"10.48550\/arXiv.2205.08041"},{"key":"27_CR18","doi-asserted-by":"publisher","unstructured":"Kim, W., Seok, J.: Indoor semantic segmentation for robot navigating on mobile. In: 2018 Tenth International Conference on Ubiquitous and Future Networks (ICUFN), pp. 22\u201325. IEEE (2018). 
https:\/\/doi.org\/10.1109\/ICUFN.2018.8436956","DOI":"10.1109\/ICUFN.2018.8436956"},{"key":"27_CR19","doi-asserted-by":"publisher","unstructured":"Kirillov, A., Wu, Y., He, K., Girshick, R.: PointRend: image segmentation as rendering. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9799\u20139808 (2020). https:\/\/doi.org\/10.48550\/arXiv.1912.08193","DOI":"10.48550\/arXiv.1912.08193"},{"issue":"2","key":"27_CR20","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1109\/MRA.2021.3066040","volume":"28","author":"M Koskinopoulou","year":"2021","unstructured":"Koskinopoulou, M., Raptopoulos, F., Papadopoulos, G., Mavrakis, N., Maniadakis, M.: Robotic waste sorting technology: toward a vision-based categorization system for the industrial robotic separation of recyclable waste. IEEE Robot. Autom. Mag. 28(2), 50\u201360 (2021). https:\/\/doi.org\/10.1109\/MRA.2021.3066040","journal-title":"IEEE Robot. Autom. Mag."},{"key":"27_CR21","doi-asserted-by":"publisher","unstructured":"Li, W., et al.: InteriorNet: mega-scale multi-sensor photo-realistic indoor scenes dataset. arXiv preprint arXiv:1809.00716 (2018). https:\/\/doi.org\/10.48550\/arXiv.1809.00716","DOI":"10.48550\/arXiv.1809.00716"},{"key":"27_CR22","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"27_CR23","doi-asserted-by":"publisher","unstructured":"Liu, Z., et al.: Swin Transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
10012\u201310022 (2021). https:\/\/doi.org\/10.48550\/arXiv.2103.14030","DOI":"10.48550\/arXiv.2103.14030"},{"key":"27_CR24","doi-asserted-by":"publisher","unstructured":"Long, J., Shelhamer, E., Darrell, T.: Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3431\u20133440 (2015). https:\/\/doi.org\/10.1109\/CVPR.2015.7298965","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"27_CR25","doi-asserted-by":"publisher","first-page":"17820","DOI":"10.1109\/ACCESS.2021.3053546","volume":"9","author":"Y Lv","year":"2021","unstructured":"Lv, Y., Fang, Y., Chi, W., Chen, G., Sun, L.: Object detection for sweeping robots in home scenes (ODSR-IHS): a novel benchmark dataset. IEEE Access 9, 17820\u201317828 (2021). https:\/\/doi.org\/10.1109\/ACCESS.2021.3053546","journal-title":"IEEE Access"},{"key":"27_CR26","doi-asserted-by":"publisher","first-page":"274","DOI":"10.1016\/j.wasman.2021.12.001","volume":"138","author":"S Majchrowska","year":"2022","unstructured":"Majchrowska, S., et al.: Deep learning-based waste detection in natural and urban environments. Waste Manage. 138, 274\u2013284 (2022). https:\/\/doi.org\/10.1016\/j.wasman.2021.12.001","journal-title":"Waste Manage."},{"key":"27_CR27","doi-asserted-by":"publisher","unstructured":"Minaee, S., Boykov, Y.Y., Porikli, F., Plaza, A.J., Kehtarnavaz, N., Terzopoulos, D.: Image segmentation using deep learning: a survey. IEEE Trans. Pattern Anal. Mach. Intell. 44, 3523\u20133542 (2021). https:\/\/doi.org\/10.48550\/arXiv.1809.00716","DOI":"10.48550\/arXiv.1809.00716"},{"key":"27_CR28","doi-asserted-by":"publisher","first-page":"626","DOI":"10.1016\/j.neucom.2022.01.005","volume":"493","author":"Y Mo","year":"2022","unstructured":"Mo, Y., Wu, Y., Yang, X., Liu, F., Liao, Y.: Review the state-of-the-art technologies of semantic segmentation based on deep learning. Neurocomputing 493, 626\u2013646 (2022). 
https:\/\/doi.org\/10.1016\/j.neucom.2022.01.005","journal-title":"Neurocomputing"},{"key":"27_CR29","doi-asserted-by":"publisher","unstructured":"Pohlen, T., Hermans, A., Mathias, M., Leibe, B.: Full-resolution residual networks for semantic segmentation in street scenes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4151\u20134160 (2017). https:\/\/doi.org\/10.48550\/arXiv.1611.08323","DOI":"10.48550\/arXiv.1611.08323"},{"key":"27_CR30","doi-asserted-by":"publisher","unstructured":"Proen\u00e7a, P.F., Simoes, P.: TACO: trash annotations in context for litter detection. arXiv preprint arXiv:2003.06975 (2020). https:\/\/doi.org\/10.48550\/arXiv.2003.06975","DOI":"10.48550\/arXiv.2003.06975"},{"key":"27_CR31","doi-asserted-by":"publisher","unstructured":"Rafique, A.A., Jalal, A., Kim, K.: Statistical multi-objects segmentation for indoor\/outdoor scene detection and classification via depth images. In: 2020 17th International Bhurban Conference on Applied Sciences and Technology (IBCAST), pp. 271\u2013276. IEEE (2020). https:\/\/doi.org\/10.1109\/IBCAST47879.2020.9044576","DOI":"10.1109\/IBCAST47879.2020.9044576"},{"issue":"3","key":"27_CR32","doi-asserted-by":"publisher","first-page":"1562","DOI":"10.3390\/app13031562","volume":"13","author":"J Rao","year":"2023","unstructured":"Rao, J., Bian, H., Xu, X., Chen, J.: Autonomous visual navigation system based on a single camera for floor-sweeping robot. Appl. Sci. 13(3), 1562 (2023). https:\/\/doi.org\/10.3390\/app13031562","journal-title":"Appl. Sci."},{"key":"27_CR33","doi-asserted-by":"publisher","unstructured":"Richtsfeld, A., M\u00f6rwald, T., Prankl, J., Zillich, M., Vincze, M.: Segmentation of unknown objects in indoor environments. In: 2012 IEEE\/RSJ International Conference on Intelligent Robots and Systems, pp. 4791\u20134796. IEEE (2012). 
https:\/\/doi.org\/10.1109\/IROS.2012.6385661","DOI":"10.1109\/IROS.2012.6385661"},{"key":"27_CR34","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1007\/978-3-319-24574-4_28","volume-title":"Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2015","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-Net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28"},{"issue":"1","key":"27_CR35","doi-asserted-by":"publisher","first-page":"157","DOI":"10.1007\/s11263-007-0090-8","volume":"77","author":"BC Russell","year":"2008","unstructured":"Russell, B.C., Torralba, A., Murphy, K.P., Freeman, W.T.: LabelMe: a database and web-based tool for image annotation. Int. J. Comput. Vis. 77(1), 157\u2013173 (2008). https:\/\/doi.org\/10.1007\/s11263-007-0090-8","journal-title":"Int. J. Comput. Vis."},{"issue":"4","key":"27_CR36","doi-asserted-by":"publisher","first-page":"640","DOI":"10.1109\/TPAMI.2016.2572683","volume":"39","author":"E Shelhamer","year":"2017","unstructured":"Shelhamer, E., Long, J., Darrell, T.: Fully convolutional networks for semantic segmentation. IEEE Trans. Pattern Anal. Mach. Intell. 39(4), 640\u2013651 (2017). https:\/\/doi.org\/10.1109\/TPAMI.2016.2572683","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"7576","key":"27_CR37","doi-asserted-by":"publisher","first-page":"746","DOI":"10.1007\/978-3-642-33715-4_54","volume":"5","author":"N Silberman","year":"2012","unstructured":"Silberman, N., Hoiem, D., Kohli, P., Fergus, R.: Indoor segmentation and support inference from RGBD images. ECCV 5(7576), 746\u2013760 (2012). 
https:\/\/doi.org\/10.1007\/978-3-642-33715-4_54","journal-title":"ECCV"},{"key":"27_CR38","doi-asserted-by":"publisher","unstructured":"Strudel, R., Garcia, R., Laptev, I., Schmid, C.: Segmenter: transformer for semantic segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 7262\u20137272 (2021). https:\/\/doi.org\/10.48550\/arXiv.2105.05633","DOI":"10.48550\/arXiv.2105.05633"},{"issue":"14","key":"27_CR39","doi-asserted-by":"publisher","first-page":"3816","DOI":"10.3390\/s20143816","volume":"20","author":"T Wang","year":"2020","unstructured":"Wang, T., Cai, Y., Liang, L., Ye, D.: A multi-level approach to waste object segmentation. Sensors 20(14), 3816 (2020). https:\/\/doi.org\/10.3390\/s20143816","journal-title":"Sensors"},{"key":"27_CR40","doi-asserted-by":"publisher","first-page":"106813","DOI":"10.1016\/j.resconrec.2022.106813","volume":"190","author":"TW Wu","year":"2023","unstructured":"Wu, T.W., Zhang, H., Peng, W., L\u00fc, F., He, P.J.: Applications of convolutional neural networks for intelligent waste identification and recycling: A review. Resour. Conserv. Recycl. 190, 106813 (2023). https:\/\/doi.org\/10.1016\/j.resconrec.2022.106813","journal-title":"Resour. Conserv. Recycl."},{"key":"27_CR41","doi-asserted-by":"publisher","unstructured":"Xie, E., Wang, W., Yu, Z., Anandkumar, A., Alvarez, J.M., Luo, P.: SegFormer: simple and efficient design for semantic segmentation with transformers. In: Advances in Neural Information Processing Systems, vol. 34, pp. 12077\u201312090 (2021). https:\/\/doi.org\/10.48550\/arXiv.2105.15203","DOI":"10.48550\/arXiv.2105.15203"},{"key":"27_CR42","doi-asserted-by":"publisher","unstructured":"Yuan, Y., Chen, X., Chen, X., Wang, J.: Segmentation transformer: object-contextual representations for semantic segmentation. arXiv preprint arXiv:1909.11065 (2019). 
https:\/\/doi.org\/10.1007\/978-3-030-58539-6_11","DOI":"10.1007\/978-3-030-58539-6_11"},{"key":"27_CR43","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"173","DOI":"10.1007\/978-3-030-58539-6_11","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Y Yuan","year":"2020","unstructured":"Yuan, Y., Chen, X., Wang, J.: Object-contextual representations for semantic segmentation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12351, pp. 173\u2013190. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58539-6_11"},{"issue":"8","key":"27_CR44","doi-asserted-by":"publisher","first-page":"2375","DOI":"10.1007\/s11263-021-01465-9","volume":"129","author":"Y Yuan","year":"2021","unstructured":"Yuan, Y., Huang, L., Guo, J., Zhang, C., Chen, X., Wang, J.: OCNet: object context for semantic segmentation. Int. J. Comput. Vis. 129(8), 2375\u20132398 (2021). https:\/\/doi.org\/10.1007\/s11263-021-01465-9","journal-title":"Int. J. Comput. Vis."},{"key":"27_CR45","doi-asserted-by":"publisher","unstructured":"Zhang, H., et al.: Context encoding for semantic segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7151\u20137160 (2018). https:\/\/doi.org\/10.48550\/arXiv.1803.08904","DOI":"10.48550\/arXiv.1803.08904"},{"key":"27_CR46","doi-asserted-by":"publisher","unstructured":"Zhang, W., Pang, J., Chen, K., Loy, C.C.: K-Net: towards unified image segmentation. In: Advances in Neural Information Processing Systems, vol. 34, pp. 10326\u201310338 (2021). https:\/\/doi.org\/10.48550\/arXiv.2106.14855","DOI":"10.48550\/arXiv.2106.14855"},{"key":"27_CR47","doi-asserted-by":"publisher","unstructured":"Zhang, W., Pang, J., Chen, K., Loy, C.C.: K-Net: towards unified image segmentation. In: NeurIPS (2021). 
https:\/\/doi.org\/10.48550\/arXiv.2106.14855","DOI":"10.48550\/arXiv.2106.14855"},{"key":"27_CR48","doi-asserted-by":"publisher","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., Jia, J.: Pyramid scene parsing network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2881\u20132890 (2017). https:\/\/doi.org\/10.1109\/CVPR.2017.660","DOI":"10.1109\/CVPR.2017.660"},{"key":"27_CR49","doi-asserted-by":"publisher","unstructured":"Zhao, H., et al.: PSANet: point-wise spatial attention network for scene parsing. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 267\u2013283 (2018). https:\/\/doi.org\/10.1007\/978-3-030-01240-3_17","DOI":"10.1007\/978-3-030-01240-3_17"},{"key":"27_CR50","doi-asserted-by":"publisher","unstructured":"Zhou, B., Zhao, H., Puig, X., Fidler, S., Barriuso, A., Torralba, A.: Scene parsing through ade20k dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 633\u2013641 (2017). 
https:\/\/doi.org\/10.1109\/CVPR.2017.544","DOI":"10.1109\/CVPR.2017.544"}],"container-title":["Lecture Notes in Computer Science","Neural Information Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-99-8070-3_27","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,12]],"date-time":"2024-03-12T16:25:01Z","timestamp":1710260701000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-99-8070-3_27"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,15]]},"ISBN":["9789819980697","9789819980703"],"references-count":50,"URL":"https:\/\/doi.org\/10.1007\/978-981-99-8070-3_27","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023,11,15]]},"assertion":[{"value":"15 November 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICONIP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Neural Information Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Changsha","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20 November 
2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 November 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"30","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iconip2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/iconip2023.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"EasyChair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1274","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"650","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"51% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number 
of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4.14","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.46","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}