{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:30:59Z","timestamp":1775068259643,"version":"3.50.1"},"publisher-location":"Cham","reference-count":29,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031414978","type":"print"},{"value":"9783031414985","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-41498-5_10","type":"book-chapter","created":{"date-parts":[[2023,8,14]],"date-time":"2023-08-14T19:23:19Z","timestamp":1692040999000},"page":"135-150","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["MuraNet: Multi-task Floor Plan Recognition with\u00a0Relation Attention"],"prefix":"10.1007","author":[{"given":"Lingxiao","family":"Huang","sequence":"first","affiliation":[]},{"given":"Jung-Hsuan","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Chiching","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Wilson","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,15]]},"reference":[{"key":"10_CR1","doi-asserted-by":"publisher","unstructured":"Dodge, S., Xu, J., Stenger, B.: Parsing floor plan images. In: MVA, pp. 358\u2013361 (2017). https:\/\/doi.org\/10.23919\/MVA.2017.7986875","DOI":"10.23919\/MVA.2017.7986875"},{"key":"10_CR2","doi-asserted-by":"publisher","unstructured":"de las Heras, L.P., Fern\u00e1ndez, D., Valveny, E., Llad\u00f3s, J., S\u00e1nchez, G.: Unsupervised wall detector in architectural floor plans. In: ICDAR, pp. 1245\u20131249 (2013). https:\/\/doi.org\/10.1109\/ICDAR.2013.252","DOI":"10.1109\/ICDAR.2013.252"},{"key":"10_CR3","doi-asserted-by":"crossref","unstructured":"Surikov, I.Y., Nakhatovich, M.A., Belyaev, S.Y., et al.: Floor plan recognition and vectorization using combination UNet, faster-RCNN, statistical component analysis and Ramer-Douglas-Peucker. In: COMS2, pp. 16\u201328 (2020)","DOI":"10.1007\/978-981-15-6648-6_2"},{"issue":"6","key":"10_CR4","doi-asserted-by":"publisher","first-page":"1205","DOI":"10.1080\/13658816.2020.1781130","volume":"35","author":"Y Wu","year":"2021","unstructured":"Wu, Y., Shang, J., Chen, P., Zlantanova, S., Hu, X., Zhou, Z.: Indoor mapping and modeling by parsing floor plan images. Int. J. Geogr. Inf. Sci. 35(6), 1205\u20131231 (2021)","journal-title":"Int. J. Geogr. Inf. Sci."},{"key":"10_CR5","doi-asserted-by":"publisher","first-page":"58","DOI":"10.1016\/j.ins.2021.03.032","volume":"567","author":"Z Lu","year":"2021","unstructured":"Lu, Z., Wang, T., Guo, J., et al.: Data-driven floor plan understanding in rural residential buildings via deep recognition. Inf. Sci. 567, 58\u201374 (2021)","journal-title":"Inf. Sci."},{"key":"10_CR6","doi-asserted-by":"crossref","unstructured":"Liu, C., Wu, J., Kohli, P., Furukawa, Y.: Raster-to-vector: revisiting floorplan transformation. In: ICCV, pp. 2195\u20132203 (2017)","DOI":"10.1109\/ICCV.2017.241"},{"key":"10_CR7","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"28","DOI":"10.1007\/978-3-030-20205-7_3","volume-title":"Image Analysis","author":"A Kalervo","year":"2019","unstructured":"Kalervo, A., Ylioinas, J., H\u00e4iki\u00f6, M., Karhu, A., Kannala, J.: CubiCasa5K: a dataset and an improved multi-task model for floorplan image analysis. In: Felsberg, M., Forss\u00e9n, P.-E., Sintorn, I.-M., Unger, J. (eds.) SCIA 2019. LNCS, vol. 11482, pp. 28\u201340. Springer, Cham (2019). https:\/\/doi.org\/10.1007\/978-3-030-20205-7_3"},{"key":"10_CR8","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\,\\times \\,$$16 words: Transformers for image recognition at scale. In: International Conference on Learning Represent (2020)"},{"key":"10_CR9","doi-asserted-by":"crossref","unstructured":"Guo, M.H., Lu, C.Z., Liu, Z.N., Cheng, M.M., Hu, S.M.: Visual Attention Network. arXiv preprint arXiv:2202.09741 (2022)","DOI":"10.1007\/s41095-023-0364-2"},{"key":"10_CR10","unstructured":"Guo, M.H., et al.: SegNeXt: rethinking convolutional attention design for semantic segmentation. arXiv preprint arXiv:2209.08575 (2022)"},{"key":"10_CR11","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-Net: convolutional networks for biomedical image segmentation. In: MICCAI (2015)","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"10_CR12","first-page":"12077","volume":"34","author":"E Xie","year":"2021","unstructured":"Xie, E., Wang, W., Yu, Z., Anandkumar, A., Alvarez, J.M., Luo, P.: Segformer: simple and efficient design for semantic segmentation with transformers. Adv. Neural Inf. Process. Syst. 34, 12077\u201312090 (2021)","journal-title":"Adv. Neural Inf. Process. Syst."},{"issue":"4","key":"10_CR13","doi-asserted-by":"publisher","first-page":"834","DOI":"10.1109\/TPAMI.2017.2699184","volume":"40","author":"LC Chen","year":"2018","unstructured":"Chen, L.C., Papandreou, G., Kokkinos, I., Murphy, K., Yuille, A.L.: Deeplab: semantic image segmentation with deep convolutional nets, Atrous convolution, and fully connected CRFs. IEEE Trans. Pattern Anal. Mach. Intell. 40(4), 834\u2013848 (2018)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10_CR14","unstructured":"Ge, Z., Liu, S., Wang, F., Zeming, L., Jian, S.: YOLOX: exceeding YOLO series in 2021. arXiv preprint arXiv:2107.08430 (2021)"},{"key":"10_CR15","doi-asserted-by":"crossref","unstructured":"Song, G., Liu, Y., Wang, X.: Revisiting the sibling head in object detector. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01158"},{"key":"10_CR16","doi-asserted-by":"crossref","unstructured":"Wu, Y, Chen, Y., Yuan, L. et al.: Rethinking classification and localization for object detection. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01020"},{"key":"10_CR17","unstructured":"Liu, C., Schwing, A., Kundu, K., Urtasun, R., and Fidler, S.: Rent3D: floor-plan priors for monocular layout estimation. In: CVPR (2015)"},{"key":"10_CR18","doi-asserted-by":"crossref","unstructured":"Zeng, Z., Li, X., Yu, Y.K., Fu, C.W.: Deep floor plan recognition using a multi-task network with room-boundary-guided attention. In: ICCV, pp. 9095\u20139103 (2019)","DOI":"10.1109\/ICCV.2019.00919"},{"key":"10_CR19","doi-asserted-by":"crossref","unstructured":"Ge, Z., Liu, S., Li, Z., Yoshie, O., and Sun, J.: OTA: optimal transport assignment for object detection. In CVPR, pp. 303\u2013312 (2021)","DOI":"10.1109\/CVPR46437.2021.00037"},{"key":"10_CR20","unstructured":"Redmon, J., Farhadi, A.: YOLOv3: an incremental improvement. arXiv preprint arXiv:1804.02767 (2018)"},{"issue":"6","key":"10_CR21","doi-asserted-by":"publisher","first-page":"2066","DOI":"10.3390\/app10062066","volume":"10","author":"Y Zhao","year":"2020","unstructured":"Zhao, Y., Xueyuan, D., Huahui, L.: A deep learning-based method to detect components from scanned structural drawings for reconstructing 3D models. Appl. Sci. 10(6), 2066 (2020)","journal-title":"Appl. Sci."},{"key":"10_CR22","doi-asserted-by":"crossref","unstructured":"Rezvanifar, A., Cote, M., and Albu, A.B.: Symbol spotting on digital architectural floor plans using a deep learning-based framework. In: CVPRW (2020)","DOI":"10.1109\/CVPRW50498.2020.00292"},{"key":"10_CR23","doi-asserted-by":"crossref","unstructured":"Fan, Z., Zhu, L., Li, H., et al.: FloorPlanCAD: a large-scale CAD drawing dataset for panoptic symbol spotting. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00997"},{"key":"10_CR24","unstructured":"Nicolas, C., Francisco, M., Gabriel, S., Nicolas, U., Alexander, K., Sergey, Z.: End-to-end object detection with transformers. arXiv:2005.12872 (2020)"},{"key":"10_CR25","unstructured":"Ze, L., Yutong, L., Yue, C., et al.: End-to-end object detection with transformers. In: ICCV (2021)"},{"issue":"10","key":"10_CR26","doi-asserted-by":"publisher","first-page":"3349","DOI":"10.1109\/TPAMI.2020.2983686","volume":"43","author":"J Wang","year":"2020","unstructured":"Wang, J., Sun, K., Cheng, T., et al.: Deep high-resolution representation learning for visual recognition. IEEE Trans. Pattern Anal. Mach. Intell. 43(10), 3349\u20133364 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"10_CR27","unstructured":"Liang-Chieh, C., George, P., Iasonas, K., Kevin, M., Alan, L.Y.: Semantic image segmentation with deep convolutional nets and fully connected CRFs. arXiv preprint arXiv:1412.7062 (2014)"},{"key":"10_CR28","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: ICCV(2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"10_CR29","doi-asserted-by":"publisher","first-page":"1137","DOI":"10.1109\/TPAMI.2016.2577031","volume":"39","author":"S Ren","year":"2017","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. IEEE Trans. Pattern Anal. Mach. Intell. 39, 1137\u20131149 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."}],"container-title":["Lecture Notes in Computer Science","Document Analysis and Recognition \u2013 ICDAR 2023 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-41498-5_10","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,8,14]],"date-time":"2023-08-14T19:24:51Z","timestamp":1692041091000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-41498-5_10"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031414978","9783031414985"],"references-count":29,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-41498-5_10","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"15 August 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICDAR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Document Analysis and Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"San Jos\u00e9, CA","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 August 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 August 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icdar2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icdar2023.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Easychair","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"316","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"154","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"49% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.89","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1.50","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Number and type of other papers accepted : IJDAR track papers","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}