{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,7]],"date-time":"2026-02-07T11:07:05Z","timestamp":1770462425628,"version":"3.49.0"},"publisher-location":"Cham","reference-count":45,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031250712","type":"print"},{"value":"9783031250729","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-25072-9_9","type":"book-chapter","created":{"date-parts":[[2023,2,17]],"date-time":"2023-02-17T08:40:04Z","timestamp":1676623204000},"page":"130-146","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["Enhanced Coarse-to-Fine Network for\u00a0Image Restoration from\u00a0Under-Display 
Cameras"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8753-6606","authenticated-orcid":false,"given":"Yurui","family":"Zhu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1376-2712","authenticated-orcid":false,"given":"Xi","family":"Wang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8036-4071","authenticated-orcid":false,"given":"Xueyang","family":"Fu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5708-7018","authenticated-orcid":false,"given":"Xiaowei","family":"Hu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,2,18]]},"reference":[{"key":"9_CR1","unstructured":"Agarap, A.F.: Deep learning using rectified linear units (ReLU). arXiv preprint arXiv:1803.08375 (2018)"},{"key":"9_CR2","doi-asserted-by":"crossref","unstructured":"Anwar, S., Barnes, N.: Real image denoising with feature attention. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 3155\u20133164 (2019)","DOI":"10.1109\/ICCV.2019.00325"},{"key":"9_CR3","doi-asserted-by":"crossref","unstructured":"Chen, D., et al.: Gated context aggregation network for image dehazing and deraining. In: 2019 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 1375\u20131383. IEEE (2019)","DOI":"10.1109\/WACV.2019.00151"},{"key":"9_CR4","doi-asserted-by":"publisher","unstructured":"Chen, L., Chu, X., Zhang, X., Sun, J.: Simple baselines for image restoration. In: Avidan, S., Brostow, G., Cisse, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022. LNCS, vol. 13667. Springer, Cham. https:\/\/doi.org\/10.1007\/978-3-031-20071-7_2","DOI":"10.1007\/978-3-031-20071-7_2"},{"key":"9_CR5","doi-asserted-by":"crossref","unstructured":"Cheng, C.J., et al.: P-79: evaluation of diffraction induced background image quality degradation through transparent OLED display. 
In: SID Symposium Digest of Technical Papers, vol. 50, pp. 1533\u20131536. Wiley Online Library (2019)","DOI":"10.1002\/sdtp.13235"},{"key":"9_CR6","doi-asserted-by":"crossref","unstructured":"Cho, S.J., Ji, S.W., Hong, J.P., Jung, S.W., Ko, S.J.: Rethinking coarse-to-fine approach in single image deblurring. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4641\u20134650 (2021)","DOI":"10.1109\/ICCV48922.2021.00460"},{"key":"9_CR7","doi-asserted-by":"crossref","unstructured":"Feng, R., Li, C., Chen, H., Li, S., Loy, C.C., Gu, J.: Removing diffraction image artifacts in under-display camera via dynamic skip connection network. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 662\u2013671 (2021)","DOI":"10.1109\/CVPR46437.2021.00072"},{"key":"9_CR8","doi-asserted-by":"crossref","unstructured":"Fu, X., Qi, Q., Zha, Z.J., Zhu, Y., Ding, X.: Rain streak removal via dual graph convolutional network. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 1352\u20131360 (2021)","DOI":"10.1609\/aaai.v35i2.16224"},{"key":"9_CR9","doi-asserted-by":"crossref","unstructured":"Gao, H., Tao, X., Shen, X., Jia, J.: Dynamic scene deblurring with parameter selective sharing and nested skip connections. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3848\u20133856 (2019)","DOI":"10.1109\/CVPR.2019.00397"},{"key":"9_CR10","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Delving deep into rectifiers: surpassing human-level performance on ImageNet classification. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1026\u20131034 (2015)","DOI":"10.1109\/ICCV.2015.123"},{"key":"9_CR11","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"9_CR12","unstructured":"Hendrycks, D., Gimpel, K.: Gaussian error linear units (GELUs). arXiv preprint arXiv:1606.08415 (2016)"},{"key":"9_CR13","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4700\u20134708 (2017)","DOI":"10.1109\/CVPR.2017.243"},{"key":"9_CR14","unstructured":"Jia, X., De Brabandere, B., Tuytelaars, T., Gool, L.V.: Dynamic filter networks. In: 29th Proceedings of Conference on Advances in Neural Information Processing Systems (2016)"},{"key":"9_CR15","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"239","DOI":"10.1007\/978-3-030-01228-1_15","volume-title":"Computer Vision \u2013 ECCV 2018","author":"S-W Kim","year":"2018","unstructured":"Kim, S.-W., Kook, H.-K., Sun, J.-Y., Kang, M.-C., Ko, S.-J.: Parallel feature pyramid network for object detection. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11209, pp. 239\u2013256. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01228-1_15"},{"key":"9_CR16","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"9_CR17","doi-asserted-by":"crossref","unstructured":"Koh, J., Lee, J., Yoon, S.: BNUDC: a two-branched deep neural network for restoring images from under-display cameras. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
1950\u20131959 (2022)","DOI":"10.1109\/CVPR52688.2022.00199"},{"issue":"20","key":"9_CR18","first-page":"1","volume":"2016","author":"HJ Kwon","year":"2016","unstructured":"Kwon, H.J., Yang, C.M., Kim, M.C., Kim, C.W., Ahn, J.Y., Kim, P.R.: Modeling of luminance transition curve of transparent plastics on transparent OLED displays. Electr. Imaging 2016(20), 1\u20134 (2016)","journal-title":"Electr. Imaging"},{"key":"9_CR19","unstructured":"Liu, D., Wen, B., Fan, Y., Loy, C.C., Huang, T.S.: Non-local recurrent network for image restoration. In: 31st Proceedings of Conference on Advances in Neural Information Processing Systems (2018)"},{"key":"9_CR20","doi-asserted-by":"crossref","unstructured":"Liu, Z., Mao, H., Wu, C.Y., Feichtenhofer, C., Darrell, T., Xie, S.: A convnet for the 2020s. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11976\u201311986 (2022)","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"9_CR21","doi-asserted-by":"crossref","unstructured":"Ma, Y., Liu, X., Bai, S., Wang, L., He, D., Liu, A.: Coarse-to-fine image inpainting via region-wise convolutions and non-local correlation. In: IJCAI, pp. 3123\u20133129 (2019)","DOI":"10.24963\/ijcai.2019\/433"},{"key":"9_CR22","doi-asserted-by":"crossref","unstructured":"Nah, S., Hyun Kim, T., Mu Lee, K.: Deep multi-scale convolutional neural network for dynamic scene deblurring. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3883\u20133891 (2017)","DOI":"10.1109\/CVPR.2017.35"},{"key":"9_CR23","doi-asserted-by":"crossref","unstructured":"Pan, X., Zhan, X., Dai, B., Lin, D., Loy, C.C., Luo, P.: Exploiting deep generative prior for versatile image restoration and manipulation. IEEE Trans. Pattern Anal. Mach. Intell. 
44, 7474\u20137489 (2021)","DOI":"10.1109\/TPAMI.2021.3115428"},{"key":"9_CR24","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"364","DOI":"10.1007\/978-3-030-68238-5_28","volume-title":"Computer Vision \u2013 ECCV 2020 Workshops","author":"H Panikkasseril Sethumadhavan","year":"2020","unstructured":"Panikkasseril Sethumadhavan, H., Puthussery, D., Kuriakose, M., Charangatt Victor, J.: Transform domain pyramidal dilated convolution networks for restoration of under display camera images. In: Bartoli, A., Fusiello, A. (eds.) ECCV 2020. LNCS, vol. 12539, pp. 364\u2013378. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-68238-5_28"},{"issue":"11","key":"9_CR25","doi-asserted-by":"publisher","first-page":"1242","DOI":"10.1109\/JDT.2016.2594815","volume":"12","author":"Z Qin","year":"2016","unstructured":"Qin, Z., Tsai, Y.H., Yeh, Y.W., Huang, Y.P., Shieh, H.P.D.: See-through image blurring of transparent organic light-emitting diodes display: calculation method based on diffraction and analysis of pixel structures. J. Display Technol. 12(11), 1242\u20131249 (2016)","journal-title":"J. Display Technol."},{"issue":"4","key":"9_CR26","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/JPHOT.2017.2722000","volume":"9","author":"Z Qin","year":"2017","unstructured":"Qin, Z., Xie, J., Lin, F.C., Huang, Y.P., Shieh, H.P.D.: Evaluation of a transparent display\u2019s pixel structure regarding subjective quality of diffracted see-through images. IEEE Photonics J. 9(4), 1\u201314 (2017)","journal-title":"IEEE Photonics J."},{"key":"9_CR27","doi-asserted-by":"crossref","unstructured":"Ren, W., et al.: Gated fusion network for single image dehazing. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
3253\u20133261 (2018)","DOI":"10.1109\/CVPR.2018.00343"},{"key":"9_CR28","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"379","DOI":"10.1007\/978-3-030-68238-5_29","volume-title":"Computer Vision \u2013 ECCV 2020 Workshops","author":"V Sundar","year":"2020","unstructured":"Sundar, V., Hegde, S., Kothandaraman, D., Mitra, K.: Deep Atrous guided filter for image restoration in under display cameras. In: Bartoli, A., Fusiello, A. (eds.) ECCV 2020. LNCS, vol. 12539, pp. 379\u2013397. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-68238-5_29"},{"key":"9_CR29","doi-asserted-by":"crossref","unstructured":"Tang, Q., Jiang, H., Mei, X., Hou, S., Liu, G., Li, Z.: 28\u20132: study of the image blur through FFS LCD panel caused by diffraction for camera under panel. In: SID Symposium Digest of Technical Papers, vol. 51, pp. 406\u2013409. Wiley Online Library (2020)","DOI":"10.1002\/sdtp.13890"},{"key":"9_CR30","unstructured":"Wang, L., Li, Y., Wang, S.: Deepdeblur: fast one-step blurry face images restoration. arXiv preprint arXiv:1711.09515 (2017)"},{"key":"9_CR31","doi-asserted-by":"crossref","unstructured":"Wang, X., Chan, K.C., Yu, K., Dong, C., Change Loy, C.: EDVR: video restoration with enhanced deformable convolutional networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (2019)","DOI":"10.1109\/CVPRW.2019.00247"},{"issue":"4","key":"9_CR32","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. 
Image Process."},{"key":"9_CR33","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"398","DOI":"10.1007\/978-3-030-68238-5_30","volume-title":"Computer Vision \u2013 ECCV 2020 Workshops","author":"Q Yang","year":"2020","unstructured":"Yang, Q., Liu, Y., Tang, J., Ku, T.: Residual and dense UNet for\u00a0under-display camera restoration. In: Bartoli, A., Fusiello, A. (eds.) ECCV 2020. LNCS, vol. 12539, pp. 398\u2013408. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-68238-5_30"},{"key":"9_CR34","doi-asserted-by":"crossref","unstructured":"Zamir, S.W., Arora, A., Khan, S., Hayat, M., Khan, F.S., Yang, M.H.: Restormer: efficient transformer for high-resolution image restoration. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5728\u20135739 (2022)","DOI":"10.1109\/CVPR52688.2022.00564"},{"key":"9_CR35","doi-asserted-by":"crossref","unstructured":"Zamir, S.W., et al.: Multi-stage progressive image restoration. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14821\u201314831 (2021)","DOI":"10.1109\/CVPR46437.2021.01458"},{"key":"9_CR36","doi-asserted-by":"crossref","unstructured":"Zamir, S.W., et al.: Multi-stage progressive image restoration. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01458"},{"key":"9_CR37","doi-asserted-by":"crossref","unstructured":"Zhang, H., Dai, Y., Li, H., Koniusz, P.: Deep stacked hierarchical multi-patch network for image deblurring. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5978\u20135986 (2019)","DOI":"10.1109\/CVPR.2019.00613"},{"issue":"5","key":"9_CR38","doi-asserted-by":"publisher","first-page":"1109","DOI":"10.1109\/TNNLS.2015.2511069","volume":"28","author":"K Zhang","year":"2016","unstructured":"Zhang, K., Tao, D., Gao, X., Li, X., Li, J.: Coarse-to-fine learning for single-image super-resolution. IEEE Trans. Neural Netw. Learn. Syst. 
28(5), 1109\u20131122 (2016)","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"9_CR39","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"9_CR40","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"294","DOI":"10.1007\/978-3-030-01234-2_18","volume-title":"Computer Vision \u2013 ECCV 2018","author":"Y Zhang","year":"2018","unstructured":"Zhang, Y., Li, K., Li, K., Wang, L., Zhong, B., Fu, Y.: Image super-resolution using very deep residual channel attention networks. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11211, pp. 294\u2013310. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01234-2_18"},{"key":"9_CR41","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image super-resolution. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2472\u20132481 (2018)","DOI":"10.1109\/CVPR.2018.00262"},{"issue":"7","key":"9_CR42","doi-asserted-by":"publisher","first-page":"2480","DOI":"10.1109\/TPAMI.2020.2968521","volume":"43","author":"Y Zhang","year":"2020","unstructured":"Zhang, Y., Tian, Y., Kong, Y., Zhong, B., Fu, Y.: Residual dense network for image restoration. IEEE Trans. Pattern Anal. Mach. Intell. 43(7), 2480\u20132495 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"9_CR43","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"56","DOI":"10.1007\/978-3-030-67070-2_3","volume-title":"Computer Vision \u2013 ECCV 2020 Workshops","author":"H Zhao","year":"2020","unstructured":"Zhao, H., Kong, X., He, J., Qiao, Yu., Dong, C.: Efficient image super-resolution using pixel attention. In: Bartoli, A., Fusiello, A. (eds.) ECCV 2020. LNCS, vol. 12537, pp. 56\u201372. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-67070-2_3"},{"key":"9_CR44","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"337","DOI":"10.1007\/978-3-030-68238-5_26","volume-title":"Computer Vision \u2013 ECCV 2020 Workshops","author":"Y Zhou","year":"2020","unstructured":"Zhou, Y., et al.: UDC 2020 challenge on image restoration of under-display camera: methods and results. In: Bartoli, A., Fusiello, A. (eds.) ECCV 2020. LNCS, vol. 12539, pp. 337\u2013351. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-68238-5_26"},{"key":"9_CR45","doi-asserted-by":"crossref","unstructured":"Zhou, Y., Ren, D., Emerton, N., Lim, S., Large, T.: Image restoration for under-display camera. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
9179\u20139188 (2021)","DOI":"10.1109\/CVPR46437.2021.00906"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-25072-9_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,12]],"date-time":"2024-03-12T15:34:16Z","timestamp":1710257656000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-25072-9_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031250712","9783031250729"],"references-count":45,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-25072-9_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"18 February 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole 
number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}