{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T17:56:54Z","timestamp":1774720614749,"version":"3.50.1"},"publisher-location":"Cham","reference-count":47,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031197963","type":"print"},{"value":"9783031197970","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19797-0_31","type":"book-chapter","created":{"date-parts":[[2022,11,2]],"date-time":"2022-11-02T20:28:41Z","timestamp":1667420921000},"page":"539-555","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":124,"title":["ReCoNet: Recurrent Correction Network for\u00a0Fast and\u00a0Efficient Multi-modality Image Fusion"],"prefix":"10.1007","author":[{"given":"Zhanbo","family":"Huang","sequence":"first","affiliation":[]},{"given":"Jinyuan","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Fan","sequence":"additional","affiliation":[]},{"given":"Risheng","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Zhong","sequence":"additional","affiliation":[]},{"given":"Zhongxuan","family":"Luo","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,3]]},"reference":[{"key":"31_CR1","unstructured":"Bochkovskiy, A., Wang, C.Y., Liao, H.Y.M.: YOLOv4: optimal speed and accuracy of object detection. arXiv preprint arXiv:2004.10934 (2020)"},{"key":"31_CR2","doi-asserted-by":"crossref","unstructured":"Bras\u00f3, G., Leal-Taix\u00e9, L.: Learning a neural solver for multiple object tracking. In: IEEE CVPR, pp. 6247\u20136257 (2020)","DOI":"10.1109\/CVPR42600.2020.00628"},{"key":"31_CR3","doi-asserted-by":"crossref","unstructured":"Cordts, M., et al.: The cityscapes dataset for semantic urban scene understanding. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)","DOI":"10.1109\/CVPR.2016.350"},{"issue":"11","key":"31_CR4","doi-asserted-by":"publisher","first-page":"3827","DOI":"10.3390\/s18113827","volume":"18","author":"Q Du","year":"2018","unstructured":"Du, Q., Xu, H., Ma, Y., Huang, J., Fan, F.: Fusing infrared and visible images of different resolutions via total variation model. Sensors 18(11), 3827 (2018)","journal-title":"Sensors"},{"key":"31_CR5","doi-asserted-by":"crossref","unstructured":"Fu, J., et al.: Dual attention network for scene segmentation. In: CVPR, pp. 3146\u20133154 (2019)","DOI":"10.1109\/CVPR.2019.00326"},{"issue":"9","key":"31_CR6","doi-asserted-by":"publisher","first-page":"4224","DOI":"10.1109\/TII.2018.2822828","volume":"14","author":"H Gao","year":"2018","unstructured":"Gao, H., Cheng, B., Wang, J., Li, K., Zhao, J., Li, D.: Object classification using CNN-based fusion of vision and lidar in autonomous vehicle environment. IEEE Trans. Ind. Informat. 14(9), 4224\u20134231 (2018)","journal-title":"IEEE Trans. Ind. 
Informat."},{"key":"31_CR7","doi-asserted-by":"crossref","unstructured":"Geiger, A., Lenz, P., Stiller, C., Urtasun, R.: Vision meets robotics: the KITTI dataset. Int. J. Rob. Res. 32(11), 1231\u20131237 (2013)","DOI":"10.1177\/0278364913491297"},{"key":"31_CR8","doi-asserted-by":"crossref","unstructured":"Godard, C., Mac Aodha, O., Brostow, G.J.: Unsupervised monocular depth estimation with left-right consistency. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.699"},{"key":"31_CR9","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2961\u20132969 (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"31_CR10","doi-asserted-by":"crossref","unstructured":"Jiang, Z., Li, Z., Yang, S., Fan, X., Liu, R.: Target oriented perceptual adversarial fusion network for underwater image enhancement. IEEE Trans. Circ. Syst. Video Technol. 32, 6584\u2013 6598 (2022)","DOI":"10.1109\/TCSVT.2022.3174817"},{"key":"31_CR11","unstructured":"Kristan, M., et al.: The visual object tracking vot2017 challenge results. In: Proceedings of the IEEE International Conference on Computer Vision Workshops, pp. 1949\u20131972 (2017)"},{"issue":"12","key":"31_CR12","doi-asserted-by":"publisher","first-page":"9887","DOI":"10.1109\/TIE.2019.2898618","volume":"66","author":"X Lan","year":"2019","unstructured":"Lan, X., et al.: Learning modality-consistency feature templates: a robust RGB-infrared tracking system. IEEE Tran. Ind. Enformat. 66(12), 9887\u20139897 (2019)","journal-title":"IEEE Tran. Ind. Enformat."},{"issue":"5","key":"31_CR13","doi-asserted-by":"publisher","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","volume":"28","author":"H Li","year":"2018","unstructured":"Li, H., Wu, X.J.: DenseFuse: a fusion approach to infrared and visible images. IEEE Trans. Image Process. 28(5), 2614\u20132623 (2018)","journal-title":"IEEE Trans. Image Process."},{"key":"31_CR14","doi-asserted-by":"publisher","first-page":"72","DOI":"10.1016\/j.inffus.2021.02.023","volume":"73","author":"H Li","year":"2021","unstructured":"Li, H., Wu, X.J., Kittler, J.: RFN-nest: an end-to-end residual fusion network for infrared and visible images. Inf. Fus. 73, 72\u201386 (2021)","journal-title":"Inf. Fus."},{"key":"31_CR15","doi-asserted-by":"publisher","first-page":"1383","DOI":"10.1109\/TMM.2020.2997127","volume":"23","author":"J Li","year":"2020","unstructured":"Li, J., Huo, H., Li, C., Wang, R., Feng, Q.: AttentionfGAN: Infrared and visible image fusion using attention-based generative adversarial networks. IEEE Trans. Multimedia 23, 1383\u20131396 (2020)","journal-title":"IEEE Trans. Multimedia"},{"key":"31_CR16","unstructured":"Li, P.: Didfuse: deep image decomposition for infrared and visible image fusion. In: Proceedings of the Twenty-Ninth International Conference on International Joint Conferences on Artificial Intelligence, pp. 976\u2013976 (2021)"},{"issue":"7","key":"31_CR17","doi-asserted-by":"publisher","first-page":"2864","DOI":"10.1109\/TIP.2013.2244222","volume":"22","author":"S Li","year":"2013","unstructured":"Li, S., Kang, X., Hu, J.: Image fusion with guided filtering. IEEE Trans. Image Process. 22(7), 2864\u20132875 (2013)","journal-title":"IEEE Trans. Image Process."},{"key":"31_CR18","doi-asserted-by":"crossref","unstructured":"Liu, J., et al.: Target-aware dual adversarial learning and a multi-scenario multi-modality benchmark to fuse infrared and visible for object detection. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5802\u20135811 (2022)","DOI":"10.1109\/CVPR52688.2022.00571"},{"key":"31_CR19","doi-asserted-by":"crossref","unstructured":"Liu, J., Fan, X., Jiang, J., Liu, R., Luo, Z.: Learning a deep multi-scale feature ensemble and an edge-attention guidance for image fusion. IEEE Trans. Circ. Syst. Video Technol. (2021)","DOI":"10.1109\/TCSVT.2021.3056725"},{"key":"31_CR20","doi-asserted-by":"publisher","unstructured":"Liu, J., Shang, J., Liu, R., Fan, X.: Attention-guided global-local adversarial learning for detail-preserving multi-exposure image fusion. IEEE Trans. Circ. Syst. Video Technol. 32, 5026\u20135040 (2022). https:\/\/doi.org\/10.1109\/TCSVT.2022.3144455","DOI":"10.1109\/TCSVT.2022.3144455"},{"key":"31_CR21","doi-asserted-by":"publisher","first-page":"1818","DOI":"10.1109\/LSP.2021.3109818","volume":"28","author":"J Liu","year":"2021","unstructured":"Liu, J., Wu, Y., Huang, Z., Liu, R., Fan, X.: SMOA: searching a modality-oriented architecture for infrared and visible image fusion. IEEE Signal Process. Lett. 28, 1818\u20131822 (2021)","journal-title":"IEEE Signal Process. Lett."},{"key":"31_CR22","doi-asserted-by":"publisher","first-page":"1261","DOI":"10.1109\/TIP.2020.3043125","volume":"30","author":"R Liu","year":"2021","unstructured":"Liu, R., Liu, J., Jiang, Z., Fan, X., Luo, Z.: A bilevel integrated model with data-driven layer ensemble for multi-modality image fusion. IEEE Trans. Image Process. 30, 1261\u20131274 (2021). https:\/\/doi.org\/10.1109\/TIP.2020.3043125","journal-title":"IEEE Trans. Image Process."},{"key":"31_CR23","doi-asserted-by":"crossref","unstructured":"Liu, R., Liu, Z., Liu, J., Fan, X.: Searching a hierarchically aggregated fusion architecture for fast multi-modality image fusion. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 1600\u20131608 (2021)","DOI":"10.1145\/3474085.3475299"},{"key":"31_CR24","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"21","DOI":"10.1007\/978-3-319-46448-0_2","volume-title":"Computer Vision \u2013 ECCV 2016","author":"W Liu","year":"2016","unstructured":"Liu, W., et al.: SSD: single shot MultiBox detector. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9905, pp. 21\u201337. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46448-0_2"},{"key":"31_CR25","doi-asserted-by":"publisher","first-page":"100","DOI":"10.1016\/j.inffus.2016.02.001","volume":"31","author":"J Ma","year":"2016","unstructured":"Ma, J., Chen, C., Li, C., Huang, J.: Infrared and visible image fusion via gradient transfer and total variation minimization. Inf. Fus. 31, 100\u2013109 (2016)","journal-title":"Inf. Fus."},{"key":"31_CR26","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma, J., Yu, W., Liang, P., Li, C., Jiang, J.: FusionGAN: a generative adversarial network for infrared and visible image fusion. Inf. Fus. 48, 11\u201326 (2019)","journal-title":"Inf. Fus."},{"key":"31_CR27","first-page":"1","volume":"70","author":"J Ma","year":"2020","unstructured":"Ma, J., Zhang, H., Shao, Z., Liang, P., Xu, H.: GANMcC: a generative adversarial network with multiclassification constraints for infrared and visible image fusion. IEEE Trans. Instrum. Meas. 70, 1\u201314 (2020)","journal-title":"IEEE Trans. 
Instrum Meaure."},{"key":"31_CR28","doi-asserted-by":"publisher","first-page":"8","DOI":"10.1016\/j.infrared.2017.02.005","volume":"82","author":"J Ma","year":"2017","unstructured":"Ma, J., Zhou, Z., Wang, B., Zong, H.: Infrared and visible image fusion based on visual saliency map and weighted least square optimization. Infr. Phys. Technol. 82, 8\u201317 (2017)","journal-title":"Infr. Phys. Technol."},{"issue":"2","key":"31_CR29","doi-asserted-by":"publisher","first-page":"143","DOI":"10.1016\/j.inffus.2006.02.001","volume":"8","author":"F Nencini","year":"2007","unstructured":"Nencini, F., Garzelli, A., Baronti, S., Alparone, L.: Remote sensing image fusion using the curvelet transform. Inf. Fus. 8(2), 143\u2013156 (2007)","journal-title":"Inf. Fus."},{"issue":"5","key":"31_CR30","doi-asserted-by":"publisher","first-page":"639","DOI":"10.1109\/LGRS.2017.2668299","volume":"14","author":"F Palsson","year":"2017","unstructured":"Palsson, F., Sveinsson, J.R., Ulfarsson, M.O.: Multispectral and hyperspectral image fusion using a 3-d-convolutional neural network. IEEE Geosci. Remote Sens. Lett. 14(5), 639\u2013643 (2017)","journal-title":"IEEE Geosci. Remote Sens. Lett."},{"key":"31_CR31","doi-asserted-by":"publisher","first-page":"13","DOI":"10.1016\/j.infrared.2017.11.006","volume":"88","author":"N Paramanandham","year":"2018","unstructured":"Paramanandham, N., Rajendiran, K.: Infrared and visible image fusion using discrete cosine transform and swarm intelligence for surveillance applications. Infrar. Phys. Technol. 88, 13\u201322 (2018)","journal-title":"Infrar. Phys. Technol."},{"key":"31_CR32","unstructured":"Paszke, A., et al.: An imperative style, high-performance deep learning library. In: Wallach, H., Larochelle, H., Beygelzimer, A., d\u2019Alch\u00e9-Buc, F., Fox, E., Garnett, R. (eds.) Advances in Neural Information Processing Systems, vol. 32, pp. 8024\u20138035. Curran Associates, Inc. (2019), http:\/\/papers.neurips.cc\/paper\/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf"},{"key":"31_CR33","doi-asserted-by":"crossref","unstructured":"Pu, M., Huang, Y., Guan, Q., Zou, Q.: GraphNet: learning image pseudo annotations for weakly-supervised semantaic segmentation. In: ACM MM, pp. 483\u2013491. ACM (2018)","DOI":"10.1145\/3240508.3240542"},{"key":"31_CR34","doi-asserted-by":"crossref","unstructured":"Qin, X., Zhang, Z., Huang, C., Dehghan, M., Zaiane, O., Jagersand, M.: U2-Net: going deeper with nested u-structure for salient object detection, vol. 106, p. 107404 (2020)","DOI":"10.1016\/j.patcog.2020.107404"},{"key":"31_CR35","doi-asserted-by":"crossref","unstructured":"Ranftl, R., Lasinger, K., Hafner, D., Schindler, K., Koltun, V.: Towards robust monocular depth estimation: mixing datasets for zero-shot cross-dataset transfer. IEEE Trans. Pattern Anal. Mach. Intell. 44, 1623\u20131637 (2020)","DOI":"10.1109\/TPAMI.2020.3019967"},{"issue":"5","key":"31_CR36","doi-asserted-by":"publisher","first-page":"1193","DOI":"10.1007\/s11760-013-0556-9","volume":"9","author":"B Shreyamsha Kumar","year":"2015","unstructured":"Shreyamsha Kumar, B.: Image fusion based on pixel significance using cross bilateral filter. Sig. Image Video. Process. 9(5), 1193\u20131204 (2015)","journal-title":"Sig. Image Video. Process."},{"key":"31_CR37","doi-asserted-by":"publisher","first-page":"249","DOI":"10.1016\/j.dib.2017.09.038","volume":"15","author":"A Toet","year":"2017","unstructured":"Toet, A.: The tno multiband image data collection. 
Data Brief 15, 249 (2017)","journal-title":"Data Brief"},{"key":"31_CR38","doi-asserted-by":"crossref","unstructured":"Wang, D., Liu, J., Fan, X., Liu, R.: Unsupervised misaligned infrared and visible image fusion via cross-modality image generation and registration. arXiv preprint arXiv:2205.11876 (2022)","DOI":"10.24963\/ijcai.2022\/487"},{"key":"31_CR39","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"316","DOI":"10.1007\/978-3-030-58558-7_19","volume-title":"Computer Vision \u2013 ECCV 2020","author":"L Wang","year":"2020","unstructured":"Wang, L., Zhang, J., Wang, Y., Lu, H., Ruan, X.: CLIFFNet for monocular depth estimation with hierarchical embedding loss. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12350, pp. 316\u2013331. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58558-7_19"},{"key":"31_CR40","doi-asserted-by":"crossref","unstructured":"Xiao, Y., Codevilla, F., Gurram, A., Urfalioglu, O., L\u00f3pez, A.M.: Multimodal end-to-end autonomous driving. IEEE Trans. Intell. Transp. Syst. 23, 537\u2013547 (2020)","DOI":"10.1109\/TITS.2020.3013234"},{"key":"31_CR41","unstructured":"Xu, H., Ma, J., Jiang, J., Guo, X., Ling, H.: U2Fusion: a unified unsupervised image fusion network. IEEE Trans. Pattern Anal. Mach. Intell. (2020)"},{"key":"31_CR42","doi-asserted-by":"crossref","unstructured":"Xu, H., Ma, J., Yuan, J., Le, Z., Liu, W.: RFNet: unsupervised network for mutually reinforcing multi-modal image registration and fusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19679\u201319688 (2022)","DOI":"10.1109\/CVPR52688.2022.01906"},{"issue":"11","key":"31_CR43","doi-asserted-by":"publisher","first-page":"5596","DOI":"10.1109\/TIP.2019.2919201","volume":"28","author":"T Xu","year":"2019","unstructured":"Xu, T., Feng, Z.H., Wu, X.J., Kittler, J.: Learning adaptive discriminative correlation filters via temporal consistency preserving spatial feature selection for robust visual object tracking. IEEE Trans. Image Process. 28(11), 5596\u20135609 (2019)","journal-title":"IEEE Trans. Image Process."},{"key":"31_CR44","doi-asserted-by":"crossref","unstructured":"Zhang, H., Xu, H., Xiao, Y., Guo, X., Ma, J.: Rethinking the image fusion: a fast unified image fusion network based on proportional maintenance of gradient and intensity. In: AAAI, vol. 34, pp. 12797\u201312804 (2020)","DOI":"10.1609\/aaai.v34i07.6975"},{"key":"31_CR45","doi-asserted-by":"crossref","unstructured":"Zhang, L., Zhu, X., Chen, X., Yang, X., Lei, Z., Liu, Z.: Weakly aligned cross-modal learning for multispectral pedestrian detection. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5127\u20135137 (2019)","DOI":"10.1109\/ICCV.2019.00523"},{"key":"31_CR46","doi-asserted-by":"publisher","first-page":"166","DOI":"10.1016\/j.inffus.2020.05.002","volume":"63","author":"X Zhang","year":"2020","unstructured":"Zhang, X., Ye, P., Leung, H., Gong, K., Xiao, G.: Object fusion tracking based on visible and infrared images: a comprehensive review. Inf. Fus. 63, 166\u2013187 (2020)","journal-title":"Inf. Fus."},{"key":"31_CR47","doi-asserted-by":"crossref","unstructured":"Zhao, J.X., Liu, J.J., Fan, D.P., Cao, Y., Yang, J., Cheng, M.M.: EGNet: edge guidance network for salient object detection. In: ICCV, pp. 
8779\u20138788 (2019)","DOI":"10.1109\/ICCV.2019.00887"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19797-0_31","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,2]],"date-time":"2022-11-02T20:46:51Z","timestamp":1667422011000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19797-0_31"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031197963","9783031197970"],"references-count":47,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19797-0_31","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"3 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the 
conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}