{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,6]],"date-time":"2026-02-06T00:18:43Z","timestamp":1770337123322,"version":"3.49.0"},"publisher-location":"Cham","reference-count":61,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198113","type":"print"},{"value":"9783031198120","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19812-0_16","type":"book-chapter","created":{"date-parts":[[2022,10,29]],"date-time":"2022-10-29T14:03:42Z","timestamp":1667052222000},"page":"266-283","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":25,"title":["Hierarchical Memory Learning for\u00a0Fine-Grained Scene Graph Generation"],"prefix":"10.1007","author":[{"given":"Youming","family":"Deng","sequence":"first","affiliation":[]},{"given":"Yansheng","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yongjun","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xiang","family":"Xiang","sequence":"additional","affiliation":[]},{"given":"Jian","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Jingdong","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Jiayi","family":"Ma","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,30]]},"reference":[{"key":"16_CR1","doi-asserted-by":"crossref","unstructured":"Agrawal, A., 
Batra, D., Parikh, D., Kembhavi, A.: Don\u2019t just assume; look and answer: overcoming priors for visual question answering. In: CVPR, pp. 4971\u20134980 (2018)","DOI":"10.1109\/CVPR.2018.00522"},{"key":"16_CR2","unstructured":"Ba, J., Caruana, R.: Do deep nets really need to be deep? In: NIPS, pp. 2654\u20132662 (2014)"},{"key":"16_CR3","doi-asserted-by":"crossref","unstructured":"Bucilu\u01ce, C., Caruana, R., Niculescu-Mizil, A.: Model compression. In: KDD, pp. 535\u2013541 (2006)","DOI":"10.1145\/1150402.1150464"},{"key":"16_CR4","doi-asserted-by":"crossref","unstructured":"Cao, D., Zhu, X., Huang, X., Guo, J., Lei, Z.: Domain balancing: face recognition on long-tailed domains. In: CVPR, pp. 5671\u20135679 (2020)","DOI":"10.1109\/CVPR42600.2020.00571"},{"key":"16_CR5","doi-asserted-by":"publisher","unstructured":"Chaudhry, A., Dokania, P.K., Ajanthan, T., Torr, P.H.S.: Riemannian walk for incremental learning: understanding forgetting and intransigence. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11215, pp. 556\u2013572. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01252-6_33","DOI":"10.1007\/978-3-030-01252-6_33"},{"key":"16_CR6","doi-asserted-by":"crossref","unstructured":"Chen, L., Zhang, H., Xiao, J., He, X., Pu, S., Chang, S.F.: Counterfactual critic multi-agent training for scene graph generation. In: ICCV, pp. 4613\u20134623 (2019)","DOI":"10.1109\/ICCV.2019.00471"},{"key":"16_CR7","doi-asserted-by":"crossref","unstructured":"Chiou, M.J., Ding, H., Yan, H., Wang, C., Zimmermann, R., Feng, J.: Recovering the unbiased scene graphs from the biased ones. In: ACMMM, pp. 1581\u20131590 (2021)","DOI":"10.1145\/3474085.3475297"},{"key":"16_CR8","doi-asserted-by":"crossref","unstructured":"Cui, Y., Jia, M., Lin, T.Y., Song, Y., Belongie, S.: Class-balanced loss based on effective number of samples. In: CVPR, pp. 
9268\u20139277 (2019)","DOI":"10.1109\/CVPR.2019.00949"},{"key":"16_CR9","doi-asserted-by":"crossref","unstructured":"Desai, A., Wu, T.Y., Tripathi, S., Vasconcelos, N.: Learning of visual relations: the devil is in the tails. In: ICCV, pp. 15404\u201315413 (2021)","DOI":"10.1109\/ICCV48922.2021.01512"},{"key":"16_CR10","doi-asserted-by":"crossref","unstructured":"Dhingra, N., Ritter, F., Kunz, A.: BGT-Net: Bidirectional GRU transformer network for scene graph generation. In: CVPR, pp. 2150\u20132159 (2021)","DOI":"10.1109\/CVPRW53098.2021.00244"},{"key":"16_CR11","doi-asserted-by":"crossref","unstructured":"Dong, X., Gan, T., Song, X., Wu, J., Cheng, Y., Nie, L.: Stacked hybrid-attention and group collaborative learning for unbiased scene graph generation. In: CVPR, pp. 19427\u201319436 (2022)","DOI":"10.1109\/CVPR52688.2022.01882"},{"key":"16_CR12","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. In: ICLR (2021)"},{"key":"16_CR13","doi-asserted-by":"crossref","unstructured":"Feng, C., Zhong, Y., Huang, W.: Exploring classification equilibrium in long-tailed object detection. In: ICCV, pp. 3417\u20133426 (2021)","DOI":"10.1109\/ICCV48922.2021.00340"},{"issue":"4","key":"16_CR14","doi-asserted-by":"publisher","first-page":"350","DOI":"10.1016\/j.tics.2018.01.010","volume":"22","author":"S Genon","year":"2018","unstructured":"Genon, S., Reid, A., Langner, R., Amunts, K., Eickhoff, S.B.: How to characterize the function of a brain region. Trends Cogn. Sci. 22(4), 350\u2013364 (2018)","journal-title":"Trends Cogn. Sci."},{"key":"16_CR15","unstructured":"Goodfellow, I.J., Mirza, M., Xiao, D., Courville, A., Bengio, Y.: An empirical investigation of catastrophic forgetting in gradient-based neural networks. In: ICLR (2014)"},{"key":"16_CR16","doi-asserted-by":"crossref","unstructured":"Guo, Y., et\u00a0al.: From general to specific: informative scene graph generation via balance adjustment. 
In: ICCV, pp. 16383\u201316392 (2021)","DOI":"10.1109\/ICCV48922.2021.01607"},{"key":"16_CR17","doi-asserted-by":"publisher","unstructured":"Hendricks, L.A., Burns, K., Saenko, K., Darrell, T., Rohrbach, A.: Women also snowboard: overcoming bias in captioning models. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11207, pp. 793\u2013811. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01219-9_47","DOI":"10.1007\/978-3-030-01219-9_47"},{"key":"16_CR18","unstructured":"Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network. CoRR (2015)"},{"key":"16_CR19","doi-asserted-by":"crossref","unstructured":"Hudson, D.A., Manning, C.D.: GQA: a new dataset for real-world visual reasoning and compositional question answering. In: CVPR, pp. 6700\u20136709 (2019)","DOI":"10.1109\/CVPR.2019.00686"},{"issue":"11","key":"16_CR20","doi-asserted-by":"publisher","first-page":"3820","DOI":"10.1109\/TPAMI.2020.2992222","volume":"43","author":"Z Hung","year":"2021","unstructured":"Hung, Z., Mallya, A., Lazebnik, S.: Contextual translation embedding for visual relationship detection and scene graph generation. TPAMI 43(11), 3820\u20133832 (2021)","journal-title":"TPAMI"},{"key":"16_CR21","unstructured":"Kang, B., et al.: Decoupling representation and classifier for long-tailed recognition. In: ICLR (2020)"},{"key":"16_CR22","doi-asserted-by":"crossref","unstructured":"Khan, S., Hayat, M., Zamir, S.W., Shen, J., Shao, L.: Striking the right balance with uncertainty. In: CVPR, pp. 103\u2013112 (2019)","DOI":"10.1109\/CVPR.2019.00019"},{"key":"16_CR23","doi-asserted-by":"crossref","unstructured":"Khandelwal, S., Suhail, M., Sigal, L.: Segmentation-grounded scene graph generation. In: ICCV, pp. 
15879\u201315889 (2021)","DOI":"10.1109\/ICCV48922.2021.01558"},{"key":"16_CR24","doi-asserted-by":"crossref","unstructured":"Krishna, R., et al.: Visual genome: Connecting language and vision using crowdsourced dense image annotations. IJCV 123(1), 32\u201373 (2017)","DOI":"10.1007\/s11263-016-0981-7"},{"issue":"1","key":"16_CR25","doi-asserted-by":"publisher","first-page":"79","DOI":"10.1214\/aoms\/1177729694","volume":"22","author":"S Kullback","year":"1951","unstructured":"Kullback, S., Leibler, R.A.: On information and sufficiency. Ann. Math. Stat. 22(1), 79\u201386 (1951)","journal-title":"Ann. Math. Stat."},{"key":"16_CR26","series-title":"Graduate Texts in Mathematics","doi-asserted-by":"publisher","DOI":"10.1007\/b98852","volume-title":"Riemannian Manifolds","author":"JM Lee","year":"1997","unstructured":"Lee, J.M.: Riemannian Manifolds. GTM, vol. 176. Springer, New York (1997). https:\/\/doi.org\/10.1007\/b98852"},{"key":"16_CR27","doi-asserted-by":"crossref","unstructured":"Li, R., Zhang, S., Wan, B., He, X.: Bipartite graph network with adaptive message passing for unbiased scene graph generation. In: CVPR, pp. 11109\u201311119 (2021)","DOI":"10.1109\/CVPR46437.2021.01096"},{"key":"16_CR28","doi-asserted-by":"crossref","unstructured":"Li, T., Wang, L., Wu, G.: Self supervision to distillation for long-tailed visual recognition. In: ICCV, pp. 630\u2013639 (2021)","DOI":"10.1109\/ICCV48922.2021.00067"},{"key":"16_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). 
https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"16_CR30","doi-asserted-by":"crossref","unstructured":"Liu, J., Sun, Y., Han, C., Dou, Z., Li, W.: Deep representation learning on long-tailed data: a learnable embedding augmentation perspective. In: CVPR, pp. 2970\u20132979 (2020)","DOI":"10.1109\/CVPR42600.2020.00304"},{"key":"16_CR31","doi-asserted-by":"crossref","unstructured":"Manjunatha, V., Saini, N., Davis, L.S.: Explicit bias discovery in visual question answering models. In: CVPR, pp. 9562\u20139571 (2019)","DOI":"10.1109\/CVPR.2019.00979"},{"key":"16_CR32","doi-asserted-by":"crossref","unstructured":"McCloskey, M., Cohen, N.J.: Catastrophic interference in connectionist networks: the sequential learning problem. In: Psychology of Learning and Motivation, pp. 109\u2013165 (1989)","DOI":"10.1016\/S0079-7421(08)60536-8"},{"key":"16_CR33","unstructured":"Mikolov, T., Grave, E., Bojanowski, P., Puhrsch, C., Joulin, A.: Advances in pre-training distributed word representations. In: LREC (2018)"},{"key":"16_CR34","doi-asserted-by":"crossref","unstructured":"Misra, I., Lawrence Zitnick, C., Mitchell, M., Girshick, R.: Seeing through the human reporting bias: visual classifiers from noisy human-centric labels. In: CVPR, pp. 2930\u20132939 (2016)","DOI":"10.1109\/CVPR.2016.320"},{"key":"16_CR35","unstructured":"Pascanu, R., Bengio, Y.: Revisiting natural gradient for deep networks. In: ICLR (2014)"},{"key":"16_CR36","doi-asserted-by":"crossref","unstructured":"Peyre, J., Laptev, I., Schmid, C., Sivic, J.: Detecting unseen visual relations using analogies. In: ICCV, pp. 1981\u20131990 (2019)","DOI":"10.1109\/ICCV.2019.00207"},{"key":"16_CR37","doi-asserted-by":"crossref","unstructured":"Rebuffi, S.A., Kolesnikov, A., Sperl, G., Lampert, C.H.: ICARL: Incremental classifier and representation learning. 
In: CVPR, July 2017","DOI":"10.1109\/CVPR.2017.587"},{"key":"16_CR38","unstructured":"Ren, J., et al.: Balanced meta-softmax for long-tailed visual recognition. In: NIPS, pp. 4175\u20134186 (2020)"},{"key":"16_CR39","doi-asserted-by":"crossref","unstructured":"Suhail, M., et al.: Energy-based learning for scene graph generation. In: CVPR, pp. 13936\u201313945 (2021)","DOI":"10.1109\/CVPR46437.2021.01372"},{"key":"16_CR40","doi-asserted-by":"crossref","unstructured":"Tan, J., et al.: Equalization loss for long-tailed object recognition. In: CVPR, pp. 11662\u201311671 (2020)","DOI":"10.1109\/CVPR42600.2020.01168"},{"key":"16_CR41","unstructured":"Tang, K.: A scene graph generation codebase in pytorch (2020). https:\/\/github.com\/KaihuaTang\/Scene-Graph-Benchmark.pytorch"},{"key":"16_CR42","doi-asserted-by":"crossref","unstructured":"Tang, K., Niu, Y., Huang, J., Shi, J., Zhang, H.: Unbiased scene graph generation from biased training. In: CVPR, pp. 3716\u20133725 (2020)","DOI":"10.1109\/CVPR42600.2020.00377"},{"key":"16_CR43","doi-asserted-by":"crossref","unstructured":"Tang, K., Zhang, H., Wu, B., Luo, W., Liu, W.: Learning to compose dynamic tree structures for visual contexts. In: CVPR, pp. 6619\u20136628 (2019)","DOI":"10.1109\/CVPR.2019.00678"},{"key":"16_CR44","doi-asserted-by":"crossref","unstructured":"Tao, L., Mi, L., Li, N., Cheng, X., Hu, Y., Chen, Z.: Predicate correlation learning for scene graph generation. TIP. 31, 4173\u20134185 (2022)","DOI":"10.1109\/TIP.2022.3181511"},{"key":"16_CR45","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NIPS, pp. 5998\u20136008 (2017)"},{"key":"16_CR46","doi-asserted-by":"crossref","unstructured":"Wang, S., Wang, R., Yao, Z., Shan, S., Chen, X.: Cross-modal scene graph matching for relationship-aware image-text retrieval. In: WACV, pp. 
1508\u20131517 (2020)","DOI":"10.1109\/WACV45572.2020.9093614"},{"key":"16_CR47","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"728","DOI":"10.1007\/978-3-030-58568-6_43","volume-title":"Computer Vision \u2013 ECCV 2020","author":"T Wang","year":"2020","unstructured":"Wang, T., Li, Yu., Kang, B., Li, J., Liew, J., Tang, S., Hoi, S., Feng, J.: The devil is in classification: a simple framework for long-tail instance segmentation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12359, pp. 728\u2013744. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58568-6_43"},{"key":"16_CR48","unstructured":"Wang, Y., Ramanan, D., Hebert, M.: Learning to model the tail. In: NIPS, pp. 7029\u20137039 (2017)"},{"key":"16_CR49","doi-asserted-by":"crossref","unstructured":"Xie, S., Girshick, R., Doll\u00e1r, P., Tu, Z., He, K.: Aggregated residual transformations for deep neural networks. In: CVPR, pp. 1492\u20131500 (2017)","DOI":"10.1109\/CVPR.2017.634"},{"key":"16_CR50","doi-asserted-by":"crossref","unstructured":"Xiong, S., Huang, W., Duan, P.: Knowledge graph embedding via relation paths and dynamic mapping matrix. In: Advances in Conceptual Modeling, pp. 106\u2013118 (2018)","DOI":"10.1007\/978-3-030-01391-2_18"},{"key":"16_CR51","doi-asserted-by":"crossref","unstructured":"Xu, D., Zhu, Y., Choy, C.B., Fei-Fei, L.: Scene graph generation by iterative message passing. In: CVPR, pp. 5410\u20135419 (2017)","DOI":"10.1109\/CVPR.2017.330"},{"key":"16_CR52","doi-asserted-by":"crossref","unstructured":"Yan, S., et al.: PCPL: predicate-correlation perception learning for unbiased scene graph generation. In: ACMMM, pp. 
265\u2013273 (2020)","DOI":"10.1145\/3394171.3413722"},{"key":"16_CR53","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"690","DOI":"10.1007\/978-3-030-01246-5_41","volume-title":"Computer Vision \u2013 ECCV 2018","author":"J Yang","year":"2018","unstructured":"Yang, J., Lu, J., Lee, S., Batra, D., Parikh, D.: Graph R-CNN for scene graph\u00a0generation. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11205, pp. 690\u2013706. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01246-5_41"},{"key":"16_CR54","doi-asserted-by":"crossref","unstructured":"Yang, X., Tang, K., Zhang, H., Cai, J.: Auto-encoding scene graphs for image captioning. In: CVPR, pp. 10685\u201310694 (2019)","DOI":"10.1109\/CVPR.2019.01094"},{"key":"16_CR55","doi-asserted-by":"crossref","unstructured":"Yin, X., Yu, X., Sohn, K., Liu, X., Chandraker, M.: Feature transfer learning for face recognition with under-represented data. In: CVPR, pp. 5704\u20135713 (2019)","DOI":"10.1109\/CVPR.2019.00585"},{"key":"16_CR56","doi-asserted-by":"crossref","unstructured":"Yu, J., Chai, Y., Wang, Y., Hu, Y., Wu, Q.: CogTree: cognition tree loss for unbiased scene graph generation. In: IJCAI, pp. 1274\u20131280 (2020)","DOI":"10.24963\/ijcai.2021\/176"},{"key":"16_CR57","doi-asserted-by":"crossref","unstructured":"Zellers, R., Yatskar, M., Thomson, S., Choi, Y.: Neural motifs: scene graph parsing with global context. In: CVPR, pp. 5831\u20135840 (2018)","DOI":"10.1109\/CVPR.2018.00611"},{"key":"16_CR58","unstructured":"Zenke, F., Poole, B., Ganguli, S.: Continual learning through synaptic intelligence. In: ICML, pp. 3987\u20133995 (2017)"},{"key":"16_CR59","doi-asserted-by":"crossref","unstructured":"Zhang, H., Kyaw, Z., Chang, S., Chua, T.: Visual translation embedding network for visual relation detection. In: CVPR, pp. 
3107\u20133115 (2017)","DOI":"10.1109\/CVPR.2017.331"},{"key":"16_CR60","doi-asserted-by":"crossref","unstructured":"Zhang, J., et al.: Class-incremental learning via deep model consolidation. In: WACV, pp. 1131\u20131140 (2020)","DOI":"10.1109\/WACV45572.2020.9093365"},{"key":"16_CR61","doi-asserted-by":"crossref","unstructured":"Zhang, X., et al.: VideoLT: large-scale long-tailed video recognition. In: ICCV, pp. 7960\u20137969 (2021)","DOI":"10.1109\/ICCV48922.2021.00786"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19812-0_16","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,4,27]],"date-time":"2023-04-27T13:06:32Z","timestamp":1682600792000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19812-0_16"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198113","9783031198120"],"references-count":61,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19812-0_16","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"30 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}