{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T18:59:54Z","timestamp":1772823594745,"version":"3.50.1"},"publisher-location":"Cham","reference-count":55,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031197833","type":"print"},{"value":"9783031197840","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19784-0_30","type":"book-chapter","created":{"date-parts":[[2022,10,30]],"date-time":"2022-10-30T14:02:50Z","timestamp":1667138570000},"page":"511-528","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":11,"title":["Learning Cross-Video Neural Representations for\u00a0High-Quality Frame Interpolation"],"prefix":"10.1007","author":[{"given":"Wentao","family":"Shangguan","sequence":"first","affiliation":[]},{"given":"Yu","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Weijie","family":"Gan","sequence":"additional","affiliation":[]},{"given":"Ulugbek S.","family":"Kamilov","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,31]]},"reference":[{"issue":"3","key":"30_CR1","doi-asserted-by":"publisher","first-page":"933","DOI":"10.1109\/TPAMI.2019.2941941","volume":"43","author":"W Bao","year":"2021","unstructured":"Bao, W., Lai, W.S., Zhang, X., Gao, Z., Yang, M.H.: MEMC-Net: motion estimation and motion compensation driven 
neural network for video interpolation and enhancement. IEEE Trans. Pattern Anal. Mach. Intell. 43(3), 933\u2013948 (2021)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"30_CR2","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"25","DOI":"10.1007\/978-3-540-24673-2_3","volume-title":"Computer Vision - ECCV 2004","author":"T Brox","year":"2004","unstructured":"Brox, T., Bruhn, A., Papenberg, N., Weickert, J.: High accuracy optical flow estimation based on a theory for warping. In: Pajdla, T., Matas, J. (eds.) ECCV 2004. LNCS, vol. 3024, pp. 25\u201336. Springer, Heidelberg (2004). https:\/\/doi.org\/10.1007\/978-3-540-24673-2_3"},{"issue":"5","key":"30_CR3","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1109\/76.538926","volume":"6","author":"R Castagno","year":"1996","unstructured":"Castagno, R., Haavisto, P., Ramponi, G.: A method for motion adaptive frame rate up-conversion. IEEE Trans. Circuits Syst. Video Technol. 6(5), 436\u2013446 (1996)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"30_CR4","first-page":"21557","volume":"34","author":"H Chen","year":"2021","unstructured":"Chen, H., He, B., Wang, H., Ren, Y., Lim, S.N., Shrivastava, A.: NeRV: neural representations for videos. Adv. Neural. Inf. Process. Syst. 34, 21557\u201321568 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"30_CR5","doi-asserted-by":"crossref","unstructured":"Chen, Z., Jin, H., Lin, Z., Cohen, S., Wu, Y.: Large displacement optical flow from nearest neighbor fields. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2443\u20132450 (2013)","DOI":"10.1109\/CVPR.2013.316"},{"key":"30_CR6","doi-asserted-by":"crossref","unstructured":"Chen, Z., Zhang, H.: Learning implicit fields for generative shape modeling. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
5939\u20135948 (2019)","DOI":"10.1109\/CVPR.2019.00609"},{"key":"30_CR7","doi-asserted-by":"crossref","unstructured":"Choi, M., Kim, H., Han, B., Xu, N., Lee, K.M.: Channel attention is all you need for video frame interpolation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 10663\u201310671 (2020)","DOI":"10.1609\/aaai.v34i07.6693"},{"key":"30_CR8","doi-asserted-by":"crossref","unstructured":"Du, Y., Zhang, Y., Yu, H.X., Tenenbaum, J.B., Wu, J.: Neural radiance flow for 4D view synthesis and video processing. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 14324\u201314334 (2021)","DOI":"10.1109\/ICCV48922.2021.01406"},{"key":"30_CR9","doi-asserted-by":"crossref","unstructured":"Flynn, J., Neulander, I., Philbin, J., Snavely, N.: DeepStereo: learning to predict new views from the world\u2019s imagery. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5515\u20135524 (2016)","DOI":"10.1109\/CVPR.2016.595"},{"key":"30_CR10","doi-asserted-by":"crossref","unstructured":"Gupta, A., Aich, A., Roy-Chowdhury, A.K.: ALANET: adaptive latent attention network for joint video deblurring and interpolation. arXiv:2009.01005 [cs.CV] (2020)","DOI":"10.1145\/3394171.3413686"},{"key":"30_CR11","doi-asserted-by":"crossref","unstructured":"Hui, T.W., Tang, X., Loy, C.C.: LiteFlowNet: a lightweight convolutional neural network for optical flow estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8981\u20138989 (2018)","DOI":"10.1109\/CVPR.2018.00936"},{"key":"30_CR12","unstructured":"Jaderberg, M., Simonyan, K., Zisserman, A., et al.: Spatial transformer networks. Adv. Neural Inf. Process. Syst. 28 (2015)"},{"key":"30_CR13","doi-asserted-by":"crossref","unstructured":"Jiang, H., Sun, D., Jampani, V., Yang, M.H., Learned-Miller, E., Kautz, J.: SuperSloMo: high quality estimation of multiple intermediate frames for video interpolation. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 9000\u20139008 (2018)","DOI":"10.1109\/CVPR.2018.00938"},{"key":"30_CR14","unstructured":"Kingma, D., Ba, J.: Adam: a method for stochastic optimization. In: International Conference on Learning Representations (ICLR) (2015)"},{"key":"30_CR15","doi-asserted-by":"crossref","unstructured":"Lee, H., Kim, T., Chung, T.Y., Pak, D., Ban, Y., Lee, S.: AdaCof: adaptive collaboration of flows for video frame interpolation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5316\u20135325 (2020)","DOI":"10.1109\/CVPR42600.2020.00536"},{"key":"30_CR16","doi-asserted-by":"crossref","unstructured":"Li, H., Yuan, Y., Wang, Q.: Video frame interpolation via residue refinement. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2613\u20132617 (2020)","DOI":"10.1109\/ICASSP40776.2020.9053987"},{"key":"30_CR17","unstructured":"Li, T., et al.: Neural 3D video synthesis. arXiv:2103.02597 (2021)"},{"key":"30_CR18","doi-asserted-by":"crossref","unstructured":"Li, Z., Niklaus, S., Snavely, N., Wang, O.: Neural scene flow fields for space-time view synthesis of dynamic scenes. arXiv:2011.13084 (2020)","DOI":"10.1109\/CVPR46437.2021.00643"},{"key":"30_CR19","doi-asserted-by":"crossref","unstructured":"Li, Z., Niklaus, S., Snavely, N., Wang, O.: Neural scene flow fields for space-time view synthesis of dynamic scenes. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6498\u20136508 (2021)","DOI":"10.1109\/CVPR46437.2021.00643"},{"key":"30_CR20","doi-asserted-by":"crossref","unstructured":"Lindell, D.B., Martel, J.N.P., Wetzstein, G.: AutoInt: automatic integration for fast neural volume rendering. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2021)","DOI":"10.1109\/CVPR46437.2021.01432"},{"key":"30_CR21","first-page":"15651","volume":"33","author":"L Liu","year":"2020","unstructured":"Liu, L., Gu, J., Lin, K.Z., Chua, T.S., Theobalt, C.: Neural sparse voxel fields. Adv. Neural. Inf. Process. Syst. 33, 15651\u201315663 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"30_CR22","doi-asserted-by":"crossref","unstructured":"Liu, R., Sun, Y., Zhu, J., Tian, L., Kamilov, U.S.: Zero-shot learning of continuous 3D refractive index maps from discrete intensity-only measurements. arXiv:2112.00002 (2021)","DOI":"10.1038\/s42256-022-00530-3"},{"key":"30_CR23","doi-asserted-by":"crossref","unstructured":"Liu, Z., Yeh, R.A., Tang, X., Liu, Y., Agarwala, A.: Video frame synthesis using deep voxel flow. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 4463\u20134471 (2017)","DOI":"10.1109\/ICCV.2017.478"},{"key":"30_CR24","doi-asserted-by":"crossref","unstructured":"Long, G., Kneip, L., Alvarez, J.M., Li, H., Zhang, X., Yu, Q.: Learning image matching by simply watching video. In: European Conference on Computer Vision, pp. 434\u2013450 (2016)","DOI":"10.1007\/978-3-319-46466-4_26"},{"issue":"2","key":"30_CR25","doi-asserted-by":"publisher","first-page":"678","DOI":"10.1109\/TIP.2017.2767782","volume":"27","author":"G Lu","year":"2017","unstructured":"Lu, G., Zhang, X., Chen, L., Gao, Z.: Novel integration of frame rate up conversion and HEVC coding based on rate-distortion optimization. IEEE Trans. Image Process. 27(2), 678\u2013691 (2017)","journal-title":"IEEE Trans. Image Process."},{"key":"30_CR26","doi-asserted-by":"crossref","unstructured":"Martin-Brualla, R., Radwan, N., Sajjadi, M.S., Barron, J.T., Dosovitskiy, A., Duckworth, D.: NeRF in the wild: neural radiance fields for unconstrained photo collections. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7210\u20137219 (2021)","DOI":"10.1109\/CVPR46437.2021.00713"},{"key":"30_CR27","doi-asserted-by":"crossref","unstructured":"Meyer, S., Djelouah, A., McWilliams, B., Sorkine-Hornung, A., Gross, M., Schroers, C.: PhaseNet for video frame interpolation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 498\u2013507 (2018)","DOI":"10.1109\/CVPR.2018.00059"},{"key":"30_CR28","doi-asserted-by":"crossref","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: European Conference on Computer Vision, pp. 405\u2013421 (2020)","DOI":"10.1007\/978-3-030-58452-8_24"},{"key":"30_CR29","doi-asserted-by":"crossref","unstructured":"Niklaus, S., Liu, F.: Context-aware synthesis for video frame interpolation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1701\u20131710 (2018)","DOI":"10.1109\/CVPR.2018.00183"},{"key":"30_CR30","doi-asserted-by":"crossref","unstructured":"Niklaus, S., Liu, F.: Softmax splatting for video frame interpolation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5437\u20135446 (2020)","DOI":"10.1109\/CVPR42600.2020.00548"},{"key":"30_CR31","doi-asserted-by":"crossref","unstructured":"Niklaus, S., Mai, L., Liu, F.: Video frame interpolation via adaptive convolution. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 670\u2013679 (2017)","DOI":"10.1109\/CVPR.2017.244"},{"key":"30_CR32","doi-asserted-by":"crossref","unstructured":"Niklaus, S., Mai, L., Liu, F.: Video frame interpolation via adaptive separable convolution. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 
261\u2013270 (2017)","DOI":"10.1109\/ICCV.2017.37"},{"key":"30_CR33","doi-asserted-by":"crossref","unstructured":"Niklaus, S., Mai, L., Wang, O.: Revisiting adaptive convolutions for video frame interpolation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 1099\u20131109 (2021)","DOI":"10.1109\/WACV48630.2021.00114"},{"key":"30_CR34","doi-asserted-by":"crossref","unstructured":"Oh, J., Kim, M.: DeMFI: deep joint deblurring and multi-frame interpolation with flow-guided attentive correlation and recursive boosting. arXiv:2111.09985 [cs.CV] (2021)","DOI":"10.1007\/978-3-031-20071-7_12"},{"key":"30_CR35","doi-asserted-by":"crossref","unstructured":"Park, J., Ko, K., Lee, C., Kim, C.S.: BMBC: bilateral motion estimation with bilateral cost volume for video interpolation. In: European Conference on Computer Vision, pp. 109\u2013125 (2020)","DOI":"10.1007\/978-3-030-58568-6_7"},{"key":"30_CR36","doi-asserted-by":"crossref","unstructured":"Park, J., Lee, C., Kim, C.S.: Asymmetric bilateral motion estimation for video frame interpolation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 14539\u201314548 (2021)","DOI":"10.1109\/ICCV48922.2021.01427"},{"key":"30_CR37","doi-asserted-by":"crossref","unstructured":"Park, J.J., Florence, P., Straub, J., Newcombe, R., Lovegrove, S.: DeepSDF: learning continuous signed distance functions for shape representation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 165\u2013174 (2019)","DOI":"10.1109\/CVPR.2019.00025"},{"key":"30_CR38","doi-asserted-by":"crossref","unstructured":"Park, K., et al.: Nerfies: deformable neural radiance fields. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
5865\u20135874 (2021)","DOI":"10.1109\/ICCV48922.2021.00581"},{"key":"30_CR39","doi-asserted-by":"crossref","unstructured":"Peng, S., et al.: Neural body: implicit neural representations with structured latent codes for novel view synthesis of dynamic humans. In: Proceedings of IEEE Conference Computer Vision and Pattern Recognition (2021)","DOI":"10.1109\/CVPR46437.2021.00894"},{"key":"30_CR40","doi-asserted-by":"crossref","unstructured":"Ranjan, A., Black, M.J.: Optical flow estimation using a spatial pyramid network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4161\u20134170 (2017)","DOI":"10.1109\/CVPR.2017.291"},{"key":"30_CR41","doi-asserted-by":"crossref","unstructured":"Ren, Z., Yan, J., Ni, B., Liu, B., Yang, X., Zha, H.: Unsupervised deep learning for optical flow estimation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 31 (2017)","DOI":"10.1609\/aaai.v31i1.10723"},{"key":"30_CR42","unstructured":"Shen, L., Pauly, J., Xing, L.: NeRP: implicit neural representation learning with prior embedding for sparsely sampled image reconstruction. arXiv:2108.10991 [eess.IV] (2021)"},{"key":"30_CR43","doi-asserted-by":"publisher","first-page":"426","DOI":"10.1109\/TMM.2021.3052419","volume":"24","author":"Z Shi","year":"2021","unstructured":"Shi, Z., Liu, X., Shi, K., Dai, L., Chen, J.: Video frame interpolation via generalized deformable convolution. IEEE Trans. Multimedia 24, 426\u2013439 (2021)","journal-title":"IEEE Trans. Multimedia"},{"key":"30_CR44","doi-asserted-by":"crossref","unstructured":"Sim, H., Oh, J., Kim, M.: XVFI: extreme video frame interpolation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 14489\u201314498 (2021)","DOI":"10.1109\/ICCV48922.2021.01422"},{"key":"30_CR45","unstructured":"Sitzmann, V., Martel, J., Bergman, A., Lindell, D., Wetzstein, G.: Implicit neural representations with periodic activation functions. 
In: Advances in Neural Information Processing Systems, vol. 33 (2020)"},{"key":"30_CR46","unstructured":"Sitzmann, V., Zollhoefer, M., Wetzstein, G.: Scene representation networks: continuous 3D-structure-aware neural scene representations. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"30_CR47","unstructured":"Soomro, K., Zamir, A.R., Shah, M.: UCF101: a dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)"},{"key":"30_CR48","doi-asserted-by":"publisher","first-page":"1400","DOI":"10.1109\/TCI.2021.3125564","volume":"7","author":"Y Sun","year":"2021","unstructured":"Sun, Y., Liu, J., Xie, M., Wohlberg, B., Kamilov, U.S.: CoIL: coordinate-based internal learning for tomographic imaging. IEEE Trans. Comp. Imag. 7, 1400\u20131412 (2021)","journal-title":"IEEE Trans. Comp. Imag."},{"key":"30_CR49","doi-asserted-by":"crossref","unstructured":"Takeda, H., Van Beek, P., Milanfar, P.: Spatio-temporal video interpolation and denoising using motion-assisted steering kernel (MASK) regression. In: Proceedings of the IEEE International Conference on Image Processing, pp. 637\u2013640 (2008)","DOI":"10.1109\/ICIP.2008.4711835"},{"key":"30_CR50","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"402","DOI":"10.1007\/978-3-030-58536-5_24","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Z Teed","year":"2020","unstructured":"Teed, Z., Deng, J.: RAFT: recurrent all-pairs field transforms for optical flow. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12347, pp. 402\u2013419. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58536-5_24"},{"issue":"4","key":"30_CR51","doi-asserted-by":"publisher","first-page":"2713","DOI":"10.1109\/TWC.2015.2509063","volume":"15","author":"J Wu","year":"2015","unstructured":"Wu, J., Yuen, C., Cheung, N.M., Chen, J., Chen, C.W.: Modeling and optimization of high frame rate video transmission over wireless networks. IEEE Trans. Wireless Commun. 15(4), 2713\u20132726 (2015)","journal-title":"IEEE Trans. Wireless Commun."},{"key":"30_CR52","doi-asserted-by":"crossref","unstructured":"Xian, W., Huang, J.B., Kopf, J., Kim, C.: Space-time neural irradiance fields for free-viewpoint video. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9421\u20139431 (2021)","DOI":"10.1109\/CVPR46437.2021.00930"},{"issue":"8","key":"30_CR53","doi-asserted-by":"publisher","first-page":"1106","DOI":"10.1007\/s11263-018-01144-2","volume":"127","author":"T Xue","year":"2019","unstructured":"Xue, T., Chen, B., Wu, J., Wei, D., Freeman, W.T.: Video enhancement with task-oriented flow. Int. J. Comput. Vision 127(8), 1106\u20131125 (2019)","journal-title":"Int. J. Comput. Vision"},{"key":"30_CR54","unstructured":"Yoon, J.S., Kim, K., Gallo, O., Park, H.S., Kautz, J.: Novel view synthesis of dynamic scenes with globally coherent depths from a monocular camera. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5336\u20135345 (2020)"},{"key":"30_CR55","unstructured":"Zhang, K., Riegler, G., Snavely, N., Koltun, V.: NeRF++: analyzing and improving neural radiance fields. 
arXiv:2010.07492 [cs.CV] (2020)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19784-0_30","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,13]],"date-time":"2024-03-13T19:38:08Z","timestamp":1710358688000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19784-0_30"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031197833","9783031197840"],"references-count":55,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19784-0_30","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"31 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole 
number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}