{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T07:44:47Z","timestamp":1767339887368,"version":"3.40.3"},"publisher-location":"Cham","reference-count":73,"publisher":"Springer International Publishing","isbn-type":[{"type":"print","value":"9783030585679"},{"type":"electronic","value":"9783030585686"}],"license":[{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2020,1,1]],"date-time":"2020-01-01T00:00:00Z","timestamp":1577836800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020]]},"DOI":"10.1007\/978-3-030-58568-6_5","type":"book-chapter","created":{"date-parts":[[2020,11,12]],"date-time":"2020-11-12T14:04:57Z","timestamp":1605189897000},"page":"71-89","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":13,"title":["Self-supervised Motion Representation via Scattering Local Motion Cues"],"prefix":"10.1007","author":[{"given":"Yuan","family":"Tian","sequence":"first","affiliation":[]},{"given":"Zhaohui","family":"Che","sequence":"additional","affiliation":[]},{"given":"Wenbo","family":"Bao","sequence":"additional","affiliation":[]},{"given":"Guangtao","family":"Zhai","sequence":"additional","affiliation":[]},{"given":"Zhiyong","family":"Gao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2020,11,13]]},"reference":[{"key":"5_CR1","unstructured":"Abu-El-Haija, S., et al.: Youtube-8m: A large-scale video classification benchmark. arXiv (2016)"},{"key":"5_CR2","doi-asserted-by":"crossref","unstructured":"Bao, W., Lai, W.S., Ma, C., Zhang, X., Gao, Z., Yang, M.H.: Depth-aware video frame interpolation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00382"},{"key":"5_CR3","unstructured":"Brabandere, B.D., Jia, X., Tuytelaars, T., Gool, L.V.: Dynamic filter networks. In: NeurIPS (2016)"},{"key":"5_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-33783-3_44","volume-title":"Computer Vision - ECCV 2012","author":"DJ Butler","year":"2012","unstructured":"Butler, D.J., Wulff, J., Stanley, G.B., Black, M.J.: A naturalistic open source movie for optical flow evaluation. In: Fitzgibbon, A., Lazebnik, S., Perona, P., Sato, Y., Schmid, C. (eds.) ECCV 2012. LNCS, vol. 7577. Springer, Heidelberg (2012). https:\/\/doi.org\/10.1007\/978-3-642-33783-3_44"},{"key":"5_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01270-0_46","volume-title":"Computer Vision - ECCV 2018","author":"W Byeon","year":"2018","unstructured":"Byeon, W., Wang, Q., Srivastava, R.K., Koumoutsakos, P.: ContextVP: fully context-aware video prediction. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11220. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01270-0_46"},{"key":"5_CR6","doi-asserted-by":"crossref","unstructured":"Cai, Z., Wang, L., Peng, X., Qiao, Y.: Multi-view super vector for action recognition. 
In: CVPR (2014)","DOI":"10.1109\/CVPR.2014.83"},{"key":"5_CR7","unstructured":"Carreira, J., Noland, E., Banki-Horvath, A., Hillier, C., Zisserman, A.: A short note about kinetics-600. arXiv (2018)"},{"key":"5_CR8","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? A new model and the kinetics dataset. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"5_CR9","unstructured":"Charbonnier, P., Blanc-Feraud, L., Aubert, G., Barlaud, M.: Two deterministic half-quadratic regularization algorithms for computed imaging. In: ICIP (1994)"},{"key":"5_CR10","first-page":"2287","volume":"29","author":"Z Che","year":"2019","unstructured":"Che, Z., Borji, A., Zhai, G., Min, X., Guo, G., Le Callet, P.: How is gaze influenced by image transformations? Dataset and model. TIP 29, 2287\u20132300 (2019)","journal-title":"TIP"},{"key":"5_CR11","doi-asserted-by":"crossref","unstructured":"Choutas, V., Weinzaepfel, P., Revaud, J., Schmid, C.: PoTion: pose motion representation for action recognition. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00734"},{"key":"5_CR12","doi-asserted-by":"crossref","unstructured":"Dai, J., et al.: Deformable convolutional networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.89"},{"key":"5_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01225-0_18","volume-title":"Computer Vision - ECCV 2018","author":"A Diba","year":"2018","unstructured":"Diba, A., et al.: Spatio-temporal channel correlation networks for action classification. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11208. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01225-0_18"},{"key":"5_CR14","doi-asserted-by":"crossref","unstructured":"Diba, A., Sharma, V., Gool, L.V., Stiefelhagen, R.: DynamoNet: Dynamic action and motion network. arXiv (2019)","DOI":"10.1109\/ICCV.2019.00629"},{"key":"5_CR15","doi-asserted-by":"crossref","unstructured":"Dosovitskiy, A., et al.: FlowNet: learning optical flow with convolutional networks. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.316"},{"key":"5_CR16","doi-asserted-by":"crossref","unstructured":"Fan, L., Huang, W., Gan, C., Ermon, S., Gong, B., Huang, J.: End-to-end learning of motion representation for video understanding. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00630"},{"key":"5_CR17","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Fan, H., Malik, J., He, K.: SlowFast networks for video recognition. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00630"},{"key":"5_CR18","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Wildes, R.P.: Spatiotemporal residual networks for video action recognition. In: NeurIPS (2016)","DOI":"10.1109\/CVPR.2017.787"},{"key":"5_CR19","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C., Pinz, A., Zisserman, A.: Convolutional two-stream network fusion for video action recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.213"},{"key":"5_CR20","doi-asserted-by":"crossref","unstructured":"Fernando, B., Bilen, H., Gavves, E., Gould, S.: Self-supervised video representation learning with odd-one-out networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.607"},{"key":"5_CR21","doi-asserted-by":"crossref","unstructured":"Hara, K., Kataoka, H., Satoh, Y.: Learning spatio-temporal features with 3D residual networks for action recognition. 
In: ICCVW (2017)","DOI":"10.1109\/ICCVW.2017.373"},{"key":"5_CR22","doi-asserted-by":"crossref","unstructured":"He, D., et al.: StNet: local and global spatial-temporal modeling for action recognition. In: AAAI (2019)","DOI":"10.1609\/aaai.v33i01.33018401"},{"key":"5_CR23","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"5_CR24","doi-asserted-by":"crossref","unstructured":"Ilg, E., Mayer, N., Saikia, T., Keuper, M., Dosovitskiy, A., Brox, T.: FlowNet 2.0: evolution of optical flow estimation with deep networks. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.179"},{"key":"5_CR25","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01258-8_38","volume-title":"Computer Vision - ECCV 2018","author":"E Ilg","year":"2018","unstructured":"Ilg, E., Saikia, T., Keuper, M., Brox, T.: Occlusions, motion and depth boundaries with a generic network for disparity, optical flow or scene flow estimation. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11216. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01258-8_38"},{"key":"5_CR26","unstructured":"Kingma, D.P., Ba, J.L.: Adam: a method for stochastic optimization. In: ICLR (2015)"},{"key":"5_CR27","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_29","volume-title":"Computer Vision - ECCV 2016","author":"T Kroeger","year":"2016","unstructured":"Kroeger, T., Timofte, R., Dai, D., Van Gool, L.: Fast optical flow using dense inverse search. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_29"},{"key":"5_CR28","doi-asserted-by":"crossref","unstructured":"Kuehne, H., Jhuang, H., Garrote, E., Poggio, T., Serre, T.: HMDB: a large video database for human motion recognition. In: ICCV (2011)","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"5_CR29","doi-asserted-by":"crossref","unstructured":"Kwon, Y.H., Park, M.G.: Predicting future frames using retrospective cycle GAN. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00191"},{"key":"5_CR30","doi-asserted-by":"crossref","unstructured":"Lee, H.Y., Huang, J.B., Singh, M., Yang, M.H.: Unsupervised representation learning by sorting sequences. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.79"},{"key":"5_CR31","unstructured":"Li, X., Hu, X., Yang, J.: Spatial group-wise enhance: Improving semantic feature learning in convolutional networks. arXiv (2019)"},{"key":"5_CR32","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01240-3_37","volume-title":"Computer Vision - ECCV 2018","author":"Y Li","year":"2018","unstructured":"Li, Y., Fang, C., Yang, J., Wang, Z., Lu, X., Yang, M.H.: Flow-grounded spatial-temporal video prediction from still images. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11213. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01240-3_37"},{"key":"5_CR33","doi-asserted-by":"crossref","unstructured":"Liang, X., Lee, L., Dai, W., Xing, E.P.: Dual motion gan for future-flow embedded video prediction. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.194"},{"key":"5_CR34","doi-asserted-by":"crossref","unstructured":"Liu, W., Luo, W., Lian, D., Gao, S.: Future frame prediction for anomaly detection - a new baseline. 
In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00684"},{"key":"5_CR35","doi-asserted-by":"crossref","unstructured":"Liu, Z., Yeh, R.A., Tang, X., Liu, Y., Agarwala, A.: Video frame synthesis using deep voxel flow. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.478"},{"key":"5_CR36","unstructured":"Mathieu, M., Couprie, C., LeCun, Y.: Deep multi-scale video prediction beyond mean square error. In: ICLR (2016)"},{"key":"5_CR37","first-page":"2049","volume":"20","author":"X Min","year":"2017","unstructured":"Min, X., Gu, K., Zhai, G., Liu, J., Yang, X., Chen, C.W.: Blind quality assessment based on pseudo-reference image. TMM 20, 2049\u20132062 (2017)","journal-title":"TMM"},{"key":"5_CR38","doi-asserted-by":"publisher","first-page":"2879","DOI":"10.1109\/TITS.2018.2868771","volume":"20","author":"X Min","year":"2018","unstructured":"Min, X., Zhai, G., Gu, K., Yang, X., Guan, X.: Objective quality evaluation of dehazed images. IEEE Trans. Intell. Transp. Syst. 20, 2879\u20132892 (2018)","journal-title":"IEEE Trans. Intell. Transp. Syst."},{"key":"5_CR39","first-page":"3805","volume":"29","author":"X Min","year":"2020","unstructured":"Min, X., Zhai, G., Zhou, J., Zhang, X.P., Yang, X., Guan, X.: A multimodal saliency model for videos with high audio-visual correspondence. TIP 29, 3805\u20133819 (2020)","journal-title":"TIP"},{"key":"5_CR40","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46448-0_32","volume-title":"Computer Vision - ECCV 2016","author":"I Misra","year":"2016","unstructured":"Misra, I., Zitnick, C.L., Hebert, M.: Shuffle and learn: unsupervised learning using temporal order verification. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9905. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46448-0_32"},{"key":"5_CR41","doi-asserted-by":"crossref","unstructured":"Ng, J.Y.H., Choi, J., Neumann, J., Davis, L.S.: ActionFlowNet: learning motion representation for action recognition. In: WACV (2018)","DOI":"10.1109\/WACV.2018.00179"},{"key":"5_CR42","doi-asserted-by":"crossref","unstructured":"Pan, J., et al.: Video generation from single semantic label map. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00385"},{"key":"5_CR43","unstructured":"Paszke, A., et al.: Automatic differentiation in PyTorch (2017)"},{"key":"5_CR44","doi-asserted-by":"publisher","first-page":"137","DOI":"10.5201\/ipol.2013.26","volume":"3","author":"JS P\u00e9rez","year":"2013","unstructured":"P\u00e9rez, J.S., Meinhardt-Llopis, E., Facciolo, G.: Tv-l1 optical flow estimation. Image Process. On Line 3, 137\u2013150 (2013)","journal-title":"Image Process. On Line"},{"key":"5_CR45","doi-asserted-by":"crossref","unstructured":"Qiu, Z., Yao, T., Mei, T.: Learning spatio-temporal representation with pseudo-3D residual networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.590"},{"key":"5_CR46","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_44","volume-title":"Computer Vision - ECCV 2018","author":"FA Reda","year":"2018","unstructured":"Reda, F.A., et al.: SDC-Net: video prediction using spatially-displaced convolution. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11211. Springer, Cham (2018). 
https:\/\/doi.org\/10.1007\/978-3-030-01234-2_44"},{"key":"5_CR47","doi-asserted-by":"crossref","unstructured":"Revaud, J., Weinzaepfel, P., Harchaoui, Z., Schmid, C.: EpicFlow: edge-preserving interpolation of correspondences for optical flow. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298720"},{"key":"5_CR48","doi-asserted-by":"crossref","unstructured":"Shen, W., Bao, W., Zhai, G., Chen, L., Min, X., Gao, Z.: Blurry video frame interpolation. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00516"},{"key":"5_CR49","doi-asserted-by":"crossref","unstructured":"Shi, W., et al.: Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.207"},{"key":"5_CR50","unstructured":"Simonyan, K., Zisserman, A.: Two-stream convolutional networks for action recognition in videos. In: NeurIPS (2014)"},{"key":"5_CR51","unstructured":"Soomro, K., Zamir, A.R., Shah, M.: UCF101: A dataset of 101 human actions classes from videos in the wild. arXiv (2012)"},{"key":"5_CR52","doi-asserted-by":"crossref","unstructured":"Su, H., Jampani, V., Sun, D., Gallo, O., Learned-Miller, E., Kautz, J.: Pixel-adaptive convolutional neural networks. arXiv (2019)","DOI":"10.1109\/CVPR.2019.01142"},{"key":"5_CR53","doi-asserted-by":"crossref","unstructured":"Sun, D., Yang, X., Liu, M.Y., Kautz, J.: PWC-Net: CNNs for optical flow using pyramid, warping, and cost volume. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00931"},{"key":"5_CR54","doi-asserted-by":"crossref","unstructured":"Sun, S., Kuang, Z., Sheng, L., Ouyang, W., Zhang, W.: Optical flow guided feature: a fast and robust motion representation for video action recognition. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00151"},{"key":"5_CR55","doi-asserted-by":"crossref","unstructured":"Tian, Y., Min, X., Zhai, G., Gao, Z.: Video-based early ASD detection via temporal pyramid networks. In: ICME (2019)","DOI":"10.1109\/ICME.2019.00055"},{"key":"5_CR56","doi-asserted-by":"crossref","unstructured":"Tran, D., Bourdev, L., Fergus, R., Torresani, L., Paluri, M.: Learning spatiotemporal features with 3D convolutional networks. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.510"},{"key":"5_CR57","doi-asserted-by":"crossref","unstructured":"Tran, D., Wang, H., Torresani, L., Ray, J., LeCun, Y., Paluri, M.: A closer look at spatiotemporal convolutions for action recognition. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00675"},{"key":"5_CR58","unstructured":"Villegas, R., Yang, J., Hong, S., Lin, X., Lee, H.: Decomposing motion and content for natural video sequence prediction. arXiv (2017)"},{"key":"5_CR59","doi-asserted-by":"crossref","unstructured":"Wang, H., Schmid, C.: Action recognition with improved trajectories. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.441"},{"key":"5_CR60","doi-asserted-by":"crossref","unstructured":"Wang, L., Qiao, Y., Tang, X.: Action recognition with trajectory-pooled deep-convolutional descriptors. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7299059"},{"key":"5_CR61","unstructured":"Wang, L., Xiong, Y., Wang, Z., Qiao, Y.: Towards good practices for very deep two-stream convnets. arXiv (2015)"},{"key":"5_CR62","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_2","volume-title":"Computer Vision - ECCV 2016","author":"L Wang","year":"2016","unstructured":"Wang, L., et al.: Temporal segment networks: towards good practices for deep action recognition. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) 
ECCV 2016. LNCS, vol. 9912. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46484-8_2"},{"key":"5_CR63","first-page":"600","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. TIP 13, 600\u2013612 (2004)","journal-title":"TIP"},{"key":"5_CR64","doi-asserted-by":"crossref","unstructured":"Wei, D., Lim, J., Zisserman, A., Freeman, W.T.: Learning and using the arrow of time. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00840"},{"key":"5_CR65","doi-asserted-by":"crossref","unstructured":"Weinzaepfel, P., Revaud, J., Harchaoui, Z., Schmid, C.: DeepFlow: large displacement optical flow with deep matching. In: ICCV (2013)","DOI":"10.1109\/ICCV.2013.175"},{"key":"5_CR66","doi-asserted-by":"crossref","unstructured":"Xiao, H., Feng, J., Lin, G., Liu, Y., Zhang, M.: MoNet: deep motion exploitation for video object segmentation. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00125"},{"key":"5_CR67","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_19","volume-title":"Computer Vision - ECCV 2018","author":"S Xie","year":"2018","unstructured":"Xie, S., Sun, C., Huang, J., Tu, Z., Murphy, K.: Rethinking spatiotemporal feature learning: speed-accuracy trade-offs in video classification. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11219. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01267-0_19"},{"key":"5_CR68","doi-asserted-by":"crossref","unstructured":"Xu, D., Xiao, J., Zhao, Z., Shao, J., Xie, D., Zhuang, Y.: Self-supervised spatiotemporal learning via video clip order prediction. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.01058"},{"key":"5_CR69","doi-asserted-by":"crossref","unstructured":"Xu, J., Ni, B., Li, Z., Cheng, S., Yang, X.: Structure preserving video prediction. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00158"},{"key":"5_CR70","unstructured":"Xu, L., Ren, J.S., Liu, C., Jia, J.: Deep convolutional neural network for image deconvolution. In: NeurIPS (2014)"},{"key":"5_CR71","doi-asserted-by":"crossref","unstructured":"Xu, X., Cheong, L.F., Li, Z.: Motion segmentation by exploiting complementary geometric models. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00302"},{"key":"5_CR72","unstructured":"Yu, F., Koltun, V.: Multi-scale context aggregation by dilated convolutions. arXiv (2015)"},{"key":"5_CR73","doi-asserted-by":"publisher","first-page":"211301","DOI":"10.1007\/s11432-019-2757-1","volume":"63","author":"G Zhai","year":"2020","unstructured":"Zhai, G., Min, X.: Perceptual image quality assessment: a survey. Sci. China Inf. Sci. 63, 211301 (2020). https:\/\/doi.org\/10.1007\/s11432-019-2757-1","journal-title":"Sci. China Inf. 
Sci."}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2020"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-030-58568-6_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,12]],"date-time":"2024-11-12T00:17:48Z","timestamp":1731370668000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-030-58568-6_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020]]},"ISBN":["9783030585679","9783030585686"],"references-count":73,"URL":"https:\/\/doi.org\/10.1007\/978-3-030-58568-6_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2020]]},"assertion":[{"value":"13 November 2020","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Glasgow","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2020","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 August 2020","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28 August 2020","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"16","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2020","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2020.eu\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"OpenReview","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5025","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1360","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"27% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"7","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"The conference was held virtually due to the COVID-19 pandemic. From the ECCV Workshops 249 full papers, 18 short papers, and 21 further contributions were published out of a total of 467 submissions.","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}